]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9.1-3.8.2-201303082043.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.8.2-201303082043.patch
CommitLineData
8a668c8d
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..b47493f 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113@@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125-linux
126+lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130@@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134-media
135 mconf
136+mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143+mkpiggy
144 mkprep
145 mkregtable
146 mktables
147@@ -185,6 +204,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151+parse-events*
152+pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156@@ -194,6 +215,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160+pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164@@ -203,7 +225,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168+realmode.lds
169+realmode.relocs
170 recordmcount
171+regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175@@ -213,8 +238,12 @@ series
176 setup
177 setup.bin
178 setup.elf
179+signing_key*
180+size_overflow_hash.h
181 sImage
182+slabinfo
183 sm_tbl*
184+sortextable
185 split-include
186 syscalltab.h
187 tables.c
188@@ -224,6 +253,7 @@ tftpboot.img
189 timeconst.h
190 times.h*
191 trix_boot.h
192+user_constants.h
193 utsrelease.h*
194 vdso-syms.lds
195 vdso.lds
196@@ -235,13 +265,17 @@ vdso32.lds
197 vdso32.so.dbg
198 vdso64.lds
199 vdso64.so.dbg
200+vdsox32.lds
201+vdsox32-syms.lds
202 version.h*
203 vmImage
204 vmlinux
205 vmlinux-*
206 vmlinux.aout
207 vmlinux.bin.all
208+vmlinux.bin.bz2
209 vmlinux.lds
210+vmlinux.relocs
211 vmlinuz
212 voffset.h
213 vsyscall.lds
214@@ -249,9 +283,12 @@ vsyscall_32.lds
215 wanxlfw.inc
216 uImage
217 unifdef
218+utsrelease.h
219 wakeup.bin
220 wakeup.elf
221 wakeup.lds
222+x509*
223 zImage*
224 zconf.hash.c
225+zconf.lex.c
226 zoffset.h
227diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
228index 986614d..0afd461 100644
229--- a/Documentation/kernel-parameters.txt
230+++ b/Documentation/kernel-parameters.txt
231@@ -922,6 +922,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
232 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
233 Default: 1024
234
235+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
236+ ignore grsecurity's /proc restrictions
237+
238+
239 hashdist= [KNL,NUMA] Large hashes allocated during boot
240 are distributed across NUMA nodes. Defaults on
241 for 64-bit NUMA, off otherwise.
242@@ -2121,6 +2125,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
243 the specified number of seconds. This is to be used if
244 your oopses keep scrolling off the screen.
245
246+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
247+ virtualization environments that don't cope well with the
248+ expand down segment used by UDEREF on X86-32 or the frequent
249+ page table updates on X86-64.
250+
251+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
252+
253 pcbit= [HW,ISDN]
254
255 pcd. [PARIDE]
256diff --git a/Makefile b/Makefile
257index 20d5318..d5cec9c 100644
258--- a/Makefile
259+++ b/Makefile
260@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
261
262 HOSTCC = gcc
263 HOSTCXX = g++
264-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
265-HOSTCXXFLAGS = -O2
266+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
267+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
268+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
269
270 # Decide whether to build built-in, modular, or both.
271 # Normally, just do built-in.
272@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
273 # Rules shared between *config targets and build targets
274
275 # Basic helpers built in scripts/
276-PHONY += scripts_basic
277-scripts_basic:
278+PHONY += scripts_basic gcc-plugins
279+scripts_basic: gcc-plugins
280 $(Q)$(MAKE) $(build)=scripts/basic
281 $(Q)rm -f .tmp_quiet_recordmcount
282
283@@ -575,6 +576,62 @@ else
284 KBUILD_CFLAGS += -O2
285 endif
286
287+ifndef DISABLE_PAX_PLUGINS
288+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
289+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
290+else
291+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
292+endif
293+ifneq ($(PLUGINCC),)
294+ifndef CONFIG_UML
295+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
296+endif
297+ifdef CONFIG_PAX_MEMORY_STACKLEAK
298+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
299+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
300+endif
301+ifdef CONFIG_KALLOCSTAT_PLUGIN
302+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
303+endif
304+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
305+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
306+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
307+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
308+endif
309+ifdef CONFIG_CHECKER_PLUGIN
310+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
311+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
312+endif
313+endif
314+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
315+ifdef CONFIG_PAX_SIZE_OVERFLOW
316+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
317+endif
318+ifdef CONFIG_PAX_LATENT_ENTROPY
319+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
320+endif
321+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
322+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
323+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
324+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
325+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
326+ifeq ($(KBUILD_EXTMOD),)
327+gcc-plugins:
328+ $(Q)$(MAKE) $(build)=tools/gcc
329+else
330+gcc-plugins: ;
331+endif
332+else
333+gcc-plugins:
334+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
335+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
336+else
337+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
338+endif
339+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
340+endif
341+endif
342+
343 include $(srctree)/arch/$(SRCARCH)/Makefile
344
345 ifdef CONFIG_READABLE_ASM
346@@ -731,7 +788,7 @@ export mod_sign_cmd
347
348
349 ifeq ($(KBUILD_EXTMOD),)
350-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
351+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
352
353 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
354 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
355@@ -778,6 +835,8 @@ endif
356
357 # The actual objects are generated when descending,
358 # make sure no implicit rule kicks in
359+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
360+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
361 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
362
363 # Handle descending into subdirectories listed in $(vmlinux-dirs)
364@@ -787,7 +846,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
365 # Error messages still appears in the original language
366
367 PHONY += $(vmlinux-dirs)
368-$(vmlinux-dirs): prepare scripts
369+$(vmlinux-dirs): gcc-plugins prepare scripts
370 $(Q)$(MAKE) $(build)=$@
371
372 # Store (new) KERNELRELASE string in include/config/kernel.release
373@@ -831,6 +890,7 @@ prepare0: archprepare FORCE
374 $(Q)$(MAKE) $(build)=.
375
376 # All the preparing..
377+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
378 prepare: prepare0
379
380 # Generate some files
381@@ -938,6 +998,8 @@ all: modules
382 # using awk while concatenating to the final file.
383
384 PHONY += modules
385+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
386+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
387 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
388 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
389 @$(kecho) ' Building modules, stage 2.';
390@@ -953,7 +1015,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
391
392 # Target to prepare building external modules
393 PHONY += modules_prepare
394-modules_prepare: prepare scripts
395+modules_prepare: gcc-plugins prepare scripts
396
397 # Target to install modules
398 PHONY += modules_install
399@@ -1019,7 +1081,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
400 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
401 signing_key.priv signing_key.x509 x509.genkey \
402 extra_certificates signing_key.x509.keyid \
403- signing_key.x509.signer
404+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
405
406 # clean - Delete most, but leave enough to build external modules
407 #
408@@ -1059,6 +1121,7 @@ distclean: mrproper
409 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
410 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
411 -o -name '.*.rej' \
412+ -o -name '.*.rej' -o -name '*.so' \
413 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
414 -type f -print | xargs rm -f
415
416@@ -1219,6 +1282,8 @@ PHONY += $(module-dirs) modules
417 $(module-dirs): crmodverdir $(objtree)/Module.symvers
418 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
419
420+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
421+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
422 modules: $(module-dirs)
423 @$(kecho) ' Building modules, stage 2.';
424 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
425@@ -1355,17 +1420,21 @@ else
426 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
427 endif
428
429-%.s: %.c prepare scripts FORCE
430+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
431+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
432+%.s: %.c gcc-plugins prepare scripts FORCE
433 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
434 %.i: %.c prepare scripts FORCE
435 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
436-%.o: %.c prepare scripts FORCE
437+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
438+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
439+%.o: %.c gcc-plugins prepare scripts FORCE
440 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
441 %.lst: %.c prepare scripts FORCE
442 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
443-%.s: %.S prepare scripts FORCE
444+%.s: %.S gcc-plugins prepare scripts FORCE
445 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
446-%.o: %.S prepare scripts FORCE
447+%.o: %.S gcc-plugins prepare scripts FORCE
448 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
449 %.symtypes: %.c prepare scripts FORCE
450 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
451@@ -1375,11 +1444,15 @@ endif
452 $(cmd_crmodverdir)
453 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
454 $(build)=$(build-dir)
455-%/: prepare scripts FORCE
456+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
457+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
458+%/: gcc-plugins prepare scripts FORCE
459 $(cmd_crmodverdir)
460 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
461 $(build)=$(build-dir)
462-%.ko: prepare scripts FORCE
463+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
464+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
465+%.ko: gcc-plugins prepare scripts FORCE
466 $(cmd_crmodverdir)
467 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
468 $(build)=$(build-dir) $(@:.ko=.o)
469diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
470index c2cbe4f..f7264b4 100644
471--- a/arch/alpha/include/asm/atomic.h
472+++ b/arch/alpha/include/asm/atomic.h
473@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
474 #define atomic_dec(v) atomic_sub(1,(v))
475 #define atomic64_dec(v) atomic64_sub(1,(v))
476
477+#define atomic64_read_unchecked(v) atomic64_read(v)
478+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
479+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
480+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
481+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
482+#define atomic64_inc_unchecked(v) atomic64_inc(v)
483+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
484+#define atomic64_dec_unchecked(v) atomic64_dec(v)
485+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
486+
487 #define smp_mb__before_atomic_dec() smp_mb()
488 #define smp_mb__after_atomic_dec() smp_mb()
489 #define smp_mb__before_atomic_inc() smp_mb()
490diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
491index ad368a9..fbe0f25 100644
492--- a/arch/alpha/include/asm/cache.h
493+++ b/arch/alpha/include/asm/cache.h
494@@ -4,19 +4,19 @@
495 #ifndef __ARCH_ALPHA_CACHE_H
496 #define __ARCH_ALPHA_CACHE_H
497
498+#include <linux/const.h>
499
500 /* Bytes per L1 (data) cache line. */
501 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
502-# define L1_CACHE_BYTES 64
503 # define L1_CACHE_SHIFT 6
504 #else
505 /* Both EV4 and EV5 are write-through, read-allocate,
506 direct-mapped, physical.
507 */
508-# define L1_CACHE_BYTES 32
509 # define L1_CACHE_SHIFT 5
510 #endif
511
512+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
513 #define SMP_CACHE_BYTES L1_CACHE_BYTES
514
515 #endif
516diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
517index 968d999..d36b2df 100644
518--- a/arch/alpha/include/asm/elf.h
519+++ b/arch/alpha/include/asm/elf.h
520@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
521
522 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
523
524+#ifdef CONFIG_PAX_ASLR
525+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
526+
527+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
528+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
529+#endif
530+
531 /* $0 is set by ld.so to a pointer to a function which might be
532 registered using atexit. This provides a mean for the dynamic
533 linker to call DT_FINI functions for shared libraries that have
534diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
535index bc2a0da..8ad11ee 100644
536--- a/arch/alpha/include/asm/pgalloc.h
537+++ b/arch/alpha/include/asm/pgalloc.h
538@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
539 pgd_set(pgd, pmd);
540 }
541
542+static inline void
543+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
544+{
545+ pgd_populate(mm, pgd, pmd);
546+}
547+
548 extern pgd_t *pgd_alloc(struct mm_struct *mm);
549
550 static inline void
551diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
552index 81a4342..348b927 100644
553--- a/arch/alpha/include/asm/pgtable.h
554+++ b/arch/alpha/include/asm/pgtable.h
555@@ -102,6 +102,17 @@ struct vm_area_struct;
556 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
557 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
558 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
559+
560+#ifdef CONFIG_PAX_PAGEEXEC
561+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
562+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
563+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
564+#else
565+# define PAGE_SHARED_NOEXEC PAGE_SHARED
566+# define PAGE_COPY_NOEXEC PAGE_COPY
567+# define PAGE_READONLY_NOEXEC PAGE_READONLY
568+#endif
569+
570 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
571
572 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
573diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
574index 2fd00b7..cfd5069 100644
575--- a/arch/alpha/kernel/module.c
576+++ b/arch/alpha/kernel/module.c
577@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
578
579 /* The small sections were sorted to the end of the segment.
580 The following should definitely cover them. */
581- gp = (u64)me->module_core + me->core_size - 0x8000;
582+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
583 got = sechdrs[me->arch.gotsecindex].sh_addr;
584
585 for (i = 0; i < n; i++) {
586diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
587index 14db93e..47bed62 100644
588--- a/arch/alpha/kernel/osf_sys.c
589+++ b/arch/alpha/kernel/osf_sys.c
590@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
591 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
592
593 static unsigned long
594-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
595- unsigned long limit)
596+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
597+ unsigned long limit, unsigned long flags)
598 {
599 struct vm_area_struct *vma = find_vma(current->mm, addr);
600-
601+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
602 while (1) {
603 /* At this point: (!vma || addr < vma->vm_end). */
604 if (limit - len < addr)
605 return -ENOMEM;
606- if (!vma || addr + len <= vma->vm_start)
607+ if (check_heap_stack_gap(vma, addr, len, offset))
608 return addr;
609 addr = vma->vm_end;
610 vma = vma->vm_next;
611@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
612 merely specific addresses, but regions of memory -- perhaps
613 this feature should be incorporated into all ports? */
614
615+#ifdef CONFIG_PAX_RANDMMAP
616+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
617+#endif
618+
619 if (addr) {
620- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
621+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
622 if (addr != (unsigned long) -ENOMEM)
623 return addr;
624 }
625
626 /* Next, try allocating at TASK_UNMAPPED_BASE. */
627- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
628- len, limit);
629+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
630+
631 if (addr != (unsigned long) -ENOMEM)
632 return addr;
633
634 /* Finally, try allocating in low memory. */
635- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
636+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
637
638 return addr;
639 }
640diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
641index 0c4132d..88f0d53 100644
642--- a/arch/alpha/mm/fault.c
643+++ b/arch/alpha/mm/fault.c
644@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
645 __reload_thread(pcb);
646 }
647
648+#ifdef CONFIG_PAX_PAGEEXEC
649+/*
650+ * PaX: decide what to do with offenders (regs->pc = fault address)
651+ *
652+ * returns 1 when task should be killed
653+ * 2 when patched PLT trampoline was detected
654+ * 3 when unpatched PLT trampoline was detected
655+ */
656+static int pax_handle_fetch_fault(struct pt_regs *regs)
657+{
658+
659+#ifdef CONFIG_PAX_EMUPLT
660+ int err;
661+
662+ do { /* PaX: patched PLT emulation #1 */
663+ unsigned int ldah, ldq, jmp;
664+
665+ err = get_user(ldah, (unsigned int *)regs->pc);
666+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
667+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
668+
669+ if (err)
670+ break;
671+
672+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
673+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
674+ jmp == 0x6BFB0000U)
675+ {
676+ unsigned long r27, addr;
677+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
678+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
679+
680+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
681+ err = get_user(r27, (unsigned long *)addr);
682+ if (err)
683+ break;
684+
685+ regs->r27 = r27;
686+ regs->pc = r27;
687+ return 2;
688+ }
689+ } while (0);
690+
691+ do { /* PaX: patched PLT emulation #2 */
692+ unsigned int ldah, lda, br;
693+
694+ err = get_user(ldah, (unsigned int *)regs->pc);
695+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
696+ err |= get_user(br, (unsigned int *)(regs->pc+8));
697+
698+ if (err)
699+ break;
700+
701+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
702+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
703+ (br & 0xFFE00000U) == 0xC3E00000U)
704+ {
705+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
706+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
707+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
708+
709+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
710+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
711+ return 2;
712+ }
713+ } while (0);
714+
715+ do { /* PaX: unpatched PLT emulation */
716+ unsigned int br;
717+
718+ err = get_user(br, (unsigned int *)regs->pc);
719+
720+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
721+ unsigned int br2, ldq, nop, jmp;
722+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
723+
724+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
725+ err = get_user(br2, (unsigned int *)addr);
726+ err |= get_user(ldq, (unsigned int *)(addr+4));
727+ err |= get_user(nop, (unsigned int *)(addr+8));
728+ err |= get_user(jmp, (unsigned int *)(addr+12));
729+ err |= get_user(resolver, (unsigned long *)(addr+16));
730+
731+ if (err)
732+ break;
733+
734+ if (br2 == 0xC3600000U &&
735+ ldq == 0xA77B000CU &&
736+ nop == 0x47FF041FU &&
737+ jmp == 0x6B7B0000U)
738+ {
739+ regs->r28 = regs->pc+4;
740+ regs->r27 = addr+16;
741+ regs->pc = resolver;
742+ return 3;
743+ }
744+ }
745+ } while (0);
746+#endif
747+
748+ return 1;
749+}
750+
751+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
752+{
753+ unsigned long i;
754+
755+ printk(KERN_ERR "PAX: bytes at PC: ");
756+ for (i = 0; i < 5; i++) {
757+ unsigned int c;
758+ if (get_user(c, (unsigned int *)pc+i))
759+ printk(KERN_CONT "???????? ");
760+ else
761+ printk(KERN_CONT "%08x ", c);
762+ }
763+ printk("\n");
764+}
765+#endif
766
767 /*
768 * This routine handles page faults. It determines the address,
769@@ -133,8 +251,29 @@ retry:
770 good_area:
771 si_code = SEGV_ACCERR;
772 if (cause < 0) {
773- if (!(vma->vm_flags & VM_EXEC))
774+ if (!(vma->vm_flags & VM_EXEC)) {
775+
776+#ifdef CONFIG_PAX_PAGEEXEC
777+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
778+ goto bad_area;
779+
780+ up_read(&mm->mmap_sem);
781+ switch (pax_handle_fetch_fault(regs)) {
782+
783+#ifdef CONFIG_PAX_EMUPLT
784+ case 2:
785+ case 3:
786+ return;
787+#endif
788+
789+ }
790+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
791+ do_group_exit(SIGKILL);
792+#else
793 goto bad_area;
794+#endif
795+
796+ }
797 } else if (!cause) {
798 /* Allow reads even for write-only mappings */
799 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
800diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
801index 67874b8..0e40765 100644
802--- a/arch/arm/Kconfig
803+++ b/arch/arm/Kconfig
804@@ -1813,7 +1813,7 @@ config ALIGNMENT_TRAP
805
806 config UACCESS_WITH_MEMCPY
807 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
808- depends on MMU
809+ depends on MMU && !PAX_MEMORY_UDEREF
810 default y if CPU_FEROCEON
811 help
812 Implement faster copy_to_user and clear_user methods for CPU
813diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
814index 87dfa902..3a523fc 100644
815--- a/arch/arm/common/gic.c
816+++ b/arch/arm/common/gic.c
817@@ -81,7 +81,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
818 * Supported arch specific GIC irq extension.
819 * Default make them NULL.
820 */
821-struct irq_chip gic_arch_extn = {
822+irq_chip_no_const gic_arch_extn __read_only = {
823 .irq_eoi = NULL,
824 .irq_mask = NULL,
825 .irq_unmask = NULL,
826@@ -329,7 +329,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
827 chained_irq_exit(chip, desc);
828 }
829
830-static struct irq_chip gic_chip = {
831+static irq_chip_no_const gic_chip __read_only = {
832 .name = "GIC",
833 .irq_mask = gic_mask_irq,
834 .irq_unmask = gic_unmask_irq,
835diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
836index c79f61f..9ac0642 100644
837--- a/arch/arm/include/asm/atomic.h
838+++ b/arch/arm/include/asm/atomic.h
839@@ -17,17 +17,35 @@
840 #include <asm/barrier.h>
841 #include <asm/cmpxchg.h>
842
843+#ifdef CONFIG_GENERIC_ATOMIC64
844+#include <asm-generic/atomic64.h>
845+#endif
846+
847 #define ATOMIC_INIT(i) { (i) }
848
849 #ifdef __KERNEL__
850
851+#define _ASM_EXTABLE(from, to) \
852+" .pushsection __ex_table,\"a\"\n"\
853+" .align 3\n" \
854+" .long " #from ", " #to"\n" \
855+" .popsection"
856+
857 /*
858 * On ARM, ordinary assignment (str instruction) doesn't clear the local
859 * strex/ldrex monitor on some implementations. The reason we can use it for
860 * atomic_set() is the clrex or dummy strex done on every exception return.
861 */
862 #define atomic_read(v) (*(volatile int *)&(v)->counter)
863+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
864+{
865+ return v->counter;
866+}
867 #define atomic_set(v,i) (((v)->counter) = (i))
868+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
869+{
870+ v->counter = i;
871+}
872
873 #if __LINUX_ARM_ARCH__ >= 6
874
875@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
876 int result;
877
878 __asm__ __volatile__("@ atomic_add\n"
879+"1: ldrex %1, [%3]\n"
880+" adds %0, %1, %4\n"
881+
882+#ifdef CONFIG_PAX_REFCOUNT
883+" bvc 3f\n"
884+"2: bkpt 0xf103\n"
885+"3:\n"
886+#endif
887+
888+" strex %1, %0, [%3]\n"
889+" teq %1, #0\n"
890+" bne 1b"
891+
892+#ifdef CONFIG_PAX_REFCOUNT
893+"\n4:\n"
894+ _ASM_EXTABLE(2b, 4b)
895+#endif
896+
897+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
898+ : "r" (&v->counter), "Ir" (i)
899+ : "cc");
900+}
901+
902+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
903+{
904+ unsigned long tmp;
905+ int result;
906+
907+ __asm__ __volatile__("@ atomic_add_unchecked\n"
908 "1: ldrex %0, [%3]\n"
909 " add %0, %0, %4\n"
910 " strex %1, %0, [%3]\n"
911@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
912 smp_mb();
913
914 __asm__ __volatile__("@ atomic_add_return\n"
915+"1: ldrex %1, [%3]\n"
916+" adds %0, %1, %4\n"
917+
918+#ifdef CONFIG_PAX_REFCOUNT
919+" bvc 3f\n"
920+" mov %0, %1\n"
921+"2: bkpt 0xf103\n"
922+"3:\n"
923+#endif
924+
925+" strex %1, %0, [%3]\n"
926+" teq %1, #0\n"
927+" bne 1b"
928+
929+#ifdef CONFIG_PAX_REFCOUNT
930+"\n4:\n"
931+ _ASM_EXTABLE(2b, 4b)
932+#endif
933+
934+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
935+ : "r" (&v->counter), "Ir" (i)
936+ : "cc");
937+
938+ smp_mb();
939+
940+ return result;
941+}
942+
943+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
944+{
945+ unsigned long tmp;
946+ int result;
947+
948+ smp_mb();
949+
950+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
951 "1: ldrex %0, [%3]\n"
952 " add %0, %0, %4\n"
953 " strex %1, %0, [%3]\n"
954@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
955 int result;
956
957 __asm__ __volatile__("@ atomic_sub\n"
958+"1: ldrex %1, [%3]\n"
959+" subs %0, %1, %4\n"
960+
961+#ifdef CONFIG_PAX_REFCOUNT
962+" bvc 3f\n"
963+"2: bkpt 0xf103\n"
964+"3:\n"
965+#endif
966+
967+" strex %1, %0, [%3]\n"
968+" teq %1, #0\n"
969+" bne 1b"
970+
971+#ifdef CONFIG_PAX_REFCOUNT
972+"\n4:\n"
973+ _ASM_EXTABLE(2b, 4b)
974+#endif
975+
976+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
977+ : "r" (&v->counter), "Ir" (i)
978+ : "cc");
979+}
980+
981+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
982+{
983+ unsigned long tmp;
984+ int result;
985+
986+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
987 "1: ldrex %0, [%3]\n"
988 " sub %0, %0, %4\n"
989 " strex %1, %0, [%3]\n"
990@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
991 smp_mb();
992
993 __asm__ __volatile__("@ atomic_sub_return\n"
994-"1: ldrex %0, [%3]\n"
995-" sub %0, %0, %4\n"
996+"1: ldrex %1, [%3]\n"
997+" subs %0, %1, %4\n"
998+
999+#ifdef CONFIG_PAX_REFCOUNT
1000+" bvc 3f\n"
1001+" mov %0, %1\n"
1002+"2: bkpt 0xf103\n"
1003+"3:\n"
1004+#endif
1005+
1006 " strex %1, %0, [%3]\n"
1007 " teq %1, #0\n"
1008 " bne 1b"
1009+
1010+#ifdef CONFIG_PAX_REFCOUNT
1011+"\n4:\n"
1012+ _ASM_EXTABLE(2b, 4b)
1013+#endif
1014+
1015 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1016 : "r" (&v->counter), "Ir" (i)
1017 : "cc");
1018@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1019 return oldval;
1020 }
1021
1022+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1023+{
1024+ unsigned long oldval, res;
1025+
1026+ smp_mb();
1027+
1028+ do {
1029+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1030+ "ldrex %1, [%3]\n"
1031+ "mov %0, #0\n"
1032+ "teq %1, %4\n"
1033+ "strexeq %0, %5, [%3]\n"
1034+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1035+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1036+ : "cc");
1037+ } while (res);
1038+
1039+ smp_mb();
1040+
1041+ return oldval;
1042+}
1043+
1044 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1045 {
1046 unsigned long tmp, tmp2;
1047@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1048
1049 return val;
1050 }
1051+
1052+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1053+{
1054+ return atomic_add_return(i, v);
1055+}
1056+
1057 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1058+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1059+{
1060+ (void) atomic_add_return(i, v);
1061+}
1062
1063 static inline int atomic_sub_return(int i, atomic_t *v)
1064 {
1065@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1066 return val;
1067 }
1068 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1069+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1070+{
1071+ (void) atomic_sub_return(i, v);
1072+}
1073
1074 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1075 {
1076@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1077 return ret;
1078 }
1079
1080+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1081+{
1082+ return atomic_cmpxchg(v, old, new);
1083+}
1084+
1085 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1086 {
1087 unsigned long flags;
1088@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1089 #endif /* __LINUX_ARM_ARCH__ */
1090
1091 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1092+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1093+{
1094+ return xchg(&v->counter, new);
1095+}
1096
1097 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1098 {
1099@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1100 }
1101
1102 #define atomic_inc(v) atomic_add(1, v)
1103+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1104+{
1105+ atomic_add_unchecked(1, v);
1106+}
1107 #define atomic_dec(v) atomic_sub(1, v)
1108+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1109+{
1110+ atomic_sub_unchecked(1, v);
1111+}
1112
1113 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1114+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1115+{
1116+ return atomic_add_return_unchecked(1, v) == 0;
1117+}
1118 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1119 #define atomic_inc_return(v) (atomic_add_return(1, v))
1120+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1121+{
1122+ return atomic_add_return_unchecked(1, v);
1123+}
1124 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1125 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1126
1127@@ -241,6 +428,14 @@ typedef struct {
1128 u64 __aligned(8) counter;
1129 } atomic64_t;
1130
1131+#ifdef CONFIG_PAX_REFCOUNT
1132+typedef struct {
1133+ u64 __aligned(8) counter;
1134+} atomic64_unchecked_t;
1135+#else
1136+typedef atomic64_t atomic64_unchecked_t;
1137+#endif
1138+
1139 #define ATOMIC64_INIT(i) { (i) }
1140
1141 static inline u64 atomic64_read(const atomic64_t *v)
1142@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1143 return result;
1144 }
1145
1146+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1147+{
1148+ u64 result;
1149+
1150+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1151+" ldrexd %0, %H0, [%1]"
1152+ : "=&r" (result)
1153+ : "r" (&v->counter), "Qo" (v->counter)
1154+ );
1155+
1156+ return result;
1157+}
1158+
1159 static inline void atomic64_set(atomic64_t *v, u64 i)
1160 {
1161 u64 tmp;
1162@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1163 : "cc");
1164 }
1165
1166+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1167+{
1168+ u64 tmp;
1169+
1170+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1171+"1: ldrexd %0, %H0, [%2]\n"
1172+" strexd %0, %3, %H3, [%2]\n"
1173+" teq %0, #0\n"
1174+" bne 1b"
1175+ : "=&r" (tmp), "=Qo" (v->counter)
1176+ : "r" (&v->counter), "r" (i)
1177+ : "cc");
1178+}
1179+
1180 static inline void atomic64_add(u64 i, atomic64_t *v)
1181 {
1182 u64 result;
1183@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1184 __asm__ __volatile__("@ atomic64_add\n"
1185 "1: ldrexd %0, %H0, [%3]\n"
1186 " adds %0, %0, %4\n"
1187+" adcs %H0, %H0, %H4\n"
1188+
1189+#ifdef CONFIG_PAX_REFCOUNT
1190+" bvc 3f\n"
1191+"2: bkpt 0xf103\n"
1192+"3:\n"
1193+#endif
1194+
1195+" strexd %1, %0, %H0, [%3]\n"
1196+" teq %1, #0\n"
1197+" bne 1b"
1198+
1199+#ifdef CONFIG_PAX_REFCOUNT
1200+"\n4:\n"
1201+ _ASM_EXTABLE(2b, 4b)
1202+#endif
1203+
1204+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1205+ : "r" (&v->counter), "r" (i)
1206+ : "cc");
1207+}
1208+
1209+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1210+{
1211+ u64 result;
1212+ unsigned long tmp;
1213+
1214+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1215+"1: ldrexd %0, %H0, [%3]\n"
1216+" adds %0, %0, %4\n"
1217 " adc %H0, %H0, %H4\n"
1218 " strexd %1, %0, %H0, [%3]\n"
1219 " teq %1, #0\n"
1220@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1221
1222 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1223 {
1224- u64 result;
1225- unsigned long tmp;
1226+ u64 result, tmp;
1227
1228 smp_mb();
1229
1230 __asm__ __volatile__("@ atomic64_add_return\n"
1231+"1: ldrexd %1, %H1, [%3]\n"
1232+" adds %0, %1, %4\n"
1233+" adcs %H0, %H1, %H4\n"
1234+
1235+#ifdef CONFIG_PAX_REFCOUNT
1236+" bvc 3f\n"
1237+" mov %0, %1\n"
1238+" mov %H0, %H1\n"
1239+"2: bkpt 0xf103\n"
1240+"3:\n"
1241+#endif
1242+
1243+" strexd %1, %0, %H0, [%3]\n"
1244+" teq %1, #0\n"
1245+" bne 1b"
1246+
1247+#ifdef CONFIG_PAX_REFCOUNT
1248+"\n4:\n"
1249+ _ASM_EXTABLE(2b, 4b)
1250+#endif
1251+
1252+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1253+ : "r" (&v->counter), "r" (i)
1254+ : "cc");
1255+
1256+ smp_mb();
1257+
1258+ return result;
1259+}
1260+
1261+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1262+{
1263+ u64 result;
1264+ unsigned long tmp;
1265+
1266+ smp_mb();
1267+
1268+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1269 "1: ldrexd %0, %H0, [%3]\n"
1270 " adds %0, %0, %4\n"
1271 " adc %H0, %H0, %H4\n"
1272@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1273 __asm__ __volatile__("@ atomic64_sub\n"
1274 "1: ldrexd %0, %H0, [%3]\n"
1275 " subs %0, %0, %4\n"
1276+" sbcs %H0, %H0, %H4\n"
1277+
1278+#ifdef CONFIG_PAX_REFCOUNT
1279+" bvc 3f\n"
1280+"2: bkpt 0xf103\n"
1281+"3:\n"
1282+#endif
1283+
1284+" strexd %1, %0, %H0, [%3]\n"
1285+" teq %1, #0\n"
1286+" bne 1b"
1287+
1288+#ifdef CONFIG_PAX_REFCOUNT
1289+"\n4:\n"
1290+ _ASM_EXTABLE(2b, 4b)
1291+#endif
1292+
1293+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1294+ : "r" (&v->counter), "r" (i)
1295+ : "cc");
1296+}
1297+
1298+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1299+{
1300+ u64 result;
1301+ unsigned long tmp;
1302+
1303+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1304+"1: ldrexd %0, %H0, [%3]\n"
1305+" subs %0, %0, %4\n"
1306 " sbc %H0, %H0, %H4\n"
1307 " strexd %1, %0, %H0, [%3]\n"
1308 " teq %1, #0\n"
1309@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1310
1311 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1312 {
1313- u64 result;
1314- unsigned long tmp;
1315+ u64 result, tmp;
1316
1317 smp_mb();
1318
1319 __asm__ __volatile__("@ atomic64_sub_return\n"
1320-"1: ldrexd %0, %H0, [%3]\n"
1321-" subs %0, %0, %4\n"
1322-" sbc %H0, %H0, %H4\n"
1323+"1: ldrexd %1, %H1, [%3]\n"
1324+" subs %0, %1, %4\n"
1325+" sbcs %H0, %H1, %H4\n"
1326+
1327+#ifdef CONFIG_PAX_REFCOUNT
1328+" bvc 3f\n"
1329+" mov %0, %1\n"
1330+" mov %H0, %H1\n"
1331+"2: bkpt 0xf103\n"
1332+"3:\n"
1333+#endif
1334+
1335 " strexd %1, %0, %H0, [%3]\n"
1336 " teq %1, #0\n"
1337 " bne 1b"
1338+
1339+#ifdef CONFIG_PAX_REFCOUNT
1340+"\n4:\n"
1341+ _ASM_EXTABLE(2b, 4b)
1342+#endif
1343+
1344 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1345 : "r" (&v->counter), "r" (i)
1346 : "cc");
1347@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1348 return oldval;
1349 }
1350
1351+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1352+{
1353+ u64 oldval;
1354+ unsigned long res;
1355+
1356+ smp_mb();
1357+
1358+ do {
1359+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1360+ "ldrexd %1, %H1, [%3]\n"
1361+ "mov %0, #0\n"
1362+ "teq %1, %4\n"
1363+ "teqeq %H1, %H4\n"
1364+ "strexdeq %0, %5, %H5, [%3]"
1365+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1366+ : "r" (&ptr->counter), "r" (old), "r" (new)
1367+ : "cc");
1368+ } while (res);
1369+
1370+ smp_mb();
1371+
1372+ return oldval;
1373+}
1374+
1375 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1376 {
1377 u64 result;
1378@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1379
1380 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1381 {
1382- u64 result;
1383- unsigned long tmp;
1384+ u64 result, tmp;
1385
1386 smp_mb();
1387
1388 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1389-"1: ldrexd %0, %H0, [%3]\n"
1390-" subs %0, %0, #1\n"
1391-" sbc %H0, %H0, #0\n"
1392+"1: ldrexd %1, %H1, [%3]\n"
1393+" subs %0, %1, #1\n"
1394+" sbcs %H0, %H1, #0\n"
1395+
1396+#ifdef CONFIG_PAX_REFCOUNT
1397+" bvc 3f\n"
1398+" mov %0, %1\n"
1399+" mov %H0, %H1\n"
1400+"2: bkpt 0xf103\n"
1401+"3:\n"
1402+#endif
1403+
1404 " teq %H0, #0\n"
1405-" bmi 2f\n"
1406+" bmi 4f\n"
1407 " strexd %1, %0, %H0, [%3]\n"
1408 " teq %1, #0\n"
1409 " bne 1b\n"
1410-"2:"
1411+"4:\n"
1412+
1413+#ifdef CONFIG_PAX_REFCOUNT
1414+ _ASM_EXTABLE(2b, 4b)
1415+#endif
1416+
1417 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1418 : "r" (&v->counter)
1419 : "cc");
1420@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1421 " teq %0, %5\n"
1422 " teqeq %H0, %H5\n"
1423 " moveq %1, #0\n"
1424-" beq 2f\n"
1425+" beq 4f\n"
1426 " adds %0, %0, %6\n"
1427-" adc %H0, %H0, %H6\n"
1428+" adcs %H0, %H0, %H6\n"
1429+
1430+#ifdef CONFIG_PAX_REFCOUNT
1431+" bvc 3f\n"
1432+"2: bkpt 0xf103\n"
1433+"3:\n"
1434+#endif
1435+
1436 " strexd %2, %0, %H0, [%4]\n"
1437 " teq %2, #0\n"
1438 " bne 1b\n"
1439-"2:"
1440+"4:\n"
1441+
1442+#ifdef CONFIG_PAX_REFCOUNT
1443+ _ASM_EXTABLE(2b, 4b)
1444+#endif
1445+
1446 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1447 : "r" (&v->counter), "r" (u), "r" (a)
1448 : "cc");
1449@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1450
1451 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1452 #define atomic64_inc(v) atomic64_add(1LL, (v))
1453+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1454 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1455+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1456 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1457 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1458 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1459+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1460 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1461 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1462 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1463diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1464index 75fe66b..ba3dee4 100644
1465--- a/arch/arm/include/asm/cache.h
1466+++ b/arch/arm/include/asm/cache.h
1467@@ -4,8 +4,10 @@
1468 #ifndef __ASMARM_CACHE_H
1469 #define __ASMARM_CACHE_H
1470
1471+#include <linux/const.h>
1472+
1473 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1474-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1475+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1476
1477 /*
1478 * Memory returned by kmalloc() may be used for DMA, so we must make
1479@@ -24,5 +26,6 @@
1480 #endif
1481
1482 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1483+#define __read_only __attribute__ ((__section__(".data..read_only")))
1484
1485 #endif
1486diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1487index e1489c5..d418304 100644
1488--- a/arch/arm/include/asm/cacheflush.h
1489+++ b/arch/arm/include/asm/cacheflush.h
1490@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1491 void (*dma_unmap_area)(const void *, size_t, int);
1492
1493 void (*dma_flush_range)(const void *, const void *);
1494-};
1495+} __no_const;
1496
1497 /*
1498 * Select the calling method
1499diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1500index 6dcc164..b14d917 100644
1501--- a/arch/arm/include/asm/checksum.h
1502+++ b/arch/arm/include/asm/checksum.h
1503@@ -37,7 +37,19 @@ __wsum
1504 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1505
1506 __wsum
1507-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1508+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1509+
1510+static inline __wsum
1511+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1512+{
1513+ __wsum ret;
1514+ pax_open_userland();
1515+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1516+ pax_close_userland();
1517+ return ret;
1518+}
1519+
1520+
1521
1522 /*
1523 * Fold a partial checksum without adding pseudo headers
1524diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1525index 7eb18c1..e38b6d2 100644
1526--- a/arch/arm/include/asm/cmpxchg.h
1527+++ b/arch/arm/include/asm/cmpxchg.h
1528@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1529
1530 #define xchg(ptr,x) \
1531 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1532+#define xchg_unchecked(ptr,x) \
1533+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1534
1535 #include <asm-generic/cmpxchg-local.h>
1536
1537diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
1538index ab98fdd..6b19938 100644
1539--- a/arch/arm/include/asm/delay.h
1540+++ b/arch/arm/include/asm/delay.h
1541@@ -24,9 +24,9 @@ extern struct arm_delay_ops {
1542 void (*delay)(unsigned long);
1543 void (*const_udelay)(unsigned long);
1544 void (*udelay)(unsigned long);
1545-} arm_delay_ops;
1546+} *arm_delay_ops;
1547
1548-#define __delay(n) arm_delay_ops.delay(n)
1549+#define __delay(n) arm_delay_ops->delay(n)
1550
1551 /*
1552 * This function intentionally does not exist; if you see references to
1553@@ -47,8 +47,8 @@ extern void __bad_udelay(void);
1554 * first constant multiplications gets optimized away if the delay is
1555 * a constant)
1556 */
1557-#define __udelay(n) arm_delay_ops.udelay(n)
1558-#define __const_udelay(n) arm_delay_ops.const_udelay(n)
1559+#define __udelay(n) arm_delay_ops->udelay(n)
1560+#define __const_udelay(n) arm_delay_ops->const_udelay(n)
1561
1562 #define udelay(n) \
1563 (__builtin_constant_p(n) ? \
1564diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1565index 6ddbe44..b5e38b1 100644
1566--- a/arch/arm/include/asm/domain.h
1567+++ b/arch/arm/include/asm/domain.h
1568@@ -48,18 +48,37 @@
1569 * Domain types
1570 */
1571 #define DOMAIN_NOACCESS 0
1572-#define DOMAIN_CLIENT 1
1573 #ifdef CONFIG_CPU_USE_DOMAINS
1574+#define DOMAIN_USERCLIENT 1
1575+#define DOMAIN_KERNELCLIENT 1
1576 #define DOMAIN_MANAGER 3
1577+#define DOMAIN_VECTORS DOMAIN_USER
1578 #else
1579+
1580+#ifdef CONFIG_PAX_KERNEXEC
1581 #define DOMAIN_MANAGER 1
1582+#define DOMAIN_KERNEXEC 3
1583+#else
1584+#define DOMAIN_MANAGER 1
1585+#endif
1586+
1587+#ifdef CONFIG_PAX_MEMORY_UDEREF
1588+#define DOMAIN_USERCLIENT 0
1589+#define DOMAIN_UDEREF 1
1590+#define DOMAIN_VECTORS DOMAIN_KERNEL
1591+#else
1592+#define DOMAIN_USERCLIENT 1
1593+#define DOMAIN_VECTORS DOMAIN_USER
1594+#endif
1595+#define DOMAIN_KERNELCLIENT 1
1596+
1597 #endif
1598
1599 #define domain_val(dom,type) ((type) << (2*(dom)))
1600
1601 #ifndef __ASSEMBLY__
1602
1603-#ifdef CONFIG_CPU_USE_DOMAINS
1604+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1605 static inline void set_domain(unsigned val)
1606 {
1607 asm volatile(
1608@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1609 isb();
1610 }
1611
1612-#define modify_domain(dom,type) \
1613- do { \
1614- struct thread_info *thread = current_thread_info(); \
1615- unsigned int domain = thread->cpu_domain; \
1616- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1617- thread->cpu_domain = domain | domain_val(dom, type); \
1618- set_domain(thread->cpu_domain); \
1619- } while (0)
1620-
1621+extern void modify_domain(unsigned int dom, unsigned int type);
1622 #else
1623 static inline void set_domain(unsigned val) { }
1624 static inline void modify_domain(unsigned dom, unsigned type) { }
1625diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1626index 38050b1..9d90e8b 100644
1627--- a/arch/arm/include/asm/elf.h
1628+++ b/arch/arm/include/asm/elf.h
1629@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1630 the loader. We need to make sure that it is out of the way of the program
1631 that it will "exec", and that there is sufficient room for the brk. */
1632
1633-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1634+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1635+
1636+#ifdef CONFIG_PAX_ASLR
1637+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1638+
1639+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1640+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1641+#endif
1642
1643 /* When the program starts, a1 contains a pointer to a function to be
1644 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1645@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1646 extern void elf_set_personality(const struct elf32_hdr *);
1647 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1648
1649-struct mm_struct;
1650-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1651-#define arch_randomize_brk arch_randomize_brk
1652-
1653 #endif
1654diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1655index de53547..52b9a28 100644
1656--- a/arch/arm/include/asm/fncpy.h
1657+++ b/arch/arm/include/asm/fncpy.h
1658@@ -81,7 +81,9 @@
1659 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1660 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1661 \
1662+ pax_open_kernel(); \
1663 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1664+ pax_close_kernel(); \
1665 flush_icache_range((unsigned long)(dest_buf), \
1666 (unsigned long)(dest_buf) + (size)); \
1667 \
1668diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1669index e42cf59..7b94b8f 100644
1670--- a/arch/arm/include/asm/futex.h
1671+++ b/arch/arm/include/asm/futex.h
1672@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1673 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1674 return -EFAULT;
1675
1676+ pax_open_userland();
1677+
1678 smp_mb();
1679 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1680 "1: ldrex %1, [%4]\n"
1681@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1682 : "cc", "memory");
1683 smp_mb();
1684
1685+ pax_close_userland();
1686+
1687 *uval = val;
1688 return ret;
1689 }
1690@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1691 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1692 return -EFAULT;
1693
1694+ pax_open_userland();
1695+
1696 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1697 "1: " TUSER(ldr) " %1, [%4]\n"
1698 " teq %1, %2\n"
1699@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1700 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1701 : "cc", "memory");
1702
1703+ pax_close_userland();
1704+
1705 *uval = val;
1706 return ret;
1707 }
1708@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1709 return -EFAULT;
1710
1711 pagefault_disable(); /* implies preempt_disable() */
1712+ pax_open_userland();
1713
1714 switch (op) {
1715 case FUTEX_OP_SET:
1716@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1717 ret = -ENOSYS;
1718 }
1719
1720+ pax_close_userland();
1721 pagefault_enable(); /* subsumes preempt_enable() */
1722
1723 if (!ret) {
1724diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
1725index 4b1ce6c..bea3f73 100644
1726--- a/arch/arm/include/asm/hardware/gic.h
1727+++ b/arch/arm/include/asm/hardware/gic.h
1728@@ -34,9 +34,10 @@
1729
1730 #ifndef __ASSEMBLY__
1731 #include <linux/irqdomain.h>
1732+#include <linux/irq.h>
1733 struct device_node;
1734
1735-extern struct irq_chip gic_arch_extn;
1736+extern irq_chip_no_const gic_arch_extn;
1737
1738 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
1739 u32 offset, struct device_node *);
1740diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1741index 83eb2f7..ed77159 100644
1742--- a/arch/arm/include/asm/kmap_types.h
1743+++ b/arch/arm/include/asm/kmap_types.h
1744@@ -4,6 +4,6 @@
1745 /*
1746 * This is the "bare minimum". AIO seems to require this.
1747 */
1748-#define KM_TYPE_NR 16
1749+#define KM_TYPE_NR 17
1750
1751 #endif
1752diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1753index 9e614a1..3302cca 100644
1754--- a/arch/arm/include/asm/mach/dma.h
1755+++ b/arch/arm/include/asm/mach/dma.h
1756@@ -22,7 +22,7 @@ struct dma_ops {
1757 int (*residue)(unsigned int, dma_t *); /* optional */
1758 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1759 const char *type;
1760-};
1761+} __do_const;
1762
1763 struct dma_struct {
1764 void *addr; /* single DMA address */
1765diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1766index 2fe141f..192dc01 100644
1767--- a/arch/arm/include/asm/mach/map.h
1768+++ b/arch/arm/include/asm/mach/map.h
1769@@ -27,13 +27,16 @@ struct map_desc {
1770 #define MT_MINICLEAN 6
1771 #define MT_LOW_VECTORS 7
1772 #define MT_HIGH_VECTORS 8
1773-#define MT_MEMORY 9
1774+#define MT_MEMORY_RWX 9
1775 #define MT_ROM 10
1776-#define MT_MEMORY_NONCACHED 11
1777+#define MT_MEMORY_NONCACHED_RX 11
1778 #define MT_MEMORY_DTCM 12
1779 #define MT_MEMORY_ITCM 13
1780 #define MT_MEMORY_SO 14
1781 #define MT_MEMORY_DMA_READY 15
1782+#define MT_MEMORY_RW 16
1783+#define MT_MEMORY_RX 17
1784+#define MT_MEMORY_NONCACHED_RW 18
1785
1786 #ifdef CONFIG_MMU
1787 extern void iotable_init(struct map_desc *, int);
1788diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1789index 53426c6..c7baff3 100644
1790--- a/arch/arm/include/asm/outercache.h
1791+++ b/arch/arm/include/asm/outercache.h
1792@@ -35,7 +35,7 @@ struct outer_cache_fns {
1793 #endif
1794 void (*set_debug)(unsigned long);
1795 void (*resume)(void);
1796-};
1797+} __no_const;
1798
1799 #ifdef CONFIG_OUTER_CACHE
1800
1801diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1802index 812a494..71fc0b6 100644
1803--- a/arch/arm/include/asm/page.h
1804+++ b/arch/arm/include/asm/page.h
1805@@ -114,7 +114,7 @@ struct cpu_user_fns {
1806 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1807 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1808 unsigned long vaddr, struct vm_area_struct *vma);
1809-};
1810+} __no_const;
1811
1812 #ifdef MULTI_USER
1813 extern struct cpu_user_fns cpu_user;
1814diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1815index 943504f..c37a730 100644
1816--- a/arch/arm/include/asm/pgalloc.h
1817+++ b/arch/arm/include/asm/pgalloc.h
1818@@ -17,6 +17,7 @@
1819 #include <asm/processor.h>
1820 #include <asm/cacheflush.h>
1821 #include <asm/tlbflush.h>
1822+#include <asm/system_info.h>
1823
1824 #define check_pgt_cache() do { } while (0)
1825
1826@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1827 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1828 }
1829
1830+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1831+{
1832+ pud_populate(mm, pud, pmd);
1833+}
1834+
1835 #else /* !CONFIG_ARM_LPAE */
1836
1837 /*
1838@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1839 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1840 #define pmd_free(mm, pmd) do { } while (0)
1841 #define pud_populate(mm,pmd,pte) BUG()
1842+#define pud_populate_kernel(mm,pmd,pte) BUG()
1843
1844 #endif /* CONFIG_ARM_LPAE */
1845
1846@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1847 __free_page(pte);
1848 }
1849
1850+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1851+{
1852+#ifdef CONFIG_ARM_LPAE
1853+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1854+#else
1855+ if (addr & SECTION_SIZE)
1856+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1857+ else
1858+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1859+#endif
1860+ flush_pmd_entry(pmdp);
1861+}
1862+
1863 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1864 pmdval_t prot)
1865 {
1866@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1867 static inline void
1868 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1869 {
1870- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1871+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1872 }
1873 #define pmd_pgtable(pmd) pmd_page(pmd)
1874
1875diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1876index 5cfba15..f415e1a 100644
1877--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1878+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1879@@ -20,12 +20,15 @@
1880 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1881 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1882 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1883+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1884 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1885 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1886 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1887+
1888 /*
1889 * - section
1890 */
1891+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1892 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1893 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1894 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1895@@ -37,6 +40,7 @@
1896 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1897 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1898 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1899+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1900
1901 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1902 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1903@@ -66,6 +70,7 @@
1904 * - extended small page/tiny page
1905 */
1906 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1907+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1908 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1909 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1910 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1911diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1912index f97ee02..07f1be5 100644
1913--- a/arch/arm/include/asm/pgtable-2level.h
1914+++ b/arch/arm/include/asm/pgtable-2level.h
1915@@ -125,6 +125,7 @@
1916 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1917 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1918 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1919+#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7*/
1920
1921 /*
1922 * These are the memory types, defined to be compatible with
1923diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1924index d795282..a43ea90 100644
1925--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1927@@ -32,15 +32,18 @@
1928 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1929 #define PMD_BIT4 (_AT(pmdval_t, 0))
1930 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1931+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1932
1933 /*
1934 * - section
1935 */
1936 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1937 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1938+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1939 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1940 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1941 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1942+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1943 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
1944 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
1945 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
1946@@ -66,6 +69,7 @@
1947 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1948 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1949 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1950+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1951 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1952
1953 /*
1954diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1955index a3f3792..7b932a6 100644
1956--- a/arch/arm/include/asm/pgtable-3level.h
1957+++ b/arch/arm/include/asm/pgtable-3level.h
1958@@ -74,6 +74,7 @@
1959 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1960 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1961 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1962+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1963 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1964 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1965 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1966@@ -82,6 +83,7 @@
1967 /*
1968 * To be used in assembly code with the upper page attributes.
1969 */
1970+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1971 #define L_PTE_XN_HIGH (1 << (54 - 32))
1972 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1973
1974diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1975index 9c82f988..514705a 100644
1976--- a/arch/arm/include/asm/pgtable.h
1977+++ b/arch/arm/include/asm/pgtable.h
1978@@ -30,6 +30,9 @@
1979 #include <asm/pgtable-2level.h>
1980 #endif
1981
1982+#define ktla_ktva(addr) (addr)
1983+#define ktva_ktla(addr) (addr)
1984+
1985 /*
1986 * Just any arbitrary offset to the start of the vmalloc VM area: the
1987 * current 8MB value just means that there will be a 8MB "hole" after the
1988@@ -45,6 +48,9 @@
1989 #define LIBRARY_TEXT_START 0x0c000000
1990
1991 #ifndef __ASSEMBLY__
1992+extern pteval_t __supported_pte_mask;
1993+extern pmdval_t __supported_pmd_mask;
1994+
1995 extern void __pte_error(const char *file, int line, pte_t);
1996 extern void __pmd_error(const char *file, int line, pmd_t);
1997 extern void __pgd_error(const char *file, int line, pgd_t);
1998@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1999 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2000 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2001
2002+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2003+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2004+
2005+#ifdef CONFIG_PAX_KERNEXEC
2006+#include <asm/domain.h>
2007+#include <linux/thread_info.h>
2008+#include <linux/preempt.h>
2009+#endif
2010+
2011+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2012+static inline int test_domain(int domain, int domaintype)
2013+{
2014+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2015+}
2016+#endif
2017+
2018+#ifdef CONFIG_PAX_KERNEXEC
2019+static inline unsigned long pax_open_kernel(void) {
2020+#ifdef CONFIG_ARM_LPAE
2021+ /* TODO */
2022+#else
2023+ preempt_disable();
2024+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2025+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2026+#endif
2027+ return 0;
2028+}
2029+
2030+static inline unsigned long pax_close_kernel(void) {
2031+#ifdef CONFIG_ARM_LPAE
2032+ /* TODO */
2033+#else
2034+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2035+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2036+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2037+ preempt_enable_no_resched();
2038+#endif
2039+ return 0;
2040+}
2041+#else
2042+static inline unsigned long pax_open_kernel(void) { return 0; }
2043+static inline unsigned long pax_close_kernel(void) { return 0; }
2044+#endif
2045+
2046 /*
2047 * This is the lowest virtual address we can permit any user space
2048 * mapping to be mapped at. This is particularly important for
2049@@ -63,8 +113,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2050 /*
2051 * The pgprot_* and protection_map entries will be fixed up in runtime
2052 * to include the cachable and bufferable bits based on memory policy,
2053- * as well as any architecture dependent bits like global/ASID and SMP
2054- * shared mapping bits.
2055+ * as well as any architecture dependent bits like global/ASID, PXN,
2056+ * and SMP shared mapping bits.
2057 */
2058 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2059
2060@@ -240,7 +290,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2061
2062 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2063 {
2064- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
2065+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE | __supported_pte_mask;
2066 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2067 return pte;
2068 }
2069diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2070index f3628fb..a0672dd 100644
2071--- a/arch/arm/include/asm/proc-fns.h
2072+++ b/arch/arm/include/asm/proc-fns.h
2073@@ -75,7 +75,7 @@ extern struct processor {
2074 unsigned int suspend_size;
2075 void (*do_suspend)(void *);
2076 void (*do_resume)(void *);
2077-} processor;
2078+} __do_const processor;
2079
2080 #ifndef MULTI_CPU
2081 extern void cpu_proc_init(void);
2082diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2083index 06e7d50..8a8e251 100644
2084--- a/arch/arm/include/asm/processor.h
2085+++ b/arch/arm/include/asm/processor.h
2086@@ -65,9 +65,8 @@ struct thread_struct {
2087 regs->ARM_cpsr |= PSR_ENDSTATE; \
2088 regs->ARM_pc = pc & ~1; /* pc */ \
2089 regs->ARM_sp = sp; /* sp */ \
2090- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2091- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2092- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2093+ /* r2 (envp), r1 (argv), r0 (argc) */ \
2094+ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2095 nommu_start_thread(regs); \
2096 })
2097
2098diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2099index d3a22be..3a69ad5 100644
2100--- a/arch/arm/include/asm/smp.h
2101+++ b/arch/arm/include/asm/smp.h
2102@@ -107,7 +107,7 @@ struct smp_operations {
2103 int (*cpu_disable)(unsigned int cpu);
2104 #endif
2105 #endif
2106-};
2107+} __no_const;
2108
2109 /*
2110 * set platform specific SMP operations
2111diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2112index cddda1f..ff357f7 100644
2113--- a/arch/arm/include/asm/thread_info.h
2114+++ b/arch/arm/include/asm/thread_info.h
2115@@ -77,9 +77,9 @@ struct thread_info {
2116 .flags = 0, \
2117 .preempt_count = INIT_PREEMPT_COUNT, \
2118 .addr_limit = KERNEL_DS, \
2119- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2121- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2122+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2123+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2124+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2125 .restart_block = { \
2126 .fn = do_no_restart_syscall, \
2127 }, \
2128@@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2129 #define TIF_SYSCALL_AUDIT 9
2130 #define TIF_SYSCALL_TRACEPOINT 10
2131 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2132+
2133+/* within 8 bits of TIF_SYSCALL_TRACE
2134+ * to meet flexible second operand requirements
2135+ */
2136+#define TIF_GRSEC_SETXID 12
2137+
2138 #define TIF_USING_IWMMXT 17
2139 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2140 #define TIF_RESTORE_SIGMASK 20
2141@@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2142 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2143 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2144 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2145+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2146
2147 /* Checks for any syscall work in entry-common.S */
2148 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2149- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2150+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2151
2152 /*
2153 * Change these and you break ASM code in entry-common.S
2154diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2155index 7e1f760..752fcb7 100644
2156--- a/arch/arm/include/asm/uaccess.h
2157+++ b/arch/arm/include/asm/uaccess.h
2158@@ -18,6 +18,7 @@
2159 #include <asm/domain.h>
2160 #include <asm/unified.h>
2161 #include <asm/compiler.h>
2162+#include <asm/pgtable.h>
2163
2164 #define VERIFY_READ 0
2165 #define VERIFY_WRITE 1
2166@@ -60,10 +61,34 @@ extern int __put_user_bad(void);
2167 #define USER_DS TASK_SIZE
2168 #define get_fs() (current_thread_info()->addr_limit)
2169
2170+static inline void pax_open_userland(void)
2171+{
2172+
2173+#ifdef CONFIG_PAX_MEMORY_UDEREF
2174+ if (get_fs() == USER_DS) {
2175+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2176+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2177+ }
2178+#endif
2179+
2180+}
2181+
2182+static inline void pax_close_userland(void)
2183+{
2184+
2185+#ifdef CONFIG_PAX_MEMORY_UDEREF
2186+ if (get_fs() == USER_DS) {
2187+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2188+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2189+ }
2190+#endif
2191+
2192+}
2193+
2194 static inline void set_fs(mm_segment_t fs)
2195 {
2196 current_thread_info()->addr_limit = fs;
2197- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2198+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2199 }
2200
2201 #define segment_eq(a,b) ((a) == (b))
2202@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2203
2204 #define get_user(x,p) \
2205 ({ \
2206+ int __e; \
2207 might_fault(); \
2208- __get_user_check(x,p); \
2209+ pax_open_userland(); \
2210+ __e = __get_user_check(x,p); \
2211+ pax_close_userland(); \
2212+ __e; \
2213 })
2214
2215 extern int __put_user_1(void *, unsigned int);
2216@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2217
2218 #define put_user(x,p) \
2219 ({ \
2220+ int __e; \
2221 might_fault(); \
2222- __put_user_check(x,p); \
2223+ pax_open_userland(); \
2224+ __e = __put_user_check(x,p); \
2225+ pax_close_userland(); \
2226+ __e; \
2227 })
2228
2229 #else /* CONFIG_MMU */
2230@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2231 #define __get_user(x,ptr) \
2232 ({ \
2233 long __gu_err = 0; \
2234+ pax_open_userland(); \
2235 __get_user_err((x),(ptr),__gu_err); \
2236+ pax_close_userland(); \
2237 __gu_err; \
2238 })
2239
2240 #define __get_user_error(x,ptr,err) \
2241 ({ \
2242+ pax_open_userland(); \
2243 __get_user_err((x),(ptr),err); \
2244+ pax_close_userland(); \
2245 (void) 0; \
2246 })
2247
2248@@ -312,13 +349,17 @@ do { \
2249 #define __put_user(x,ptr) \
2250 ({ \
2251 long __pu_err = 0; \
2252+ pax_open_userland(); \
2253 __put_user_err((x),(ptr),__pu_err); \
2254+ pax_close_userland(); \
2255 __pu_err; \
2256 })
2257
2258 #define __put_user_error(x,ptr,err) \
2259 ({ \
2260+ pax_open_userland(); \
2261 __put_user_err((x),(ptr),err); \
2262+ pax_close_userland(); \
2263 (void) 0; \
2264 })
2265
2266@@ -418,11 +459,44 @@ do { \
2267
2268
2269 #ifdef CONFIG_MMU
2270-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2271-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2272+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2273+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2274+
2275+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2276+{
2277+ unsigned long ret;
2278+
2279+ check_object_size(to, n, false);
2280+ pax_open_userland();
2281+ ret = ___copy_from_user(to, from, n);
2282+ pax_close_userland();
2283+ return ret;
2284+}
2285+
2286+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2287+{
2288+ unsigned long ret;
2289+
2290+ check_object_size(from, n, true);
2291+ pax_open_userland();
2292+ ret = ___copy_to_user(to, from, n);
2293+ pax_close_userland();
2294+ return ret;
2295+}
2296+
2297 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2298-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2299+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2300 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2301+
2302+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2303+{
2304+ unsigned long ret;
2305+ pax_open_userland();
2306+ ret = ___clear_user(addr, n);
2307+ pax_close_userland();
2308+ return ret;
2309+}
2310+
2311 #else
2312 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2313 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2314@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2315
2316 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2317 {
2318+ if ((long)n < 0)
2319+ return n;
2320+
2321 if (access_ok(VERIFY_READ, from, n))
2322 n = __copy_from_user(to, from, n);
2323 else /* security hole - plug it */
2324@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2325
2326 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2327 {
2328+ if ((long)n < 0)
2329+ return n;
2330+
2331 if (access_ok(VERIFY_WRITE, to, n))
2332 n = __copy_to_user(to, from, n);
2333 return n;
2334diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2335index 96ee092..37f1844 100644
2336--- a/arch/arm/include/uapi/asm/ptrace.h
2337+++ b/arch/arm/include/uapi/asm/ptrace.h
2338@@ -73,7 +73,7 @@
2339 * ARMv7 groups of PSR bits
2340 */
2341 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2342-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2343+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2344 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2345 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2346
2347diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2348index 60d3b73..d27ee09 100644
2349--- a/arch/arm/kernel/armksyms.c
2350+++ b/arch/arm/kernel/armksyms.c
2351@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2352 #ifdef CONFIG_MMU
2353 EXPORT_SYMBOL(copy_page);
2354
2355-EXPORT_SYMBOL(__copy_from_user);
2356-EXPORT_SYMBOL(__copy_to_user);
2357-EXPORT_SYMBOL(__clear_user);
2358+EXPORT_SYMBOL(___copy_from_user);
2359+EXPORT_SYMBOL(___copy_to_user);
2360+EXPORT_SYMBOL(___clear_user);
2361
2362 EXPORT_SYMBOL(__get_user_1);
2363 EXPORT_SYMBOL(__get_user_2);
2364diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2365index 0f82098..3dbd3ee 100644
2366--- a/arch/arm/kernel/entry-armv.S
2367+++ b/arch/arm/kernel/entry-armv.S
2368@@ -47,6 +47,87 @@
2369 9997:
2370 .endm
2371
2372+ .macro pax_enter_kernel
2373+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2374+ @ make aligned space for saved DACR
2375+ sub sp, sp, #8
2376+ @ save regs
2377+ stmdb sp!, {r1, r2}
2378+ @ read DACR from cpu_domain into r1
2379+ mov r2, sp
2380+ @ assume 8K pages, since we have to split the immediate in two
2381+ bic r2, r2, #(0x1fc0)
2382+ bic r2, r2, #(0x3f)
2383+ ldr r1, [r2, #TI_CPU_DOMAIN]
2384+ @ store old DACR on stack
2385+ str r1, [sp, #8]
2386+#ifdef CONFIG_PAX_KERNEXEC
2387+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2388+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2389+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2390+#endif
2391+#ifdef CONFIG_PAX_MEMORY_UDEREF
2392+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2393+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2394+#endif
2395+ @ write r1 to current_thread_info()->cpu_domain
2396+ str r1, [r2, #TI_CPU_DOMAIN]
2397+ @ write r1 to DACR
2398+ mcr p15, 0, r1, c3, c0, 0
2399+ @ instruction sync
2400+ instr_sync
2401+ @ restore regs
2402+ ldmia sp!, {r1, r2}
2403+#endif
2404+ .endm
2405+
2406+ .macro pax_open_userland
2407+#ifdef CONFIG_PAX_MEMORY_UDEREF
2408+ @ save regs
2409+ stmdb sp!, {r0, r1}
2410+ @ read DACR from cpu_domain into r1
2411+ mov r0, sp
2412+ @ assume 8K pages, since we have to split the immediate in two
2413+ bic r0, r0, #(0x1fc0)
2414+ bic r0, r0, #(0x3f)
2415+ ldr r1, [r0, #TI_CPU_DOMAIN]
2416+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2417+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2418+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2419+ @ write r1 to current_thread_info()->cpu_domain
2420+ str r1, [r0, #TI_CPU_DOMAIN]
2421+ @ write r1 to DACR
2422+ mcr p15, 0, r1, c3, c0, 0
2423+ @ instruction sync
2424+ instr_sync
2425+ @ restore regs
2426+ ldmia sp!, {r0, r1}
2427+#endif
2428+ .endm
2429+
2430+ .macro pax_close_userland
2431+#ifdef CONFIG_PAX_MEMORY_UDEREF
2432+ @ save regs
2433+ stmdb sp!, {r0, r1}
2434+ @ read DACR from cpu_domain into r1
2435+ mov r0, sp
2436+ @ assume 8K pages, since we have to split the immediate in two
2437+ bic r0, r0, #(0x1fc0)
2438+ bic r0, r0, #(0x3f)
2439+ ldr r1, [r0, #TI_CPU_DOMAIN]
2440+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2441+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2442+ @ write r1 to current_thread_info()->cpu_domain
2443+ str r1, [r0, #TI_CPU_DOMAIN]
2444+ @ write r1 to DACR
2445+ mcr p15, 0, r1, c3, c0, 0
2446+ @ instruction sync
2447+ instr_sync
2448+ @ restore regs
2449+ ldmia sp!, {r0, r1}
2450+#endif
2451+ .endm
2452+
2453 .macro pabt_helper
2454 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2455 #ifdef MULTI_PABORT
2456@@ -89,11 +170,15 @@
2457 * Invalid mode handlers
2458 */
2459 .macro inv_entry, reason
2460+
2461+ pax_enter_kernel
2462+
2463 sub sp, sp, #S_FRAME_SIZE
2464 ARM( stmib sp, {r1 - lr} )
2465 THUMB( stmia sp, {r0 - r12} )
2466 THUMB( str sp, [sp, #S_SP] )
2467 THUMB( str lr, [sp, #S_LR] )
2468+
2469 mov r1, #\reason
2470 .endm
2471
2472@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2473 .macro svc_entry, stack_hole=0
2474 UNWIND(.fnstart )
2475 UNWIND(.save {r0 - pc} )
2476+
2477+ pax_enter_kernel
2478+
2479 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2480+
2481 #ifdef CONFIG_THUMB2_KERNEL
2482 SPFIX( str r0, [sp] ) @ temporarily saved
2483 SPFIX( mov r0, sp )
2484@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2485 ldmia r0, {r3 - r5}
2486 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2487 mov r6, #-1 @ "" "" "" ""
2488+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2489+ @ offset sp by 8 as done in pax_enter_kernel
2490+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2491+#else
2492 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2493+#endif
2494 SPFIX( addeq r2, r2, #4 )
2495 str r3, [sp, #-4]! @ save the "real" r0 copied
2496 @ from the exception stack
2497@@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
2498 .macro usr_entry
2499 UNWIND(.fnstart )
2500 UNWIND(.cantunwind ) @ don't unwind the user space
2501+
2502+ pax_enter_kernel_user
2503+
2504 sub sp, sp, #S_FRAME_SIZE
2505 ARM( stmib sp, {r1 - r12} )
2506 THUMB( stmia sp, {r0 - r12} )
2507@@ -456,7 +553,9 @@ __und_usr:
2508 tst r3, #PSR_T_BIT @ Thumb mode?
2509 bne __und_usr_thumb
2510 sub r4, r2, #4 @ ARM instr at LR - 4
2511+ pax_open_userland
2512 1: ldrt r0, [r4]
2513+ pax_close_userland
2514 #ifdef CONFIG_CPU_ENDIAN_BE8
2515 rev r0, r0 @ little endian instruction
2516 #endif
2517@@ -491,10 +590,14 @@ __und_usr_thumb:
2518 */
2519 .arch armv6t2
2520 #endif
2521+ pax_open_userland
2522 2: ldrht r5, [r4]
2523+ pax_close_userland
2524 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2525 blo __und_usr_fault_16 @ 16bit undefined instruction
2526+ pax_open_userland
2527 3: ldrht r0, [r2]
2528+ pax_close_userland
2529 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2530 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2531 orr r0, r0, r5, lsl #16
2532@@ -733,7 +836,7 @@ ENTRY(__switch_to)
2533 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2534 THUMB( str sp, [ip], #4 )
2535 THUMB( str lr, [ip], #4 )
2536-#ifdef CONFIG_CPU_USE_DOMAINS
2537+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2538 ldr r6, [r2, #TI_CPU_DOMAIN]
2539 #endif
2540 set_tls r3, r4, r5
2541@@ -742,7 +845,7 @@ ENTRY(__switch_to)
2542 ldr r8, =__stack_chk_guard
2543 ldr r7, [r7, #TSK_STACK_CANARY]
2544 #endif
2545-#ifdef CONFIG_CPU_USE_DOMAINS
2546+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2547 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2548 #endif
2549 mov r5, r0
2550diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2551index a6c301e..908821b 100644
2552--- a/arch/arm/kernel/entry-common.S
2553+++ b/arch/arm/kernel/entry-common.S
2554@@ -10,18 +10,46 @@
2555
2556 #include <asm/unistd.h>
2557 #include <asm/ftrace.h>
2558+#include <asm/domain.h>
2559 #include <asm/unwind.h>
2560
2561+#include "entry-header.S"
2562+
2563 #ifdef CONFIG_NEED_RET_TO_USER
2564 #include <mach/entry-macro.S>
2565 #else
2566 .macro arch_ret_to_user, tmp1, tmp2
2567+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2568+ @ save regs
2569+ stmdb sp!, {r1, r2}
2570+ @ read DACR from cpu_domain into r1
2571+ mov r2, sp
2572+ @ assume 8K pages, since we have to split the immediate in two
2573+ bic r2, r2, #(0x1fc0)
2574+ bic r2, r2, #(0x3f)
2575+ ldr r1, [r2, #TI_CPU_DOMAIN]
2576+#ifdef CONFIG_PAX_KERNEXEC
2577+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2578+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2579+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2580+#endif
2581+#ifdef CONFIG_PAX_MEMORY_UDEREF
2582+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2583+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2584+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2585+#endif
2586+ @ write r1 to current_thread_info()->cpu_domain
2587+ str r1, [r2, #TI_CPU_DOMAIN]
2588+ @ write r1 to DACR
2589+ mcr p15, 0, r1, c3, c0, 0
2590+ @ instruction sync
2591+ instr_sync
2592+ @ restore regs
2593+ ldmia sp!, {r1, r2}
2594+#endif
2595 .endm
2596 #endif
2597
2598-#include "entry-header.S"
2599-
2600-
2601 .align 5
2602 /*
2603 * This is the fast syscall return path. We do as little as
2604@@ -339,6 +367,7 @@ ENDPROC(ftrace_stub)
2605
2606 .align 5
2607 ENTRY(vector_swi)
2608+
2609 sub sp, sp, #S_FRAME_SIZE
2610 stmia sp, {r0 - r12} @ Calling r0 - r12
2611 ARM( add r8, sp, #S_PC )
2612@@ -388,6 +417,12 @@ ENTRY(vector_swi)
2613 ldr scno, [lr, #-4] @ get SWI instruction
2614 #endif
2615
2616+ /*
2617+ * do this here to avoid a performance hit of wrapping the code above
2618+ * that directly dereferences userland to parse the SWI instruction
2619+ */
2620+ pax_enter_kernel_user
2621+
2622 #ifdef CONFIG_ALIGNMENT_TRAP
2623 ldr ip, __cr_alignment
2624 ldr ip, [ip]
2625diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2626index 9a8531e..812e287 100644
2627--- a/arch/arm/kernel/entry-header.S
2628+++ b/arch/arm/kernel/entry-header.S
2629@@ -73,9 +73,66 @@
2630 msr cpsr_c, \rtemp @ switch back to the SVC mode
2631 .endm
2632
2633+ .macro pax_enter_kernel_user
2634+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2635+ @ save regs
2636+ stmdb sp!, {r0, r1}
2637+ @ read DACR from cpu_domain into r1
2638+ mov r0, sp
2639+ @ assume 8K pages, since we have to split the immediate in two
2640+ bic r0, r0, #(0x1fc0)
2641+ bic r0, r0, #(0x3f)
2642+ ldr r1, [r0, #TI_CPU_DOMAIN]
2643+#ifdef CONFIG_PAX_MEMORY_UDEREF
2644+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2645+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2646+#endif
2647+#ifdef CONFIG_PAX_KERNEXEC
2648+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2649+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2650+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2651+#endif
2652+ @ write r1 to current_thread_info()->cpu_domain
2653+ str r1, [r0, #TI_CPU_DOMAIN]
2654+ @ write r1 to DACR
2655+ mcr p15, 0, r1, c3, c0, 0
2656+ @ instruction sync
2657+ instr_sync
2658+ @ restore regs
2659+ ldmia sp!, {r0, r1}
2660+#endif
2661+ .endm
2662+
2663+ .macro pax_exit_kernel
2664+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2665+ @ save regs
2666+ stmdb sp!, {r0, r1}
2667+ @ read old DACR from stack into r1
2668+ ldr r1, [sp, #(8 + S_SP)]
2669+ sub r1, r1, #8
2670+ ldr r1, [r1]
2671+
2672+ @ write r1 to current_thread_info()->cpu_domain
2673+ mov r0, sp
2674+ @ assume 8K pages, since we have to split the immediate in two
2675+ bic r0, r0, #(0x1fc0)
2676+ bic r0, r0, #(0x3f)
2677+ str r1, [r0, #TI_CPU_DOMAIN]
2678+ @ write r1 to DACR
2679+ mcr p15, 0, r1, c3, c0, 0
2680+ @ instruction sync
2681+ instr_sync
2682+ @ restore regs
2683+ ldmia sp!, {r0, r1}
2684+#endif
2685+ .endm
2686+
2687 #ifndef CONFIG_THUMB2_KERNEL
2688 .macro svc_exit, rpsr
2689 msr spsr_cxsf, \rpsr
2690+
2691+ pax_exit_kernel
2692+
2693 #if defined(CONFIG_CPU_V6)
2694 ldr r0, [sp]
2695 strex r1, r2, [sp] @ clear the exclusive monitor
2696@@ -121,6 +178,9 @@
2697 .endm
2698 #else /* CONFIG_THUMB2_KERNEL */
2699 .macro svc_exit, rpsr
2700+
2701+ pax_exit_kernel
2702+
2703 ldr lr, [sp, #S_SP] @ top of the stack
2704 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2705 clrex @ clear the exclusive monitor
2706diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2707index 2adda11..7fbe958 100644
2708--- a/arch/arm/kernel/fiq.c
2709+++ b/arch/arm/kernel/fiq.c
2710@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2711 #if defined(CONFIG_CPU_USE_DOMAINS)
2712 memcpy((void *)0xffff001c, start, length);
2713 #else
2714+ pax_open_kernel();
2715 memcpy(vectors_page + 0x1c, start, length);
2716+ pax_close_kernel();
2717 #endif
2718 flush_icache_range(0xffff001c, 0xffff001c + length);
2719 if (!vectors_high())
2720diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2721index 486a15a..2d6880e 100644
2722--- a/arch/arm/kernel/head.S
2723+++ b/arch/arm/kernel/head.S
2724@@ -52,7 +52,9 @@
2725 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2726
2727 .macro pgtbl, rd, phys
2728- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2729+ mov \rd, #TEXT_OFFSET
2730+ sub \rd, #PG_DIR_SIZE
2731+ add \rd, \rd, \phys
2732 .endm
2733
2734 /*
2735@@ -416,7 +418,7 @@ __enable_mmu:
2736 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2737 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2738 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2739- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2740+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2741 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2742 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2743 #endif
2744diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2745index 5ff2e77..556d030 100644
2746--- a/arch/arm/kernel/hw_breakpoint.c
2747+++ b/arch/arm/kernel/hw_breakpoint.c
2748@@ -1011,7 +1011,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2749 return NOTIFY_OK;
2750 }
2751
2752-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2753+static struct notifier_block dbg_reset_nb = {
2754 .notifier_call = dbg_reset_notify,
2755 };
2756
2757diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2758index 1e9be5d..03edbc2 100644
2759--- a/arch/arm/kernel/module.c
2760+++ b/arch/arm/kernel/module.c
2761@@ -37,12 +37,37 @@
2762 #endif
2763
2764 #ifdef CONFIG_MMU
2765-void *module_alloc(unsigned long size)
2766+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2767 {
2768+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2769+ return NULL;
2770 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2771- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2772+ GFP_KERNEL, prot, -1,
2773 __builtin_return_address(0));
2774 }
2775+
2776+void *module_alloc(unsigned long size)
2777+{
2778+
2779+#ifdef CONFIG_PAX_KERNEXEC
2780+ return __module_alloc(size, PAGE_KERNEL);
2781+#else
2782+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2783+#endif
2784+
2785+}
2786+
2787+#ifdef CONFIG_PAX_KERNEXEC
2788+void module_free_exec(struct module *mod, void *module_region)
2789+{
2790+ module_free(mod, module_region);
2791+}
2792+
2793+void *module_alloc_exec(unsigned long size)
2794+{
2795+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2796+}
2797+#endif
2798 #endif
2799
2800 int
2801diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2802index 5f66206..dce492f 100644
2803--- a/arch/arm/kernel/perf_event_cpu.c
2804+++ b/arch/arm/kernel/perf_event_cpu.c
2805@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2806 return NOTIFY_OK;
2807 }
2808
2809-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2810+static struct notifier_block cpu_pmu_hotplug_notifier = {
2811 .notifier_call = cpu_pmu_notify,
2812 };
2813
2814diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2815index c6dec5f..f853532 100644
2816--- a/arch/arm/kernel/process.c
2817+++ b/arch/arm/kernel/process.c
2818@@ -28,7 +28,6 @@
2819 #include <linux/tick.h>
2820 #include <linux/utsname.h>
2821 #include <linux/uaccess.h>
2822-#include <linux/random.h>
2823 #include <linux/hw_breakpoint.h>
2824 #include <linux/cpuidle.h>
2825 #include <linux/leds.h>
2826@@ -256,9 +255,10 @@ void machine_power_off(void)
2827 machine_shutdown();
2828 if (pm_power_off)
2829 pm_power_off();
2830+ BUG();
2831 }
2832
2833-void machine_restart(char *cmd)
2834+__noreturn void machine_restart(char *cmd)
2835 {
2836 machine_shutdown();
2837
2838@@ -283,8 +283,8 @@ void __show_regs(struct pt_regs *regs)
2839 init_utsname()->release,
2840 (int)strcspn(init_utsname()->version, " "),
2841 init_utsname()->version);
2842- print_symbol("PC is at %s\n", instruction_pointer(regs));
2843- print_symbol("LR is at %s\n", regs->ARM_lr);
2844+ printk("PC is at %pA\n", instruction_pointer(regs));
2845+ printk("LR is at %pA\n", regs->ARM_lr);
2846 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2847 "sp : %08lx ip : %08lx fp : %08lx\n",
2848 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2849@@ -452,12 +452,6 @@ unsigned long get_wchan(struct task_struct *p)
2850 return 0;
2851 }
2852
2853-unsigned long arch_randomize_brk(struct mm_struct *mm)
2854-{
2855- unsigned long range_end = mm->brk + 0x02000000;
2856- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2857-}
2858-
2859 #ifdef CONFIG_MMU
2860 /*
2861 * The vectors page is always readable from user space for the
2862diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2863index 03deeff..741ce88 100644
2864--- a/arch/arm/kernel/ptrace.c
2865+++ b/arch/arm/kernel/ptrace.c
2866@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2867 return current_thread_info()->syscall;
2868 }
2869
2870+#ifdef CONFIG_GRKERNSEC_SETXID
2871+extern void gr_delayed_cred_worker(void);
2872+#endif
2873+
2874 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2875 {
2876 current_thread_info()->syscall = scno;
2877
2878+#ifdef CONFIG_GRKERNSEC_SETXID
2879+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2880+ gr_delayed_cred_worker();
2881+#endif
2882+
2883 /* Do the secure computing check first; failures should be fast. */
2884 if (secure_computing(scno) == -1)
2885 return -1;
2886diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2887index 3f6cbb2..6d856f5 100644
2888--- a/arch/arm/kernel/setup.c
2889+++ b/arch/arm/kernel/setup.c
2890@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
2891 unsigned int elf_hwcap __read_mostly;
2892 EXPORT_SYMBOL(elf_hwcap);
2893
2894+pteval_t __supported_pte_mask __read_only;
2895+pmdval_t __supported_pmd_mask __read_only;
2896
2897 #ifdef MULTI_CPU
2898-struct processor processor __read_mostly;
2899+struct processor processor;
2900 #endif
2901 #ifdef MULTI_TLB
2902-struct cpu_tlb_fns cpu_tlb __read_mostly;
2903+struct cpu_tlb_fns cpu_tlb __read_only;
2904 #endif
2905 #ifdef MULTI_USER
2906-struct cpu_user_fns cpu_user __read_mostly;
2907+struct cpu_user_fns cpu_user __read_only;
2908 #endif
2909 #ifdef MULTI_CACHE
2910-struct cpu_cache_fns cpu_cache __read_mostly;
2911+struct cpu_cache_fns cpu_cache __read_only;
2912 #endif
2913 #ifdef CONFIG_OUTER_CACHE
2914-struct outer_cache_fns outer_cache __read_mostly;
2915+struct outer_cache_fns outer_cache __read_only;
2916 EXPORT_SYMBOL(outer_cache);
2917 #endif
2918
2919@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
2920 asm("mrc p15, 0, %0, c0, c1, 4"
2921 : "=r" (mmfr0));
2922 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
2923- (mmfr0 & 0x000000f0) >= 0x00000030)
2924+ (mmfr0 & 0x000000f0) >= 0x00000030) {
2925 cpu_arch = CPU_ARCH_ARMv7;
2926- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2927+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
2928+ __supported_pte_mask |= L_PTE_PXN;
2929+ __supported_pmd_mask |= PMD_PXNTABLE;
2930+ }
2931+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2932 (mmfr0 & 0x000000f0) == 0x00000020)
2933 cpu_arch = CPU_ARCH_ARMv6;
2934 else
2935@@ -462,7 +468,7 @@ static void __init setup_processor(void)
2936 __cpu_architecture = __get_cpu_architecture();
2937
2938 #ifdef MULTI_CPU
2939- processor = *list->proc;
2940+ memcpy((void *)&processor, list->proc, sizeof processor);
2941 #endif
2942 #ifdef MULTI_TLB
2943 cpu_tlb = *list->tlb;
2944diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
2945index 84f4cbf..672f5b8 100644
2946--- a/arch/arm/kernel/smp.c
2947+++ b/arch/arm/kernel/smp.c
2948@@ -70,7 +70,7 @@ enum ipi_msg_type {
2949
2950 static DECLARE_COMPLETION(cpu_running);
2951
2952-static struct smp_operations smp_ops;
2953+static struct smp_operations smp_ops __read_only;
2954
2955 void __init smp_set_ops(struct smp_operations *ops)
2956 {
2957diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
2958index b0179b8..b7b16c7 100644
2959--- a/arch/arm/kernel/traps.c
2960+++ b/arch/arm/kernel/traps.c
2961@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
2962 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
2963 {
2964 #ifdef CONFIG_KALLSYMS
2965- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
2966+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
2967 #else
2968 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
2969 #endif
2970@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2971 static int die_owner = -1;
2972 static unsigned int die_nest_count;
2973
2974+extern void gr_handle_kernel_exploit(void);
2975+
2976 static unsigned long oops_begin(void)
2977 {
2978 int cpu;
2979@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
2980 panic("Fatal exception in interrupt");
2981 if (panic_on_oops)
2982 panic("Fatal exception");
2983+
2984+ gr_handle_kernel_exploit();
2985+
2986 if (signr)
2987 do_exit(signr);
2988 }
2989@@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
2990 * The user helper at 0xffff0fe0 must be used instead.
2991 * (see entry-armv.S for details)
2992 */
2993+ pax_open_kernel();
2994 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
2995+ pax_close_kernel();
2996 }
2997 return 0;
2998
2999@@ -849,5 +856,9 @@ void __init early_trap_init(void *vectors_base)
3000 sigreturn_codes, sizeof(sigreturn_codes));
3001
3002 flush_icache_range(vectors, vectors + PAGE_SIZE);
3003- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3004+
3005+#ifndef CONFIG_PAX_MEMORY_UDEREF
3006+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3007+#endif
3008+
3009 }
3010diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3011index 11c1785..c67d54c 100644
3012--- a/arch/arm/kernel/vmlinux.lds.S
3013+++ b/arch/arm/kernel/vmlinux.lds.S
3014@@ -8,7 +8,11 @@
3015 #include <asm/thread_info.h>
3016 #include <asm/memory.h>
3017 #include <asm/page.h>
3018-
3019+
3020+#ifdef CONFIG_PAX_KERNEXEC
3021+#include <asm/pgtable.h>
3022+#endif
3023+
3024 #define PROC_INFO \
3025 . = ALIGN(4); \
3026 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3027@@ -90,6 +94,11 @@ SECTIONS
3028 _text = .;
3029 HEAD_TEXT
3030 }
3031+
3032+#ifdef CONFIG_PAX_KERNEXEC
3033+ . = ALIGN(1<<SECTION_SHIFT);
3034+#endif
3035+
3036 .text : { /* Real text segment */
3037 _stext = .; /* Text and read-only data */
3038 __exception_text_start = .;
3039@@ -144,6 +153,10 @@ SECTIONS
3040
3041 _etext = .; /* End of text and rodata section */
3042
3043+#ifdef CONFIG_PAX_KERNEXEC
3044+ . = ALIGN(1<<SECTION_SHIFT);
3045+#endif
3046+
3047 #ifndef CONFIG_XIP_KERNEL
3048 . = ALIGN(PAGE_SIZE);
3049 __init_begin = .;
3050@@ -203,6 +216,11 @@ SECTIONS
3051 . = PAGE_OFFSET + TEXT_OFFSET;
3052 #else
3053 __init_end = .;
3054+
3055+#ifdef CONFIG_PAX_KERNEXEC
3056+ . = ALIGN(1<<SECTION_SHIFT);
3057+#endif
3058+
3059 . = ALIGN(THREAD_SIZE);
3060 __data_loc = .;
3061 #endif
3062diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3063index 14a0d98..7771a7d 100644
3064--- a/arch/arm/lib/clear_user.S
3065+++ b/arch/arm/lib/clear_user.S
3066@@ -12,14 +12,14 @@
3067
3068 .text
3069
3070-/* Prototype: int __clear_user(void *addr, size_t sz)
3071+/* Prototype: int ___clear_user(void *addr, size_t sz)
3072 * Purpose : clear some user memory
3073 * Params : addr - user memory address to clear
3074 * : sz - number of bytes to clear
3075 * Returns : number of bytes NOT cleared
3076 */
3077 ENTRY(__clear_user_std)
3078-WEAK(__clear_user)
3079+WEAK(___clear_user)
3080 stmfd sp!, {r1, lr}
3081 mov r2, #0
3082 cmp r1, #4
3083@@ -44,7 +44,7 @@ WEAK(__clear_user)
3084 USER( strnebt r2, [r0])
3085 mov r0, #0
3086 ldmfd sp!, {r1, pc}
3087-ENDPROC(__clear_user)
3088+ENDPROC(___clear_user)
3089 ENDPROC(__clear_user_std)
3090
3091 .pushsection .fixup,"ax"
3092diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3093index 66a477a..bee61d3 100644
3094--- a/arch/arm/lib/copy_from_user.S
3095+++ b/arch/arm/lib/copy_from_user.S
3096@@ -16,7 +16,7 @@
3097 /*
3098 * Prototype:
3099 *
3100- * size_t __copy_from_user(void *to, const void *from, size_t n)
3101+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3102 *
3103 * Purpose:
3104 *
3105@@ -84,11 +84,11 @@
3106
3107 .text
3108
3109-ENTRY(__copy_from_user)
3110+ENTRY(___copy_from_user)
3111
3112 #include "copy_template.S"
3113
3114-ENDPROC(__copy_from_user)
3115+ENDPROC(___copy_from_user)
3116
3117 .pushsection .fixup,"ax"
3118 .align 0
3119diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3120index 6ee2f67..d1cce76 100644
3121--- a/arch/arm/lib/copy_page.S
3122+++ b/arch/arm/lib/copy_page.S
3123@@ -10,6 +10,7 @@
3124 * ASM optimised string functions
3125 */
3126 #include <linux/linkage.h>
3127+#include <linux/const.h>
3128 #include <asm/assembler.h>
3129 #include <asm/asm-offsets.h>
3130 #include <asm/cache.h>
3131diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3132index d066df6..df28194 100644
3133--- a/arch/arm/lib/copy_to_user.S
3134+++ b/arch/arm/lib/copy_to_user.S
3135@@ -16,7 +16,7 @@
3136 /*
3137 * Prototype:
3138 *
3139- * size_t __copy_to_user(void *to, const void *from, size_t n)
3140+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3141 *
3142 * Purpose:
3143 *
3144@@ -88,11 +88,11 @@
3145 .text
3146
3147 ENTRY(__copy_to_user_std)
3148-WEAK(__copy_to_user)
3149+WEAK(___copy_to_user)
3150
3151 #include "copy_template.S"
3152
3153-ENDPROC(__copy_to_user)
3154+ENDPROC(___copy_to_user)
3155 ENDPROC(__copy_to_user_std)
3156
3157 .pushsection .fixup,"ax"
3158diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3159index 7d08b43..f7ca7ea 100644
3160--- a/arch/arm/lib/csumpartialcopyuser.S
3161+++ b/arch/arm/lib/csumpartialcopyuser.S
3162@@ -57,8 +57,8 @@
3163 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3164 */
3165
3166-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3167-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3168+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3169+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3170
3171 #include "csumpartialcopygeneric.S"
3172
3173diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3174index 0dc5385..45833ef 100644
3175--- a/arch/arm/lib/delay.c
3176+++ b/arch/arm/lib/delay.c
3177@@ -28,12 +28,14 @@
3178 /*
3179 * Default to the loop-based delay implementation.
3180 */
3181-struct arm_delay_ops arm_delay_ops = {
3182+static struct arm_delay_ops arm_loop_delay_ops = {
3183 .delay = __loop_delay,
3184 .const_udelay = __loop_const_udelay,
3185 .udelay = __loop_udelay,
3186 };
3187
3188+struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
3189+
3190 static const struct delay_timer *delay_timer;
3191 static bool delay_calibrated;
3192
3193@@ -67,6 +69,12 @@ static void __timer_udelay(unsigned long usecs)
3194 __timer_const_udelay(usecs * UDELAY_MULT);
3195 }
3196
3197+static struct arm_delay_ops arm_timer_delay_ops = {
3198+ .delay = __timer_delay,
3199+ .const_udelay = __timer_const_udelay,
3200+ .udelay = __timer_udelay,
3201+};
3202+
3203 void __init register_current_timer_delay(const struct delay_timer *timer)
3204 {
3205 if (!delay_calibrated) {
3206@@ -74,9 +82,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
3207 delay_timer = timer;
3208 lpj_fine = timer->freq / HZ;
3209 loops_per_jiffy = lpj_fine;
3210- arm_delay_ops.delay = __timer_delay;
3211- arm_delay_ops.const_udelay = __timer_const_udelay;
3212- arm_delay_ops.udelay = __timer_udelay;
3213+ arm_delay_ops = &arm_timer_delay_ops;
3214 delay_calibrated = true;
3215 } else {
3216 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
3217diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3218index 025f742..8432b08 100644
3219--- a/arch/arm/lib/uaccess_with_memcpy.c
3220+++ b/arch/arm/lib/uaccess_with_memcpy.c
3221@@ -104,7 +104,7 @@ out:
3222 }
3223
3224 unsigned long
3225-__copy_to_user(void __user *to, const void *from, unsigned long n)
3226+___copy_to_user(void __user *to, const void *from, unsigned long n)
3227 {
3228 /*
3229 * This test is stubbed out of the main function above to keep
3230diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3231index bac21a5..b67ef8e 100644
3232--- a/arch/arm/mach-kirkwood/common.c
3233+++ b/arch/arm/mach-kirkwood/common.c
3234@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3235 clk_gate_ops.disable(hw);
3236 }
3237
3238-static struct clk_ops clk_gate_fn_ops;
3239+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3240+{
3241+ return clk_gate_ops.is_enabled(hw);
3242+}
3243+
3244+static struct clk_ops clk_gate_fn_ops = {
3245+ .enable = clk_gate_fn_enable,
3246+ .disable = clk_gate_fn_disable,
3247+ .is_enabled = clk_gate_fn_is_enabled,
3248+};
3249
3250 static struct clk __init *clk_register_gate_fn(struct device *dev,
3251 const char *name,
3252@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3253 gate_fn->fn_en = fn_en;
3254 gate_fn->fn_dis = fn_dis;
3255
3256- /* ops is the gate ops, but with our enable/disable functions */
3257- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3258- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3259- clk_gate_fn_ops = clk_gate_ops;
3260- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3261- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3262- }
3263-
3264 clk = clk_register(dev, &gate_fn->gate.hw);
3265
3266 if (IS_ERR(clk))
3267diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3268index 0abb30f..54064da 100644
3269--- a/arch/arm/mach-omap2/board-n8x0.c
3270+++ b/arch/arm/mach-omap2/board-n8x0.c
3271@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3272 }
3273 #endif
3274
3275-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3276+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3277 .late_init = n8x0_menelaus_late_init,
3278 };
3279
3280diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3281index 5d3b4f4..ddba3c0 100644
3282--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3283+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3284@@ -340,7 +340,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3285 return NOTIFY_OK;
3286 }
3287
3288-static struct notifier_block __refdata irq_hotplug_notifier = {
3289+static struct notifier_block irq_hotplug_notifier = {
3290 .notifier_call = irq_cpu_hotplug_notify,
3291 };
3292
3293diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3294index 4653efb..8c60bf7 100644
3295--- a/arch/arm/mach-omap2/omap_hwmod.c
3296+++ b/arch/arm/mach-omap2/omap_hwmod.c
3297@@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
3298 int (*init_clkdm)(struct omap_hwmod *oh);
3299 void (*update_context_lost)(struct omap_hwmod *oh);
3300 int (*get_context_lost)(struct omap_hwmod *oh);
3301-};
3302+} __no_const;
3303
3304 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3305-static struct omap_hwmod_soc_ops soc_ops;
3306+static struct omap_hwmod_soc_ops soc_ops __read_only;
3307
3308 /* omap_hwmod_list contains all registered struct omap_hwmods */
3309 static LIST_HEAD(omap_hwmod_list);
3310diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3311index 7c2b4ed..b2ea51f 100644
3312--- a/arch/arm/mach-omap2/wd_timer.c
3313+++ b/arch/arm/mach-omap2/wd_timer.c
3314@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3315 struct omap_hwmod *oh;
3316 char *oh_name = "wd_timer2";
3317 char *dev_name = "omap_wdt";
3318- struct omap_wd_timer_platform_data pdata;
3319+ static struct omap_wd_timer_platform_data pdata = {
3320+ .read_reset_sources = prm_read_reset_sources
3321+ };
3322
3323 if (!cpu_class_is_omap2() || of_have_populated_dt())
3324 return 0;
3325@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3326 return -EINVAL;
3327 }
3328
3329- pdata.read_reset_sources = prm_read_reset_sources;
3330-
3331 pdev = omap_device_build(dev_name, id, oh, &pdata,
3332 sizeof(struct omap_wd_timer_platform_data),
3333 NULL, 0, 0);
3334diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
3335index 6be4c4d..32ac32a 100644
3336--- a/arch/arm/mach-ux500/include/mach/setup.h
3337+++ b/arch/arm/mach-ux500/include/mach/setup.h
3338@@ -38,13 +38,6 @@ extern struct sys_timer ux500_timer;
3339 .type = MT_DEVICE, \
3340 }
3341
3342-#define __MEM_DEV_DESC(x, sz) { \
3343- .virtual = IO_ADDRESS(x), \
3344- .pfn = __phys_to_pfn(x), \
3345- .length = sz, \
3346- .type = MT_MEMORY, \
3347-}
3348-
3349 extern struct smp_operations ux500_smp_ops;
3350 extern void ux500_cpu_die(unsigned int cpu);
3351
3352diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3353index 3fd629d..8b1aca9 100644
3354--- a/arch/arm/mm/Kconfig
3355+++ b/arch/arm/mm/Kconfig
3356@@ -425,7 +425,7 @@ config CPU_32v5
3357
3358 config CPU_32v6
3359 bool
3360- select CPU_USE_DOMAINS if CPU_V6 && MMU
3361+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
3362 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3363
3364 config CPU_32v6K
3365@@ -577,6 +577,7 @@ config CPU_CP15_MPU
3366
3367 config CPU_USE_DOMAINS
3368 bool
3369+ depends on !ARM_LPAE && !PAX_KERNEXEC
3370 help
3371 This option enables or disables the use of domain switching
3372 via the set_fs() function.
3373diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3374index 5dbf13f..6393f55 100644
3375--- a/arch/arm/mm/fault.c
3376+++ b/arch/arm/mm/fault.c
3377@@ -25,6 +25,7 @@
3378 #include <asm/system_misc.h>
3379 #include <asm/system_info.h>
3380 #include <asm/tlbflush.h>
3381+#include <asm/sections.h>
3382
3383 #include "fault.h"
3384
3385@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3386 if (fixup_exception(regs))
3387 return;
3388
3389+#ifdef CONFIG_PAX_KERNEXEC
3390+ if ((fsr & FSR_WRITE) &&
3391+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3392+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3393+ {
3394+ if (current->signal->curr_ip)
3395+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3396+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
3397+ else
3398+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3399+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
3400+ }
3401+#endif
3402+
3403 /*
3404 * No handler, we'll have to terminate things with extreme prejudice.
3405 */
3406@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3407 }
3408 #endif
3409
3410+#ifdef CONFIG_PAX_PAGEEXEC
3411+ if (fsr & FSR_LNX_PF) {
3412+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3413+ do_group_exit(SIGKILL);
3414+ }
3415+#endif
3416+
3417 tsk->thread.address = addr;
3418 tsk->thread.error_code = fsr;
3419 tsk->thread.trap_no = 14;
3420@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3421 }
3422 #endif /* CONFIG_MMU */
3423
3424+#ifdef CONFIG_PAX_PAGEEXEC
3425+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3426+{
3427+ long i;
3428+
3429+ printk(KERN_ERR "PAX: bytes at PC: ");
3430+ for (i = 0; i < 20; i++) {
3431+ unsigned char c;
3432+ if (get_user(c, (__force unsigned char __user *)pc+i))
3433+ printk(KERN_CONT "?? ");
3434+ else
3435+ printk(KERN_CONT "%02x ", c);
3436+ }
3437+ printk("\n");
3438+
3439+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3440+ for (i = -1; i < 20; i++) {
3441+ unsigned long c;
3442+ if (get_user(c, (__force unsigned long __user *)sp+i))
3443+ printk(KERN_CONT "???????? ");
3444+ else
3445+ printk(KERN_CONT "%08lx ", c);
3446+ }
3447+ printk("\n");
3448+}
3449+#endif
3450+
3451 /*
3452 * First Level Translation Fault Handler
3453 *
3454@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3455 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3456 struct siginfo info;
3457
3458+#ifdef CONFIG_PAX_MEMORY_UDEREF
3459+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3460+ if (current->signal->curr_ip)
3461+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3462+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()), addr);
3463+ else
3464+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3465+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()), addr);
3466+ goto die;
3467+ }
3468+#endif
3469+
3470 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3471 return;
3472
3473+die:
3474 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3475 inf->name, fsr, addr);
3476
3477@@ -575,9 +637,38 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3478 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3479 struct siginfo info;
3480
3481+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3482+ if (!user_mode(regs) && (is_domain_fault(ifsr) || is_xn_fault(ifsr))) {
3483+ if (current->signal->curr_ip)
3484+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3485+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()),
3486+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3487+ else
3488+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3489+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()),
3490+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3491+ goto die;
3492+ }
3493+#endif
3494+
3495+#ifdef CONFIG_PAX_REFCOUNT
3496+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3497+ unsigned int bkpt;
3498+
3499+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3500+ current->thread.error_code = ifsr;
3501+ current->thread.trap_no = 0;
3502+ pax_report_refcount_overflow(regs);
3503+ fixup_exception(regs);
3504+ return;
3505+ }
3506+ }
3507+#endif
3508+
3509 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3510 return;
3511
3512+die:
3513 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3514 inf->name, ifsr, addr);
3515
3516diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3517index cf08bdf..772656c 100644
3518--- a/arch/arm/mm/fault.h
3519+++ b/arch/arm/mm/fault.h
3520@@ -3,6 +3,7 @@
3521
3522 /*
3523 * Fault status register encodings. We steal bit 31 for our own purposes.
3524+ * Set when the FSR value is from an instruction fault.
3525 */
3526 #define FSR_LNX_PF (1 << 31)
3527 #define FSR_WRITE (1 << 11)
3528@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3529 }
3530 #endif
3531
3532+/* valid for LPAE and !LPAE */
3533+static inline int is_xn_fault(unsigned int fsr)
3534+{
3535+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3536+}
3537+
3538+static inline int is_domain_fault(unsigned int fsr)
3539+{
3540+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3541+}
3542+
3543 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3544 unsigned long search_exception_table(unsigned long addr);
3545
3546diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3547index ad722f1..763fdd3 100644
3548--- a/arch/arm/mm/init.c
3549+++ b/arch/arm/mm/init.c
3550@@ -30,6 +30,8 @@
3551 #include <asm/setup.h>
3552 #include <asm/tlb.h>
3553 #include <asm/fixmap.h>
3554+#include <asm/system_info.h>
3555+#include <asm/cp15.h>
3556
3557 #include <asm/mach/arch.h>
3558 #include <asm/mach/map.h>
3559@@ -736,7 +738,46 @@ void free_initmem(void)
3560 {
3561 #ifdef CONFIG_HAVE_TCM
3562 extern char __tcm_start, __tcm_end;
3563+#endif
3564
3565+#ifdef CONFIG_PAX_KERNEXEC
3566+ unsigned long addr;
3567+ pgd_t *pgd;
3568+ pud_t *pud;
3569+ pmd_t *pmd;
3570+ int cpu_arch = cpu_architecture();
3571+ unsigned int cr = get_cr();
3572+
3573+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
3574+ /* make pages tables, etc before .text NX */
3575+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
3576+ pgd = pgd_offset_k(addr);
3577+ pud = pud_offset(pgd, addr);
3578+ pmd = pmd_offset(pud, addr);
3579+ __section_update(pmd, addr, PMD_SECT_XN);
3580+ }
3581+ /* make init NX */
3582+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
3583+ pgd = pgd_offset_k(addr);
3584+ pud = pud_offset(pgd, addr);
3585+ pmd = pmd_offset(pud, addr);
3586+ __section_update(pmd, addr, PMD_SECT_XN);
3587+ }
3588+ /* make kernel code/rodata RX */
3589+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
3590+ pgd = pgd_offset_k(addr);
3591+ pud = pud_offset(pgd, addr);
3592+ pmd = pmd_offset(pud, addr);
3593+#ifdef CONFIG_ARM_LPAE
3594+ __section_update(pmd, addr, PMD_SECT_RDONLY);
3595+#else
3596+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
3597+#endif
3598+ }
3599+ }
3600+#endif
3601+
3602+#ifdef CONFIG_HAVE_TCM
3603 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
3604 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
3605 __phys_to_pfn(__pa(&__tcm_end)),
3606diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
3607index 88fd86c..7a224ce 100644
3608--- a/arch/arm/mm/ioremap.c
3609+++ b/arch/arm/mm/ioremap.c
3610@@ -335,9 +335,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
3611 unsigned int mtype;
3612
3613 if (cached)
3614- mtype = MT_MEMORY;
3615+ mtype = MT_MEMORY_RX;
3616 else
3617- mtype = MT_MEMORY_NONCACHED;
3618+ mtype = MT_MEMORY_NONCACHED_RX;
3619
3620 return __arm_ioremap_caller(phys_addr, size, mtype,
3621 __builtin_return_address(0));
3622diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
3623index 10062ce..aa96dd7 100644
3624--- a/arch/arm/mm/mmap.c
3625+++ b/arch/arm/mm/mmap.c
3626@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3627 struct vm_area_struct *vma;
3628 int do_align = 0;
3629 int aliasing = cache_is_vipt_aliasing();
3630+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3631 struct vm_unmapped_area_info info;
3632
3633 /*
3634@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3635 if (len > TASK_SIZE)
3636 return -ENOMEM;
3637
3638+#ifdef CONFIG_PAX_RANDMMAP
3639+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3640+#endif
3641+
3642 if (addr) {
3643 if (do_align)
3644 addr = COLOUR_ALIGN(addr, pgoff);
3645@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3646 addr = PAGE_ALIGN(addr);
3647
3648 vma = find_vma(mm, addr);
3649- if (TASK_SIZE - len >= addr &&
3650- (!vma || addr + len <= vma->vm_start))
3651+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3652 return addr;
3653 }
3654
3655@@ -112,6 +116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3656 unsigned long addr = addr0;
3657 int do_align = 0;
3658 int aliasing = cache_is_vipt_aliasing();
3659+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3660 struct vm_unmapped_area_info info;
3661
3662 /*
3663@@ -132,6 +137,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3664 return addr;
3665 }
3666
3667+#ifdef CONFIG_PAX_RANDMMAP
3668+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3669+#endif
3670+
3671 /* requesting a specific address */
3672 if (addr) {
3673 if (do_align)
3674@@ -139,8 +148,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3675 else
3676 addr = PAGE_ALIGN(addr);
3677 vma = find_vma(mm, addr);
3678- if (TASK_SIZE - len >= addr &&
3679- (!vma || addr + len <= vma->vm_start))
3680+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3681 return addr;
3682 }
3683
3684@@ -162,6 +170,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3685 VM_BUG_ON(addr != -ENOMEM);
3686 info.flags = 0;
3687 info.low_limit = mm->mmap_base;
3688+
3689+#ifdef CONFIG_PAX_RANDMMAP
3690+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3691+ info.low_limit += mm->delta_mmap;
3692+#endif
3693+
3694 info.high_limit = TASK_SIZE;
3695 addr = vm_unmapped_area(&info);
3696 }
3697@@ -173,6 +187,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3698 {
3699 unsigned long random_factor = 0UL;
3700
3701+#ifdef CONFIG_PAX_RANDMMAP
3702+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3703+#endif
3704+
3705 /* 8 bits of randomness in 20 address space bits */
3706 if ((current->flags & PF_RANDOMIZE) &&
3707 !(current->personality & ADDR_NO_RANDOMIZE))
3708@@ -180,10 +198,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3709
3710 if (mmap_is_legacy()) {
3711 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3712+
3713+#ifdef CONFIG_PAX_RANDMMAP
3714+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3715+ mm->mmap_base += mm->delta_mmap;
3716+#endif
3717+
3718 mm->get_unmapped_area = arch_get_unmapped_area;
3719 mm->unmap_area = arch_unmap_area;
3720 } else {
3721 mm->mmap_base = mmap_base(random_factor);
3722+
3723+#ifdef CONFIG_PAX_RANDMMAP
3724+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3725+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3726+#endif
3727+
3728 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3729 mm->unmap_area = arch_unmap_area_topdown;
3730 }
3731diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
3732index ce328c7..f82bebb 100644
3733--- a/arch/arm/mm/mmu.c
3734+++ b/arch/arm/mm/mmu.c
3735@@ -35,6 +35,23 @@
3736
3737 #include "mm.h"
3738
3739+
3740+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3741+void modify_domain(unsigned int dom, unsigned int type)
3742+{
3743+ struct thread_info *thread = current_thread_info();
3744+ unsigned int domain = thread->cpu_domain;
3745+ /*
3746+ * DOMAIN_MANAGER might be defined to some other value,
3747+ * use the arch-defined constant
3748+ */
3749+ domain &= ~domain_val(dom, 3);
3750+ thread->cpu_domain = domain | domain_val(dom, type);
3751+ set_domain(thread->cpu_domain);
3752+}
3753+EXPORT_SYMBOL(modify_domain);
3754+#endif
3755+
3756 /*
3757 * empty_zero_page is a special page that is used for
3758 * zero-initialized data and COW.
3759@@ -195,10 +212,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
3760 }
3761 #endif
3762
3763-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
3764+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
3765 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
3766
3767-static struct mem_type mem_types[] = {
3768+#ifdef CONFIG_PAX_KERNEXEC
3769+#define L_PTE_KERNEXEC L_PTE_RDONLY
3770+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
3771+#else
3772+#define L_PTE_KERNEXEC L_PTE_DIRTY
3773+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
3774+#endif
3775+
3776+static struct mem_type mem_types[] __read_only = {
3777 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
3778 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
3779 L_PTE_SHARED,
3780@@ -227,16 +252,16 @@ static struct mem_type mem_types[] = {
3781 [MT_UNCACHED] = {
3782 .prot_pte = PROT_PTE_DEVICE,
3783 .prot_l1 = PMD_TYPE_TABLE,
3784- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3785+ .prot_sect = PROT_SECT_DEVICE,
3786 .domain = DOMAIN_IO,
3787 },
3788 [MT_CACHECLEAN] = {
3789- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3790+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3791 .domain = DOMAIN_KERNEL,
3792 },
3793 #ifndef CONFIG_ARM_LPAE
3794 [MT_MINICLEAN] = {
3795- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
3796+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
3797 .domain = DOMAIN_KERNEL,
3798 },
3799 #endif
3800@@ -244,36 +269,54 @@ static struct mem_type mem_types[] = {
3801 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3802 L_PTE_RDONLY,
3803 .prot_l1 = PMD_TYPE_TABLE,
3804- .domain = DOMAIN_USER,
3805+ .domain = DOMAIN_VECTORS,
3806 },
3807 [MT_HIGH_VECTORS] = {
3808 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3809 L_PTE_USER | L_PTE_RDONLY,
3810 .prot_l1 = PMD_TYPE_TABLE,
3811- .domain = DOMAIN_USER,
3812+ .domain = DOMAIN_VECTORS,
3813 },
3814- [MT_MEMORY] = {
3815+ [MT_MEMORY_RWX] = {
3816 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
3817 .prot_l1 = PMD_TYPE_TABLE,
3818 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
3819 .domain = DOMAIN_KERNEL,
3820 },
3821+ [MT_MEMORY_RW] = {
3822+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
3823+ .prot_l1 = PMD_TYPE_TABLE,
3824+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
3825+ .domain = DOMAIN_KERNEL,
3826+ },
3827+ [MT_MEMORY_RX] = {
3828+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
3829+ .prot_l1 = PMD_TYPE_TABLE,
3830+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
3831+ .domain = DOMAIN_KERNEL,
3832+ },
3833 [MT_ROM] = {
3834- .prot_sect = PMD_TYPE_SECT,
3835+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3836 .domain = DOMAIN_KERNEL,
3837 },
3838- [MT_MEMORY_NONCACHED] = {
3839+ [MT_MEMORY_NONCACHED_RW] = {
3840 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3841 L_PTE_MT_BUFFERABLE,
3842 .prot_l1 = PMD_TYPE_TABLE,
3843 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
3844 .domain = DOMAIN_KERNEL,
3845 },
3846+ [MT_MEMORY_NONCACHED_RX] = {
3847+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
3848+ L_PTE_MT_BUFFERABLE,
3849+ .prot_l1 = PMD_TYPE_TABLE,
3850+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
3851+ .domain = DOMAIN_KERNEL,
3852+ },
3853 [MT_MEMORY_DTCM] = {
3854- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3855- L_PTE_XN,
3856+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
3857 .prot_l1 = PMD_TYPE_TABLE,
3858- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3859+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3860 .domain = DOMAIN_KERNEL,
3861 },
3862 [MT_MEMORY_ITCM] = {
3863@@ -283,10 +326,10 @@ static struct mem_type mem_types[] = {
3864 },
3865 [MT_MEMORY_SO] = {
3866 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3867- L_PTE_MT_UNCACHED | L_PTE_XN,
3868+ L_PTE_MT_UNCACHED,
3869 .prot_l1 = PMD_TYPE_TABLE,
3870 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
3871- PMD_SECT_UNCACHED | PMD_SECT_XN,
3872+ PMD_SECT_UNCACHED,
3873 .domain = DOMAIN_KERNEL,
3874 },
3875 [MT_MEMORY_DMA_READY] = {
3876@@ -371,9 +414,35 @@ static void __init build_mem_type_table(void)
3877 * to prevent speculative instruction fetches.
3878 */
3879 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
3880+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
3881 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
3882+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
3883 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
3884+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
3885 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
3886+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
3887+
3888+ /* Mark other regions on ARMv6+ as execute-never */
3889+
3890+#ifdef CONFIG_PAX_KERNEXEC
3891+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
3892+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
3893+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
3894+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
3895+#ifndef CONFIG_ARM_LPAE
3896+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
3897+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
3898+#endif
3899+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
3900+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
3901+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
3902+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
3903+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
3904+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
3905+#endif
3906+
3907+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
3908+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
3909 }
3910 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
3911 /*
3912@@ -432,6 +501,9 @@ static void __init build_mem_type_table(void)
3913 * from SVC mode and no access from userspace.
3914 */
3915 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3916+#ifdef CONFIG_PAX_KERNEXEC
3917+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3918+#endif
3919 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3920 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3921 #endif
3922@@ -448,11 +520,17 @@ static void __init build_mem_type_table(void)
3923 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
3924 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
3925 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
3926- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
3927- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
3928+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
3929+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
3930+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
3931+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
3932+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
3933+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
3934 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
3935- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
3936- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
3937+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
3938+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
3939+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
3940+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
3941 }
3942 }
3943
3944@@ -463,15 +541,20 @@ static void __init build_mem_type_table(void)
3945 if (cpu_arch >= CPU_ARCH_ARMv6) {
3946 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
3947 /* Non-cacheable Normal is XCB = 001 */
3948- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
3949+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
3950+ PMD_SECT_BUFFERED;
3951+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
3952 PMD_SECT_BUFFERED;
3953 } else {
3954 /* For both ARMv6 and non-TEX-remapping ARMv7 */
3955- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
3956+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
3957+ PMD_SECT_TEX(1);
3958+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
3959 PMD_SECT_TEX(1);
3960 }
3961 } else {
3962- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
3963+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
3964+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
3965 }
3966
3967 #ifdef CONFIG_ARM_LPAE
3968@@ -487,6 +570,8 @@ static void __init build_mem_type_table(void)
3969 vecs_pgprot |= PTE_EXT_AF;
3970 #endif
3971
3972+ user_pgprot |= __supported_pte_mask;
3973+
3974 for (i = 0; i < 16; i++) {
3975 pteval_t v = pgprot_val(protection_map[i]);
3976 protection_map[i] = __pgprot(v | user_pgprot);
3977@@ -501,10 +586,15 @@ static void __init build_mem_type_table(void)
3978
3979 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
3980 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
3981- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
3982- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
3983+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
3984+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
3985+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
3986+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
3987+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
3988+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
3989 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
3990- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
3991+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
3992+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
3993 mem_types[MT_ROM].prot_sect |= cp->pmd;
3994
3995 switch (cp->pmd) {
3996@@ -1105,18 +1195,15 @@ void __init arm_mm_memblock_reserve(void)
3997 * called function. This means you can't use any function or debugging
3998 * method which may touch any device, otherwise the kernel _will_ crash.
3999 */
4000+
4001+static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
4002+
4003 static void __init devicemaps_init(struct machine_desc *mdesc)
4004 {
4005 struct map_desc map;
4006 unsigned long addr;
4007- void *vectors;
4008
4009- /*
4010- * Allocate the vector page early.
4011- */
4012- vectors = early_alloc(PAGE_SIZE);
4013-
4014- early_trap_init(vectors);
4015+ early_trap_init(&vectors);
4016
4017 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4018 pmd_clear(pmd_off_k(addr));
4019@@ -1156,7 +1243,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4020 * location (0xffff0000). If we aren't using high-vectors, also
4021 * create a mapping at the low-vectors virtual address.
4022 */
4023- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4024+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4025 map.virtual = 0xffff0000;
4026 map.length = PAGE_SIZE;
4027 map.type = MT_HIGH_VECTORS;
4028@@ -1214,8 +1301,39 @@ static void __init map_lowmem(void)
4029 map.pfn = __phys_to_pfn(start);
4030 map.virtual = __phys_to_virt(start);
4031 map.length = end - start;
4032- map.type = MT_MEMORY;
4033
4034+#ifdef CONFIG_PAX_KERNEXEC
4035+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4036+ struct map_desc kernel;
4037+ struct map_desc initmap;
4038+
4039+ /* when freeing initmem we will make this RW */
4040+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4041+ initmap.virtual = (unsigned long)__init_begin;
4042+ initmap.length = _sdata - __init_begin;
4043+ initmap.type = MT_MEMORY_RWX;
4044+ create_mapping(&initmap);
4045+
4046+ /* when freeing initmem we will make this RX */
4047+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4048+ kernel.virtual = (unsigned long)_stext;
4049+ kernel.length = __init_begin - _stext;
4050+ kernel.type = MT_MEMORY_RWX;
4051+ create_mapping(&kernel);
4052+
4053+ if (map.virtual < (unsigned long)_stext) {
4054+ map.length = (unsigned long)_stext - map.virtual;
4055+ map.type = MT_MEMORY_RWX;
4056+ create_mapping(&map);
4057+ }
4058+
4059+ map.pfn = __phys_to_pfn(__pa(_sdata));
4060+ map.virtual = (unsigned long)_sdata;
4061+ map.length = end - __pa(_sdata);
4062+ }
4063+#endif
4064+
4065+ map.type = MT_MEMORY_RW;
4066 create_mapping(&map);
4067 }
4068 }
4069diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4070index 6d98c13..3cfb174 100644
4071--- a/arch/arm/mm/proc-v7-2level.S
4072+++ b/arch/arm/mm/proc-v7-2level.S
4073@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4074 tst r1, #L_PTE_XN
4075 orrne r3, r3, #PTE_EXT_XN
4076
4077+ tst r1, #L_PTE_PXN
4078+ orrne r3, r3, #PTE_EXT_PXN
4079+
4080 tst r1, #L_PTE_YOUNG
4081 tstne r1, #L_PTE_VALID
4082 #ifndef CONFIG_CPU_USE_DOMAINS
4083diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4084index a5bc92d..0bb4730 100644
4085--- a/arch/arm/plat-omap/sram.c
4086+++ b/arch/arm/plat-omap/sram.c
4087@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4088 * Looks like we need to preserve some bootloader code at the
4089 * beginning of SRAM for jumping to flash for reboot to work...
4090 */
4091+ pax_open_kernel();
4092 memset_io(omap_sram_base + omap_sram_skip, 0,
4093 omap_sram_size - omap_sram_skip);
4094+ pax_close_kernel();
4095 }
4096diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
4097index b76c065..b6e766b 100644
4098--- a/arch/arm/plat-orion/include/plat/addr-map.h
4099+++ b/arch/arm/plat-orion/include/plat/addr-map.h
4100@@ -27,7 +27,7 @@ struct orion_addr_map_cfg {
4101 value in bridge_virt_base */
4102 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
4103 const int win);
4104-};
4105+} __no_const;
4106
4107 /*
4108 * Information needed to setup one address mapping.
4109diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4110index f5144cd..71f6d1f 100644
4111--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4112+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4113@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4114 int (*started)(unsigned ch);
4115 int (*flush)(unsigned ch);
4116 int (*stop)(unsigned ch);
4117-};
4118+} __no_const;
4119
4120 extern void *samsung_dmadev_get_ops(void);
4121 extern void *s3c_dma_get_ops(void);
4122diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4123index 0c3ba9f..95722b3 100644
4124--- a/arch/arm64/kernel/debug-monitors.c
4125+++ b/arch/arm64/kernel/debug-monitors.c
4126@@ -151,7 +151,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4127 return NOTIFY_OK;
4128 }
4129
4130-static struct notifier_block __cpuinitdata os_lock_nb = {
4131+static struct notifier_block os_lock_nb = {
4132 .notifier_call = os_lock_notify,
4133 };
4134
4135diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4136index 5ab825c..96aaec8 100644
4137--- a/arch/arm64/kernel/hw_breakpoint.c
4138+++ b/arch/arm64/kernel/hw_breakpoint.c
4139@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4140 return NOTIFY_OK;
4141 }
4142
4143-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4144+static struct notifier_block hw_breakpoint_reset_nb = {
4145 .notifier_call = hw_breakpoint_reset_notify,
4146 };
4147
4148diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4149index c3a58a1..78fbf54 100644
4150--- a/arch/avr32/include/asm/cache.h
4151+++ b/arch/avr32/include/asm/cache.h
4152@@ -1,8 +1,10 @@
4153 #ifndef __ASM_AVR32_CACHE_H
4154 #define __ASM_AVR32_CACHE_H
4155
4156+#include <linux/const.h>
4157+
4158 #define L1_CACHE_SHIFT 5
4159-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4160+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4161
4162 /*
4163 * Memory returned by kmalloc() may be used for DMA, so we must make
4164diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4165index e2c3287..6c4f98c 100644
4166--- a/arch/avr32/include/asm/elf.h
4167+++ b/arch/avr32/include/asm/elf.h
4168@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4169 the loader. We need to make sure that it is out of the way of the program
4170 that it will "exec", and that there is sufficient room for the brk. */
4171
4172-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4173+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4174
4175+#ifdef CONFIG_PAX_ASLR
4176+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4177+
4178+#define PAX_DELTA_MMAP_LEN 15
4179+#define PAX_DELTA_STACK_LEN 15
4180+#endif
4181
4182 /* This yields a mask that user programs can use to figure out what
4183 instruction set this CPU supports. This could be done in user space,
4184diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4185index 479330b..53717a8 100644
4186--- a/arch/avr32/include/asm/kmap_types.h
4187+++ b/arch/avr32/include/asm/kmap_types.h
4188@@ -2,9 +2,9 @@
4189 #define __ASM_AVR32_KMAP_TYPES_H
4190
4191 #ifdef CONFIG_DEBUG_HIGHMEM
4192-# define KM_TYPE_NR 29
4193+# define KM_TYPE_NR 30
4194 #else
4195-# define KM_TYPE_NR 14
4196+# define KM_TYPE_NR 15
4197 #endif
4198
4199 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4200diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4201index b2f2d2d..d1c85cb 100644
4202--- a/arch/avr32/mm/fault.c
4203+++ b/arch/avr32/mm/fault.c
4204@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4205
4206 int exception_trace = 1;
4207
4208+#ifdef CONFIG_PAX_PAGEEXEC
4209+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4210+{
4211+ unsigned long i;
4212+
4213+ printk(KERN_ERR "PAX: bytes at PC: ");
4214+ for (i = 0; i < 20; i++) {
4215+ unsigned char c;
4216+ if (get_user(c, (unsigned char *)pc+i))
4217+ printk(KERN_CONT "???????? ");
4218+ else
4219+ printk(KERN_CONT "%02x ", c);
4220+ }
4221+ printk("\n");
4222+}
4223+#endif
4224+
4225 /*
4226 * This routine handles page faults. It determines the address and the
4227 * problem, and then passes it off to one of the appropriate routines.
4228@@ -174,6 +191,16 @@ bad_area:
4229 up_read(&mm->mmap_sem);
4230
4231 if (user_mode(regs)) {
4232+
4233+#ifdef CONFIG_PAX_PAGEEXEC
4234+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4235+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4236+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4237+ do_group_exit(SIGKILL);
4238+ }
4239+ }
4240+#endif
4241+
4242 if (exception_trace && printk_ratelimit())
4243 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4244 "sp %08lx ecr %lu\n",
4245diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4246index 568885a..f8008df 100644
4247--- a/arch/blackfin/include/asm/cache.h
4248+++ b/arch/blackfin/include/asm/cache.h
4249@@ -7,6 +7,7 @@
4250 #ifndef __ARCH_BLACKFIN_CACHE_H
4251 #define __ARCH_BLACKFIN_CACHE_H
4252
4253+#include <linux/const.h>
4254 #include <linux/linkage.h> /* for asmlinkage */
4255
4256 /*
4257@@ -14,7 +15,7 @@
4258 * Blackfin loads 32 bytes for cache
4259 */
4260 #define L1_CACHE_SHIFT 5
4261-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4262+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4263 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4264
4265 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4266diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4267index aea2718..3639a60 100644
4268--- a/arch/cris/include/arch-v10/arch/cache.h
4269+++ b/arch/cris/include/arch-v10/arch/cache.h
4270@@ -1,8 +1,9 @@
4271 #ifndef _ASM_ARCH_CACHE_H
4272 #define _ASM_ARCH_CACHE_H
4273
4274+#include <linux/const.h>
4275 /* Etrax 100LX have 32-byte cache-lines. */
4276-#define L1_CACHE_BYTES 32
4277 #define L1_CACHE_SHIFT 5
4278+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4279
4280 #endif /* _ASM_ARCH_CACHE_H */
4281diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4282index 7caf25d..ee65ac5 100644
4283--- a/arch/cris/include/arch-v32/arch/cache.h
4284+++ b/arch/cris/include/arch-v32/arch/cache.h
4285@@ -1,11 +1,12 @@
4286 #ifndef _ASM_CRIS_ARCH_CACHE_H
4287 #define _ASM_CRIS_ARCH_CACHE_H
4288
4289+#include <linux/const.h>
4290 #include <arch/hwregs/dma.h>
4291
4292 /* A cache-line is 32 bytes. */
4293-#define L1_CACHE_BYTES 32
4294 #define L1_CACHE_SHIFT 5
4295+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4296
4297 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4298
4299diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4300index b86329d..6709906 100644
4301--- a/arch/frv/include/asm/atomic.h
4302+++ b/arch/frv/include/asm/atomic.h
4303@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4304 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4305 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4306
4307+#define atomic64_read_unchecked(v) atomic64_read(v)
4308+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4309+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4310+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4311+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4312+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4313+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4314+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4315+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4316+
4317 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4318 {
4319 int c, old;
4320diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4321index 2797163..c2a401d 100644
4322--- a/arch/frv/include/asm/cache.h
4323+++ b/arch/frv/include/asm/cache.h
4324@@ -12,10 +12,11 @@
4325 #ifndef __ASM_CACHE_H
4326 #define __ASM_CACHE_H
4327
4328+#include <linux/const.h>
4329
4330 /* bytes per L1 cache line */
4331 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4332-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4333+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4334
4335 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4336 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4337diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4338index 43901f2..0d8b865 100644
4339--- a/arch/frv/include/asm/kmap_types.h
4340+++ b/arch/frv/include/asm/kmap_types.h
4341@@ -2,6 +2,6 @@
4342 #ifndef _ASM_KMAP_TYPES_H
4343 #define _ASM_KMAP_TYPES_H
4344
4345-#define KM_TYPE_NR 17
4346+#define KM_TYPE_NR 18
4347
4348 #endif
4349diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4350index 385fd30..3aaf4fe 100644
4351--- a/arch/frv/mm/elf-fdpic.c
4352+++ b/arch/frv/mm/elf-fdpic.c
4353@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4354 {
4355 struct vm_area_struct *vma;
4356 unsigned long limit;
4357+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4358
4359 if (len > TASK_SIZE)
4360 return -ENOMEM;
4361@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4362 if (addr) {
4363 addr = PAGE_ALIGN(addr);
4364 vma = find_vma(current->mm, addr);
4365- if (TASK_SIZE - len >= addr &&
4366- (!vma || addr + len <= vma->vm_start))
4367+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4368 goto success;
4369 }
4370
4371@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4372 for (; vma; vma = vma->vm_next) {
4373 if (addr > limit)
4374 break;
4375- if (addr + len <= vma->vm_start)
4376+ if (check_heap_stack_gap(vma, addr, len, offset))
4377 goto success;
4378 addr = vma->vm_end;
4379 }
4380@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4381 for (; vma; vma = vma->vm_next) {
4382 if (addr > limit)
4383 break;
4384- if (addr + len <= vma->vm_start)
4385+ if (check_heap_stack_gap(vma, addr, len, offset))
4386 goto success;
4387 addr = vma->vm_end;
4388 }
4389diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4390index f4ca594..adc72fd6 100644
4391--- a/arch/hexagon/include/asm/cache.h
4392+++ b/arch/hexagon/include/asm/cache.h
4393@@ -21,9 +21,11 @@
4394 #ifndef __ASM_CACHE_H
4395 #define __ASM_CACHE_H
4396
4397+#include <linux/const.h>
4398+
4399 /* Bytes per L1 cache line */
4400-#define L1_CACHE_SHIFT (5)
4401-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4402+#define L1_CACHE_SHIFT 5
4403+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4404
4405 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4406 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4407diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4408index 6e6fe18..a6ae668 100644
4409--- a/arch/ia64/include/asm/atomic.h
4410+++ b/arch/ia64/include/asm/atomic.h
4411@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4412 #define atomic64_inc(v) atomic64_add(1, (v))
4413 #define atomic64_dec(v) atomic64_sub(1, (v))
4414
4415+#define atomic64_read_unchecked(v) atomic64_read(v)
4416+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4417+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4418+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4419+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4420+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4421+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4422+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4423+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4424+
4425 /* Atomic operations are already serializing */
4426 #define smp_mb__before_atomic_dec() barrier()
4427 #define smp_mb__after_atomic_dec() barrier()
4428diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4429index 988254a..e1ee885 100644
4430--- a/arch/ia64/include/asm/cache.h
4431+++ b/arch/ia64/include/asm/cache.h
4432@@ -1,6 +1,7 @@
4433 #ifndef _ASM_IA64_CACHE_H
4434 #define _ASM_IA64_CACHE_H
4435
4436+#include <linux/const.h>
4437
4438 /*
4439 * Copyright (C) 1998-2000 Hewlett-Packard Co
4440@@ -9,7 +10,7 @@
4441
4442 /* Bytes per L1 (data) cache line. */
4443 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4444-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4445+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4446
4447 #ifdef CONFIG_SMP
4448 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4449diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4450index b5298eb..67c6e62 100644
4451--- a/arch/ia64/include/asm/elf.h
4452+++ b/arch/ia64/include/asm/elf.h
4453@@ -42,6 +42,13 @@
4454 */
4455 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4456
4457+#ifdef CONFIG_PAX_ASLR
4458+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4459+
4460+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4461+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4462+#endif
4463+
4464 #define PT_IA_64_UNWIND 0x70000001
4465
4466 /* IA-64 relocations: */
4467diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4468index 96a8d92..617a1cf 100644
4469--- a/arch/ia64/include/asm/pgalloc.h
4470+++ b/arch/ia64/include/asm/pgalloc.h
4471@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4472 pgd_val(*pgd_entry) = __pa(pud);
4473 }
4474
4475+static inline void
4476+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4477+{
4478+ pgd_populate(mm, pgd_entry, pud);
4479+}
4480+
4481 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4482 {
4483 return quicklist_alloc(0, GFP_KERNEL, NULL);
4484@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4485 pud_val(*pud_entry) = __pa(pmd);
4486 }
4487
4488+static inline void
4489+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4490+{
4491+ pud_populate(mm, pud_entry, pmd);
4492+}
4493+
4494 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4495 {
4496 return quicklist_alloc(0, GFP_KERNEL, NULL);
4497diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4498index 815810c..d60bd4c 100644
4499--- a/arch/ia64/include/asm/pgtable.h
4500+++ b/arch/ia64/include/asm/pgtable.h
4501@@ -12,7 +12,7 @@
4502 * David Mosberger-Tang <davidm@hpl.hp.com>
4503 */
4504
4505-
4506+#include <linux/const.h>
4507 #include <asm/mman.h>
4508 #include <asm/page.h>
4509 #include <asm/processor.h>
4510@@ -142,6 +142,17 @@
4511 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4512 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4513 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4514+
4515+#ifdef CONFIG_PAX_PAGEEXEC
4516+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4517+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4518+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4519+#else
4520+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4521+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4522+# define PAGE_COPY_NOEXEC PAGE_COPY
4523+#endif
4524+
4525 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4526 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4527 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4528diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4529index 54ff557..70c88b7 100644
4530--- a/arch/ia64/include/asm/spinlock.h
4531+++ b/arch/ia64/include/asm/spinlock.h
4532@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4533 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4534
4535 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4536- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4537+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4538 }
4539
4540 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4541diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4542index 449c8c0..50cdf87 100644
4543--- a/arch/ia64/include/asm/uaccess.h
4544+++ b/arch/ia64/include/asm/uaccess.h
4545@@ -42,6 +42,8 @@
4546 #include <asm/pgtable.h>
4547 #include <asm/io.h>
4548
4549+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4550+
4551 /*
4552 * For historical reasons, the following macros are grossly misnamed:
4553 */
4554@@ -240,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4555 static inline unsigned long
4556 __copy_to_user (void __user *to, const void *from, unsigned long count)
4557 {
4558+ if (count > INT_MAX)
4559+ return count;
4560+
4561+ if (!__builtin_constant_p(count))
4562+ check_object_size(from, count, true);
4563+
4564 return __copy_user(to, (__force void __user *) from, count);
4565 }
4566
4567 static inline unsigned long
4568 __copy_from_user (void *to, const void __user *from, unsigned long count)
4569 {
4570+ if (count > INT_MAX)
4571+ return count;
4572+
4573+ if (!__builtin_constant_p(count))
4574+ check_object_size(to, count, false);
4575+
4576 return __copy_user((__force void __user *) to, from, count);
4577 }
4578
4579@@ -255,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4580 ({ \
4581 void __user *__cu_to = (to); \
4582 const void *__cu_from = (from); \
4583- long __cu_len = (n); \
4584+ unsigned long __cu_len = (n); \
4585 \
4586- if (__access_ok(__cu_to, __cu_len, get_fs())) \
4587+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
4588+ if (!__builtin_constant_p(n)) \
4589+ check_object_size(__cu_from, __cu_len, true); \
4590 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
4591+ } \
4592 __cu_len; \
4593 })
4594
4595@@ -266,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4596 ({ \
4597 void *__cu_to = (to); \
4598 const void __user *__cu_from = (from); \
4599- long __cu_len = (n); \
4600+ unsigned long __cu_len = (n); \
4601 \
4602 __chk_user_ptr(__cu_from); \
4603- if (__access_ok(__cu_from, __cu_len, get_fs())) \
4604+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
4605+ if (!__builtin_constant_p(n)) \
4606+ check_object_size(__cu_to, __cu_len, false); \
4607 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
4608+ } \
4609 __cu_len; \
4610 })
4611
4612diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
4613index 2d67317..07d8bfa 100644
4614--- a/arch/ia64/kernel/err_inject.c
4615+++ b/arch/ia64/kernel/err_inject.c
4616@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
4617 return NOTIFY_OK;
4618 }
4619
4620-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
4621+static struct notifier_block err_inject_cpu_notifier =
4622 {
4623 .notifier_call = err_inject_cpu_callback,
4624 };
4625diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
4626index 65bf9cd..794f06b 100644
4627--- a/arch/ia64/kernel/mca.c
4628+++ b/arch/ia64/kernel/mca.c
4629@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
4630 return NOTIFY_OK;
4631 }
4632
4633-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
4634+static struct notifier_block mca_cpu_notifier = {
4635 .notifier_call = mca_cpu_callback
4636 };
4637
4638diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
4639index 24603be..948052d 100644
4640--- a/arch/ia64/kernel/module.c
4641+++ b/arch/ia64/kernel/module.c
4642@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
4643 void
4644 module_free (struct module *mod, void *module_region)
4645 {
4646- if (mod && mod->arch.init_unw_table &&
4647- module_region == mod->module_init) {
4648+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
4649 unw_remove_unwind_table(mod->arch.init_unw_table);
4650 mod->arch.init_unw_table = NULL;
4651 }
4652@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
4653 }
4654
4655 static inline int
4656+in_init_rx (const struct module *mod, uint64_t addr)
4657+{
4658+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
4659+}
4660+
4661+static inline int
4662+in_init_rw (const struct module *mod, uint64_t addr)
4663+{
4664+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
4665+}
4666+
4667+static inline int
4668 in_init (const struct module *mod, uint64_t addr)
4669 {
4670- return addr - (uint64_t) mod->module_init < mod->init_size;
4671+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
4672+}
4673+
4674+static inline int
4675+in_core_rx (const struct module *mod, uint64_t addr)
4676+{
4677+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
4678+}
4679+
4680+static inline int
4681+in_core_rw (const struct module *mod, uint64_t addr)
4682+{
4683+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
4684 }
4685
4686 static inline int
4687 in_core (const struct module *mod, uint64_t addr)
4688 {
4689- return addr - (uint64_t) mod->module_core < mod->core_size;
4690+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
4691 }
4692
4693 static inline int
4694@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
4695 break;
4696
4697 case RV_BDREL:
4698- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
4699+ if (in_init_rx(mod, val))
4700+ val -= (uint64_t) mod->module_init_rx;
4701+ else if (in_init_rw(mod, val))
4702+ val -= (uint64_t) mod->module_init_rw;
4703+ else if (in_core_rx(mod, val))
4704+ val -= (uint64_t) mod->module_core_rx;
4705+ else if (in_core_rw(mod, val))
4706+ val -= (uint64_t) mod->module_core_rw;
4707 break;
4708
4709 case RV_LTV:
4710@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
4711 * addresses have been selected...
4712 */
4713 uint64_t gp;
4714- if (mod->core_size > MAX_LTOFF)
4715+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
4716 /*
4717 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
4718 * at the end of the module.
4719 */
4720- gp = mod->core_size - MAX_LTOFF / 2;
4721+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
4722 else
4723- gp = mod->core_size / 2;
4724- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
4725+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
4726+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
4727 mod->arch.gp = gp;
4728 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
4729 }
4730diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
4731index 77597e5..6f28f3f 100644
4732--- a/arch/ia64/kernel/palinfo.c
4733+++ b/arch/ia64/kernel/palinfo.c
4734@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
4735 return NOTIFY_OK;
4736 }
4737
4738-static struct notifier_block __refdata palinfo_cpu_notifier =
4739+static struct notifier_block palinfo_cpu_notifier =
4740 {
4741 .notifier_call = palinfo_cpu_callback,
4742 .priority = 0,
4743diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
4744index 79802e5..1a89ec5 100644
4745--- a/arch/ia64/kernel/salinfo.c
4746+++ b/arch/ia64/kernel/salinfo.c
4747@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
4748 return NOTIFY_OK;
4749 }
4750
4751-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
4752+static struct notifier_block salinfo_cpu_notifier =
4753 {
4754 .notifier_call = salinfo_cpu_callback,
4755 .priority = 0,
4756diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
4757index d9439ef..d0cac6b 100644
4758--- a/arch/ia64/kernel/sys_ia64.c
4759+++ b/arch/ia64/kernel/sys_ia64.c
4760@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4761 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
4762 struct mm_struct *mm = current->mm;
4763 struct vm_area_struct *vma;
4764+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4765
4766 if (len > RGN_MAP_LIMIT)
4767 return -ENOMEM;
4768@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4769 if (REGION_NUMBER(addr) == RGN_HPAGE)
4770 addr = 0;
4771 #endif
4772+
4773+#ifdef CONFIG_PAX_RANDMMAP
4774+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4775+ addr = mm->free_area_cache;
4776+ else
4777+#endif
4778+
4779 if (!addr)
4780 addr = mm->free_area_cache;
4781
4782@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4783 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
4784 /* At this point: (!vma || addr < vma->vm_end). */
4785 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
4786- if (start_addr != TASK_UNMAPPED_BASE) {
4787+ if (start_addr != mm->mmap_base) {
4788 /* Start a new search --- just in case we missed some holes. */
4789- addr = TASK_UNMAPPED_BASE;
4790+ addr = mm->mmap_base;
4791 goto full_search;
4792 }
4793 return -ENOMEM;
4794 }
4795- if (!vma || addr + len <= vma->vm_start) {
4796+ if (check_heap_stack_gap(vma, addr, len, offset)) {
4797 /* Remember the address where we stopped this search: */
4798 mm->free_area_cache = addr + len;
4799 return addr;
4800diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
4801index dc00b2c..cce53c2 100644
4802--- a/arch/ia64/kernel/topology.c
4803+++ b/arch/ia64/kernel/topology.c
4804@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
4805 return NOTIFY_OK;
4806 }
4807
4808-static struct notifier_block __cpuinitdata cache_cpu_notifier =
4809+static struct notifier_block cache_cpu_notifier =
4810 {
4811 .notifier_call = cache_cpu_callback
4812 };
4813diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
4814index 0ccb28f..8992469 100644
4815--- a/arch/ia64/kernel/vmlinux.lds.S
4816+++ b/arch/ia64/kernel/vmlinux.lds.S
4817@@ -198,7 +198,7 @@ SECTIONS {
4818 /* Per-cpu data: */
4819 . = ALIGN(PERCPU_PAGE_SIZE);
4820 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
4821- __phys_per_cpu_start = __per_cpu_load;
4822+ __phys_per_cpu_start = per_cpu_load;
4823 /*
4824 * ensure percpu data fits
4825 * into percpu page size
4826diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
4827index 6cf0341..d352594 100644
4828--- a/arch/ia64/mm/fault.c
4829+++ b/arch/ia64/mm/fault.c
4830@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
4831 return pte_present(pte);
4832 }
4833
4834+#ifdef CONFIG_PAX_PAGEEXEC
4835+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4836+{
4837+ unsigned long i;
4838+
4839+ printk(KERN_ERR "PAX: bytes at PC: ");
4840+ for (i = 0; i < 8; i++) {
4841+ unsigned int c;
4842+ if (get_user(c, (unsigned int *)pc+i))
4843+ printk(KERN_CONT "???????? ");
4844+ else
4845+ printk(KERN_CONT "%08x ", c);
4846+ }
4847+ printk("\n");
4848+}
4849+#endif
4850+
4851 # define VM_READ_BIT 0
4852 # define VM_WRITE_BIT 1
4853 # define VM_EXEC_BIT 2
4854@@ -149,8 +166,21 @@ retry:
4855 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
4856 goto bad_area;
4857
4858- if ((vma->vm_flags & mask) != mask)
4859+ if ((vma->vm_flags & mask) != mask) {
4860+
4861+#ifdef CONFIG_PAX_PAGEEXEC
4862+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
4863+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
4864+ goto bad_area;
4865+
4866+ up_read(&mm->mmap_sem);
4867+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
4868+ do_group_exit(SIGKILL);
4869+ }
4870+#endif
4871+
4872 goto bad_area;
4873+ }
4874
4875 /*
4876 * If for any reason at all we couldn't handle the fault, make
4877diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
4878index 5ca674b..127c3cb 100644
4879--- a/arch/ia64/mm/hugetlbpage.c
4880+++ b/arch/ia64/mm/hugetlbpage.c
4881@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
4882 unsigned long pgoff, unsigned long flags)
4883 {
4884 struct vm_area_struct *vmm;
4885+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
4886
4887 if (len > RGN_MAP_LIMIT)
4888 return -ENOMEM;
4889@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
4890 /* At this point: (!vmm || addr < vmm->vm_end). */
4891 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
4892 return -ENOMEM;
4893- if (!vmm || (addr + len) <= vmm->vm_start)
4894+ if (check_heap_stack_gap(vmm, addr, len, offset))
4895 return addr;
4896 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
4897 }
4898diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
4899index b755ea9..b9a969e 100644
4900--- a/arch/ia64/mm/init.c
4901+++ b/arch/ia64/mm/init.c
4902@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
4903 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
4904 vma->vm_end = vma->vm_start + PAGE_SIZE;
4905 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
4906+
4907+#ifdef CONFIG_PAX_PAGEEXEC
4908+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
4909+ vma->vm_flags &= ~VM_EXEC;
4910+
4911+#ifdef CONFIG_PAX_MPROTECT
4912+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
4913+ vma->vm_flags &= ~VM_MAYEXEC;
4914+#endif
4915+
4916+ }
4917+#endif
4918+
4919 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4920 down_write(&current->mm->mmap_sem);
4921 if (insert_vm_struct(current->mm, vma)) {
4922diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
4923index 40b3ee9..8c2c112 100644
4924--- a/arch/m32r/include/asm/cache.h
4925+++ b/arch/m32r/include/asm/cache.h
4926@@ -1,8 +1,10 @@
4927 #ifndef _ASM_M32R_CACHE_H
4928 #define _ASM_M32R_CACHE_H
4929
4930+#include <linux/const.h>
4931+
4932 /* L1 cache line size */
4933 #define L1_CACHE_SHIFT 4
4934-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4935+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4936
4937 #endif /* _ASM_M32R_CACHE_H */
4938diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
4939index 82abd15..d95ae5d 100644
4940--- a/arch/m32r/lib/usercopy.c
4941+++ b/arch/m32r/lib/usercopy.c
4942@@ -14,6 +14,9 @@
4943 unsigned long
4944 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
4945 {
4946+ if ((long)n < 0)
4947+ return n;
4948+
4949 prefetch(from);
4950 if (access_ok(VERIFY_WRITE, to, n))
4951 __copy_user(to,from,n);
4952@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
4953 unsigned long
4954 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
4955 {
4956+ if ((long)n < 0)
4957+ return n;
4958+
4959 prefetchw(to);
4960 if (access_ok(VERIFY_READ, from, n))
4961 __copy_user_zeroing(to,from,n);
4962diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
4963index 0395c51..5f26031 100644
4964--- a/arch/m68k/include/asm/cache.h
4965+++ b/arch/m68k/include/asm/cache.h
4966@@ -4,9 +4,11 @@
4967 #ifndef __ARCH_M68K_CACHE_H
4968 #define __ARCH_M68K_CACHE_H
4969
4970+#include <linux/const.h>
4971+
4972 /* bytes per L1 cache line */
4973 #define L1_CACHE_SHIFT 4
4974-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
4975+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4976
4977 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4978
4979diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
4980index 4efe96a..60e8699 100644
4981--- a/arch/microblaze/include/asm/cache.h
4982+++ b/arch/microblaze/include/asm/cache.h
4983@@ -13,11 +13,12 @@
4984 #ifndef _ASM_MICROBLAZE_CACHE_H
4985 #define _ASM_MICROBLAZE_CACHE_H
4986
4987+#include <linux/const.h>
4988 #include <asm/registers.h>
4989
4990 #define L1_CACHE_SHIFT 5
4991 /* word-granular cache in microblaze */
4992-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4993+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4994
4995 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4996
4997diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
4998index 01cc6ba..bcb7a5d 100644
4999--- a/arch/mips/include/asm/atomic.h
5000+++ b/arch/mips/include/asm/atomic.h
5001@@ -21,6 +21,10 @@
5002 #include <asm/cmpxchg.h>
5003 #include <asm/war.h>
5004
5005+#ifdef CONFIG_GENERIC_ATOMIC64
5006+#include <asm-generic/atomic64.h>
5007+#endif
5008+
5009 #define ATOMIC_INIT(i) { (i) }
5010
5011 /*
5012@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5013 */
5014 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5015
5016+#define atomic64_read_unchecked(v) atomic64_read(v)
5017+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5018+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5019+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5020+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5021+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5022+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5023+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5024+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5025+
5026 #endif /* CONFIG_64BIT */
5027
5028 /*
5029diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5030index b4db69f..8f3b093 100644
5031--- a/arch/mips/include/asm/cache.h
5032+++ b/arch/mips/include/asm/cache.h
5033@@ -9,10 +9,11 @@
5034 #ifndef _ASM_CACHE_H
5035 #define _ASM_CACHE_H
5036
5037+#include <linux/const.h>
5038 #include <kmalloc.h>
5039
5040 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5041-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5042+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5043
5044 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5045 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5046diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5047index 455c0ac..ad65fbe 100644
5048--- a/arch/mips/include/asm/elf.h
5049+++ b/arch/mips/include/asm/elf.h
5050@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5051 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5052 #endif
5053
5054+#ifdef CONFIG_PAX_ASLR
5055+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5056+
5057+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5058+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5059+#endif
5060+
5061 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5062 struct linux_binprm;
5063 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5064 int uses_interp);
5065
5066-struct mm_struct;
5067-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5068-#define arch_randomize_brk arch_randomize_brk
5069-
5070 #endif /* _ASM_ELF_H */
5071diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5072index c1f6afa..38cc6e9 100644
5073--- a/arch/mips/include/asm/exec.h
5074+++ b/arch/mips/include/asm/exec.h
5075@@ -12,6 +12,6 @@
5076 #ifndef _ASM_EXEC_H
5077 #define _ASM_EXEC_H
5078
5079-extern unsigned long arch_align_stack(unsigned long sp);
5080+#define arch_align_stack(x) ((x) & ~0xfUL)
5081
5082 #endif /* _ASM_EXEC_H */
5083diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5084index dbaec94..6a14935 100644
5085--- a/arch/mips/include/asm/page.h
5086+++ b/arch/mips/include/asm/page.h
5087@@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5088 #ifdef CONFIG_CPU_MIPS32
5089 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5090 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5091- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5092+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5093 #else
5094 typedef struct { unsigned long long pte; } pte_t;
5095 #define pte_val(x) ((x).pte)
5096diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5097index 881d18b..cea38bc 100644
5098--- a/arch/mips/include/asm/pgalloc.h
5099+++ b/arch/mips/include/asm/pgalloc.h
5100@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5101 {
5102 set_pud(pud, __pud((unsigned long)pmd));
5103 }
5104+
5105+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5106+{
5107+ pud_populate(mm, pud, pmd);
5108+}
5109 #endif
5110
5111 /*
5112diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5113index b2050b9..d71bb1b 100644
5114--- a/arch/mips/include/asm/thread_info.h
5115+++ b/arch/mips/include/asm/thread_info.h
5116@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
5117 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5118 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5119 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5120+/* li takes a 32bit immediate */
5121+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5122 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5123
5124 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5125@@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
5126 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5127 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5128 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5129+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5130+
5131+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5132
5133 /* work to do in syscall_trace_leave() */
5134-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5135+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5136
5137 /* work to do on interrupt/exception return */
5138 #define _TIF_WORK_MASK \
5139 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5140 /* work to do on any return to u-space */
5141-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5142+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5143
5144 #endif /* __KERNEL__ */
5145
5146diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5147index 9fdd8bc..4bd7f1a 100644
5148--- a/arch/mips/kernel/binfmt_elfn32.c
5149+++ b/arch/mips/kernel/binfmt_elfn32.c
5150@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5151 #undef ELF_ET_DYN_BASE
5152 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5153
5154+#ifdef CONFIG_PAX_ASLR
5155+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5156+
5157+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5158+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5159+#endif
5160+
5161 #include <asm/processor.h>
5162 #include <linux/module.h>
5163 #include <linux/elfcore.h>
5164diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5165index ff44823..97f8906 100644
5166--- a/arch/mips/kernel/binfmt_elfo32.c
5167+++ b/arch/mips/kernel/binfmt_elfo32.c
5168@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5169 #undef ELF_ET_DYN_BASE
5170 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5171
5172+#ifdef CONFIG_PAX_ASLR
5173+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5174+
5175+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5176+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5177+#endif
5178+
5179 #include <asm/processor.h>
5180
5181 /*
5182diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5183index a11c6f9..be5e164 100644
5184--- a/arch/mips/kernel/process.c
5185+++ b/arch/mips/kernel/process.c
5186@@ -460,15 +460,3 @@ unsigned long get_wchan(struct task_struct *task)
5187 out:
5188 return pc;
5189 }
5190-
5191-/*
5192- * Don't forget that the stack pointer must be aligned on a 8 bytes
5193- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5194- */
5195-unsigned long arch_align_stack(unsigned long sp)
5196-{
5197- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5198- sp -= get_random_int() & ~PAGE_MASK;
5199-
5200- return sp & ALMASK;
5201-}
5202diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5203index 4812c6d..2069554 100644
5204--- a/arch/mips/kernel/ptrace.c
5205+++ b/arch/mips/kernel/ptrace.c
5206@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5207 return arch;
5208 }
5209
5210+#ifdef CONFIG_GRKERNSEC_SETXID
5211+extern void gr_delayed_cred_worker(void);
5212+#endif
5213+
5214 /*
5215 * Notification of system call entry/exit
5216 * - triggered by current->work.syscall_trace
5217@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5218 /* do the secure computing check first */
5219 secure_computing_strict(regs->regs[2]);
5220
5221+#ifdef CONFIG_GRKERNSEC_SETXID
5222+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5223+ gr_delayed_cred_worker();
5224+#endif
5225+
5226 if (!(current->ptrace & PT_PTRACED))
5227 goto out;
5228
5229diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5230index d20a4bc..7096ae5 100644
5231--- a/arch/mips/kernel/scall32-o32.S
5232+++ b/arch/mips/kernel/scall32-o32.S
5233@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5234
5235 stack_done:
5236 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5237- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5238+ li t1, _TIF_SYSCALL_WORK
5239 and t0, t1
5240 bnez t0, syscall_trace_entry # -> yes
5241
5242diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5243index b64f642..0fe6eab 100644
5244--- a/arch/mips/kernel/scall64-64.S
5245+++ b/arch/mips/kernel/scall64-64.S
5246@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5247
5248 sd a3, PT_R26(sp) # save a3 for syscall restarting
5249
5250- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5251+ li t1, _TIF_SYSCALL_WORK
5252 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5253 and t0, t1, t0
5254 bnez t0, syscall_trace_entry
5255diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5256index c29ac19..c592d05 100644
5257--- a/arch/mips/kernel/scall64-n32.S
5258+++ b/arch/mips/kernel/scall64-n32.S
5259@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5260
5261 sd a3, PT_R26(sp) # save a3 for syscall restarting
5262
5263- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5264+ li t1, _TIF_SYSCALL_WORK
5265 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5266 and t0, t1, t0
5267 bnez t0, n32_syscall_trace_entry
5268diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5269index cf3e75e..72e93fe 100644
5270--- a/arch/mips/kernel/scall64-o32.S
5271+++ b/arch/mips/kernel/scall64-o32.S
5272@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5273 PTR 4b, bad_stack
5274 .previous
5275
5276- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5277+ li t1, _TIF_SYSCALL_WORK
5278 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5279 and t0, t1, t0
5280 bnez t0, trace_a_syscall
5281diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5282index ddcec1e..c7f983e 100644
5283--- a/arch/mips/mm/fault.c
5284+++ b/arch/mips/mm/fault.c
5285@@ -27,6 +27,23 @@
5286 #include <asm/highmem.h> /* For VMALLOC_END */
5287 #include <linux/kdebug.h>
5288
5289+#ifdef CONFIG_PAX_PAGEEXEC
5290+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5291+{
5292+ unsigned long i;
5293+
5294+ printk(KERN_ERR "PAX: bytes at PC: ");
5295+ for (i = 0; i < 5; i++) {
5296+ unsigned int c;
5297+ if (get_user(c, (unsigned int *)pc+i))
5298+ printk(KERN_CONT "???????? ");
5299+ else
5300+ printk(KERN_CONT "%08x ", c);
5301+ }
5302+ printk("\n");
5303+}
5304+#endif
5305+
5306 /*
5307 * This routine handles page faults. It determines the address,
5308 * and the problem, and then passes it off to one of the appropriate
5309diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5310index 7e5fe27..479a219 100644
5311--- a/arch/mips/mm/mmap.c
5312+++ b/arch/mips/mm/mmap.c
5313@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5314 struct vm_area_struct *vma;
5315 unsigned long addr = addr0;
5316 int do_color_align;
5317+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5318 struct vm_unmapped_area_info info;
5319
5320 if (unlikely(len > TASK_SIZE))
5321@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5322 do_color_align = 1;
5323
5324 /* requesting a specific address */
5325+
5326+#ifdef CONFIG_PAX_RANDMMAP
5327+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5328+#endif
5329+
5330 if (addr) {
5331 if (do_color_align)
5332 addr = COLOUR_ALIGN(addr, pgoff);
5333@@ -91,8 +97,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5334 addr = PAGE_ALIGN(addr);
5335
5336 vma = find_vma(mm, addr);
5337- if (TASK_SIZE - len >= addr &&
5338- (!vma || addr + len <= vma->vm_start))
5339+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len, offset))
5340 return addr;
5341 }
5342
5343@@ -146,6 +151,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5344 {
5345 unsigned long random_factor = 0UL;
5346
5347+#ifdef CONFIG_PAX_RANDMMAP
5348+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5349+#endif
5350+
5351 if (current->flags & PF_RANDOMIZE) {
5352 random_factor = get_random_int();
5353 random_factor = random_factor << PAGE_SHIFT;
5354@@ -157,42 +166,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5355
5356 if (mmap_is_legacy()) {
5357 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5358+
5359+#ifdef CONFIG_PAX_RANDMMAP
5360+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5361+ mm->mmap_base += mm->delta_mmap;
5362+#endif
5363+
5364 mm->get_unmapped_area = arch_get_unmapped_area;
5365 mm->unmap_area = arch_unmap_area;
5366 } else {
5367 mm->mmap_base = mmap_base(random_factor);
5368+
5369+#ifdef CONFIG_PAX_RANDMMAP
5370+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5371+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5372+#endif
5373+
5374 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5375 mm->unmap_area = arch_unmap_area_topdown;
5376 }
5377 }
5378
5379-static inline unsigned long brk_rnd(void)
5380-{
5381- unsigned long rnd = get_random_int();
5382-
5383- rnd = rnd << PAGE_SHIFT;
5384- /* 8MB for 32bit, 256MB for 64bit */
5385- if (TASK_IS_32BIT_ADDR)
5386- rnd = rnd & 0x7ffffful;
5387- else
5388- rnd = rnd & 0xffffffful;
5389-
5390- return rnd;
5391-}
5392-
5393-unsigned long arch_randomize_brk(struct mm_struct *mm)
5394-{
5395- unsigned long base = mm->brk;
5396- unsigned long ret;
5397-
5398- ret = PAGE_ALIGN(base + brk_rnd());
5399-
5400- if (ret < mm->brk)
5401- return mm->brk;
5402-
5403- return ret;
5404-}
5405-
5406 int __virt_addr_valid(const volatile void *kaddr)
5407 {
5408 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
5409diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5410index 967d144..db12197 100644
5411--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5412+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5413@@ -11,12 +11,14 @@
5414 #ifndef _ASM_PROC_CACHE_H
5415 #define _ASM_PROC_CACHE_H
5416
5417+#include <linux/const.h>
5418+
5419 /* L1 cache */
5420
5421 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5422 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5423-#define L1_CACHE_BYTES 16 /* bytes per entry */
5424 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5425+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5426 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5427
5428 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5429diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5430index bcb5df2..84fabd2 100644
5431--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5432+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5433@@ -16,13 +16,15 @@
5434 #ifndef _ASM_PROC_CACHE_H
5435 #define _ASM_PROC_CACHE_H
5436
5437+#include <linux/const.h>
5438+
5439 /*
5440 * L1 cache
5441 */
5442 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5443 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5444-#define L1_CACHE_BYTES 32 /* bytes per entry */
5445 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5446+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5447 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5448
5449 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5450diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5451index 4ce7a01..449202a 100644
5452--- a/arch/openrisc/include/asm/cache.h
5453+++ b/arch/openrisc/include/asm/cache.h
5454@@ -19,11 +19,13 @@
5455 #ifndef __ASM_OPENRISC_CACHE_H
5456 #define __ASM_OPENRISC_CACHE_H
5457
5458+#include <linux/const.h>
5459+
5460 /* FIXME: How can we replace these with values from the CPU...
5461 * they shouldn't be hard-coded!
5462 */
5463
5464-#define L1_CACHE_BYTES 16
5465 #define L1_CACHE_SHIFT 4
5466+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5467
5468 #endif /* __ASM_OPENRISC_CACHE_H */
5469diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5470index af9cf30..2aae9b2 100644
5471--- a/arch/parisc/include/asm/atomic.h
5472+++ b/arch/parisc/include/asm/atomic.h
5473@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5474
5475 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5476
5477+#define atomic64_read_unchecked(v) atomic64_read(v)
5478+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5479+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5480+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5481+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5482+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5483+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5484+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5485+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5486+
5487 #endif /* !CONFIG_64BIT */
5488
5489
5490diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5491index 47f11c7..3420df2 100644
5492--- a/arch/parisc/include/asm/cache.h
5493+++ b/arch/parisc/include/asm/cache.h
5494@@ -5,6 +5,7 @@
5495 #ifndef __ARCH_PARISC_CACHE_H
5496 #define __ARCH_PARISC_CACHE_H
5497
5498+#include <linux/const.h>
5499
5500 /*
5501 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5502@@ -15,13 +16,13 @@
5503 * just ruin performance.
5504 */
5505 #ifdef CONFIG_PA20
5506-#define L1_CACHE_BYTES 64
5507 #define L1_CACHE_SHIFT 6
5508 #else
5509-#define L1_CACHE_BYTES 32
5510 #define L1_CACHE_SHIFT 5
5511 #endif
5512
5513+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5514+
5515 #ifndef __ASSEMBLY__
5516
5517 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5518diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5519index 19f6cb1..6c78cf2 100644
5520--- a/arch/parisc/include/asm/elf.h
5521+++ b/arch/parisc/include/asm/elf.h
5522@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5523
5524 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5525
5526+#ifdef CONFIG_PAX_ASLR
5527+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5528+
5529+#define PAX_DELTA_MMAP_LEN 16
5530+#define PAX_DELTA_STACK_LEN 16
5531+#endif
5532+
5533 /* This yields a mask that user programs can use to figure out what
5534 instruction set this CPU supports. This could be done in user space,
5535 but it's not easy, and we've already done it here. */
5536diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
5537index fc987a1..6e068ef 100644
5538--- a/arch/parisc/include/asm/pgalloc.h
5539+++ b/arch/parisc/include/asm/pgalloc.h
5540@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5541 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
5542 }
5543
5544+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5545+{
5546+ pgd_populate(mm, pgd, pmd);
5547+}
5548+
5549 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
5550 {
5551 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
5552@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
5553 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
5554 #define pmd_free(mm, x) do { } while (0)
5555 #define pgd_populate(mm, pmd, pte) BUG()
5556+#define pgd_populate_kernel(mm, pmd, pte) BUG()
5557
5558 #endif
5559
5560diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
5561index 7df49fa..38b62bf 100644
5562--- a/arch/parisc/include/asm/pgtable.h
5563+++ b/arch/parisc/include/asm/pgtable.h
5564@@ -218,6 +218,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
5565 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
5566 #define PAGE_COPY PAGE_EXECREAD
5567 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
5568+
5569+#ifdef CONFIG_PAX_PAGEEXEC
5570+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
5571+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5572+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5573+#else
5574+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5575+# define PAGE_COPY_NOEXEC PAGE_COPY
5576+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5577+#endif
5578+
5579 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
5580 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
5581 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
5582diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
5583index 4ba2c93..f5e3974 100644
5584--- a/arch/parisc/include/asm/uaccess.h
5585+++ b/arch/parisc/include/asm/uaccess.h
5586@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
5587 const void __user *from,
5588 unsigned long n)
5589 {
5590- int sz = __compiletime_object_size(to);
5591+ size_t sz = __compiletime_object_size(to);
5592 int ret = -EFAULT;
5593
5594- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
5595+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
5596 ret = __copy_from_user(to, from, n);
5597 else
5598 copy_from_user_overflow();
5599diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
5600index 2a625fb..9908930 100644
5601--- a/arch/parisc/kernel/module.c
5602+++ b/arch/parisc/kernel/module.c
5603@@ -98,16 +98,38 @@
5604
5605 /* three functions to determine where in the module core
5606 * or init pieces the location is */
5607+static inline int in_init_rx(struct module *me, void *loc)
5608+{
5609+ return (loc >= me->module_init_rx &&
5610+ loc < (me->module_init_rx + me->init_size_rx));
5611+}
5612+
5613+static inline int in_init_rw(struct module *me, void *loc)
5614+{
5615+ return (loc >= me->module_init_rw &&
5616+ loc < (me->module_init_rw + me->init_size_rw));
5617+}
5618+
5619 static inline int in_init(struct module *me, void *loc)
5620 {
5621- return (loc >= me->module_init &&
5622- loc <= (me->module_init + me->init_size));
5623+ return in_init_rx(me, loc) || in_init_rw(me, loc);
5624+}
5625+
5626+static inline int in_core_rx(struct module *me, void *loc)
5627+{
5628+ return (loc >= me->module_core_rx &&
5629+ loc < (me->module_core_rx + me->core_size_rx));
5630+}
5631+
5632+static inline int in_core_rw(struct module *me, void *loc)
5633+{
5634+ return (loc >= me->module_core_rw &&
5635+ loc < (me->module_core_rw + me->core_size_rw));
5636 }
5637
5638 static inline int in_core(struct module *me, void *loc)
5639 {
5640- return (loc >= me->module_core &&
5641- loc <= (me->module_core + me->core_size));
5642+ return in_core_rx(me, loc) || in_core_rw(me, loc);
5643 }
5644
5645 static inline int in_local(struct module *me, void *loc)
5646@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
5647 }
5648
5649 /* align things a bit */
5650- me->core_size = ALIGN(me->core_size, 16);
5651- me->arch.got_offset = me->core_size;
5652- me->core_size += gots * sizeof(struct got_entry);
5653+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5654+ me->arch.got_offset = me->core_size_rw;
5655+ me->core_size_rw += gots * sizeof(struct got_entry);
5656
5657- me->core_size = ALIGN(me->core_size, 16);
5658- me->arch.fdesc_offset = me->core_size;
5659- me->core_size += fdescs * sizeof(Elf_Fdesc);
5660+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5661+ me->arch.fdesc_offset = me->core_size_rw;
5662+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
5663
5664 me->arch.got_max = gots;
5665 me->arch.fdesc_max = fdescs;
5666@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5667
5668 BUG_ON(value == 0);
5669
5670- got = me->module_core + me->arch.got_offset;
5671+ got = me->module_core_rw + me->arch.got_offset;
5672 for (i = 0; got[i].addr; i++)
5673 if (got[i].addr == value)
5674 goto out;
5675@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5676 #ifdef CONFIG_64BIT
5677 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5678 {
5679- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
5680+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
5681
5682 if (!value) {
5683 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
5684@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5685
5686 /* Create new one */
5687 fdesc->addr = value;
5688- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5689+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5690 return (Elf_Addr)fdesc;
5691 }
5692 #endif /* CONFIG_64BIT */
5693@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
5694
5695 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
5696 end = table + sechdrs[me->arch.unwind_section].sh_size;
5697- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5698+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5699
5700 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
5701 me->arch.unwind_section, table, end, gp);
5702diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
5703index f76c108..92bad82 100644
5704--- a/arch/parisc/kernel/sys_parisc.c
5705+++ b/arch/parisc/kernel/sys_parisc.c
5706@@ -33,9 +33,11 @@
5707 #include <linux/utsname.h>
5708 #include <linux/personality.h>
5709
5710-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5711+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
5712+ unsigned long flags)
5713 {
5714 struct vm_area_struct *vma;
5715+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5716
5717 addr = PAGE_ALIGN(addr);
5718
5719@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5720 /* At this point: (!vma || addr < vma->vm_end). */
5721 if (TASK_SIZE - len < addr)
5722 return -ENOMEM;
5723- if (!vma || addr + len <= vma->vm_start)
5724+ if (check_heap_stack_gap(vma, addr, len, offset))
5725 return addr;
5726 addr = vma->vm_end;
5727 }
5728@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
5729 return offset & 0x3FF000;
5730 }
5731
5732-static unsigned long get_shared_area(struct address_space *mapping,
5733- unsigned long addr, unsigned long len, unsigned long pgoff)
5734+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
5735+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
5736 {
5737 struct vm_area_struct *vma;
5738 int offset = mapping ? get_offset(mapping) : 0;
5739+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5740
5741 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
5742
5743@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
5744 /* At this point: (!vma || addr < vma->vm_end). */
5745 if (TASK_SIZE - len < addr)
5746 return -ENOMEM;
5747- if (!vma || addr + len <= vma->vm_start)
5748+ if (check_heap_stack_gap(vma, addr, len, rand_offset))
5749 return addr;
5750 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
5751 if (addr < vma->vm_end) /* handle wraparound */
5752@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5753 if (flags & MAP_FIXED)
5754 return addr;
5755 if (!addr)
5756- addr = TASK_UNMAPPED_BASE;
5757+ addr = current->mm->mmap_base;
5758
5759 if (filp) {
5760- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
5761+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
5762 } else if(flags & MAP_SHARED) {
5763- addr = get_shared_area(NULL, addr, len, pgoff);
5764+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
5765 } else {
5766- addr = get_unshared_area(addr, len);
5767+ addr = get_unshared_area(filp, addr, len, flags);
5768 }
5769 return addr;
5770 }
5771diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
5772index 45ba99f..8e22c33 100644
5773--- a/arch/parisc/kernel/traps.c
5774+++ b/arch/parisc/kernel/traps.c
5775@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
5776
5777 down_read(&current->mm->mmap_sem);
5778 vma = find_vma(current->mm,regs->iaoq[0]);
5779- if (vma && (regs->iaoq[0] >= vma->vm_start)
5780- && (vma->vm_flags & VM_EXEC)) {
5781-
5782+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
5783 fault_address = regs->iaoq[0];
5784 fault_space = regs->iasq[0];
5785
5786diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
5787index 18162ce..94de376 100644
5788--- a/arch/parisc/mm/fault.c
5789+++ b/arch/parisc/mm/fault.c
5790@@ -15,6 +15,7 @@
5791 #include <linux/sched.h>
5792 #include <linux/interrupt.h>
5793 #include <linux/module.h>
5794+#include <linux/unistd.h>
5795
5796 #include <asm/uaccess.h>
5797 #include <asm/traps.h>
5798@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
5799 static unsigned long
5800 parisc_acctyp(unsigned long code, unsigned int inst)
5801 {
5802- if (code == 6 || code == 16)
5803+ if (code == 6 || code == 7 || code == 16)
5804 return VM_EXEC;
5805
5806 switch (inst & 0xf0000000) {
5807@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
5808 }
5809 #endif
5810
5811+#ifdef CONFIG_PAX_PAGEEXEC
5812+/*
5813+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
5814+ *
5815+ * returns 1 when task should be killed
5816+ * 2 when rt_sigreturn trampoline was detected
5817+ * 3 when unpatched PLT trampoline was detected
5818+ */
5819+static int pax_handle_fetch_fault(struct pt_regs *regs)
5820+{
5821+
5822+#ifdef CONFIG_PAX_EMUPLT
5823+ int err;
5824+
5825+ do { /* PaX: unpatched PLT emulation */
5826+ unsigned int bl, depwi;
5827+
5828+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
5829+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
5830+
5831+ if (err)
5832+ break;
5833+
5834+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
5835+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
5836+
5837+ err = get_user(ldw, (unsigned int *)addr);
5838+ err |= get_user(bv, (unsigned int *)(addr+4));
5839+ err |= get_user(ldw2, (unsigned int *)(addr+8));
5840+
5841+ if (err)
5842+ break;
5843+
5844+ if (ldw == 0x0E801096U &&
5845+ bv == 0xEAC0C000U &&
5846+ ldw2 == 0x0E881095U)
5847+ {
5848+ unsigned int resolver, map;
5849+
5850+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
5851+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
5852+ if (err)
5853+ break;
5854+
5855+ regs->gr[20] = instruction_pointer(regs)+8;
5856+ regs->gr[21] = map;
5857+ regs->gr[22] = resolver;
5858+ regs->iaoq[0] = resolver | 3UL;
5859+ regs->iaoq[1] = regs->iaoq[0] + 4;
5860+ return 3;
5861+ }
5862+ }
5863+ } while (0);
5864+#endif
5865+
5866+#ifdef CONFIG_PAX_EMUTRAMP
5867+
5868+#ifndef CONFIG_PAX_EMUSIGRT
5869+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
5870+ return 1;
5871+#endif
5872+
5873+ do { /* PaX: rt_sigreturn emulation */
5874+ unsigned int ldi1, ldi2, bel, nop;
5875+
5876+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
5877+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
5878+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
5879+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
5880+
5881+ if (err)
5882+ break;
5883+
5884+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
5885+ ldi2 == 0x3414015AU &&
5886+ bel == 0xE4008200U &&
5887+ nop == 0x08000240U)
5888+ {
5889+ regs->gr[25] = (ldi1 & 2) >> 1;
5890+ regs->gr[20] = __NR_rt_sigreturn;
5891+ regs->gr[31] = regs->iaoq[1] + 16;
5892+ regs->sr[0] = regs->iasq[1];
5893+ regs->iaoq[0] = 0x100UL;
5894+ regs->iaoq[1] = regs->iaoq[0] + 4;
5895+ regs->iasq[0] = regs->sr[2];
5896+ regs->iasq[1] = regs->sr[2];
5897+ return 2;
5898+ }
5899+ } while (0);
5900+#endif
5901+
5902+ return 1;
5903+}
5904+
5905+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5906+{
5907+ unsigned long i;
5908+
5909+ printk(KERN_ERR "PAX: bytes at PC: ");
5910+ for (i = 0; i < 5; i++) {
5911+ unsigned int c;
5912+ if (get_user(c, (unsigned int *)pc+i))
5913+ printk(KERN_CONT "???????? ");
5914+ else
5915+ printk(KERN_CONT "%08x ", c);
5916+ }
5917+ printk("\n");
5918+}
5919+#endif
5920+
5921 int fixup_exception(struct pt_regs *regs)
5922 {
5923 const struct exception_table_entry *fix;
5924@@ -192,8 +303,33 @@ good_area:
5925
5926 acc_type = parisc_acctyp(code,regs->iir);
5927
5928- if ((vma->vm_flags & acc_type) != acc_type)
5929+ if ((vma->vm_flags & acc_type) != acc_type) {
5930+
5931+#ifdef CONFIG_PAX_PAGEEXEC
5932+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
5933+ (address & ~3UL) == instruction_pointer(regs))
5934+ {
5935+ up_read(&mm->mmap_sem);
5936+ switch (pax_handle_fetch_fault(regs)) {
5937+
5938+#ifdef CONFIG_PAX_EMUPLT
5939+ case 3:
5940+ return;
5941+#endif
5942+
5943+#ifdef CONFIG_PAX_EMUTRAMP
5944+ case 2:
5945+ return;
5946+#endif
5947+
5948+ }
5949+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
5950+ do_group_exit(SIGKILL);
5951+ }
5952+#endif
5953+
5954 goto bad_area;
5955+ }
5956
5957 /*
5958 * If for any reason at all we couldn't handle the fault, make
5959diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
5960index e3b1d41..8e81edf 100644
5961--- a/arch/powerpc/include/asm/atomic.h
5962+++ b/arch/powerpc/include/asm/atomic.h
5963@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
5964 return t1;
5965 }
5966
5967+#define atomic64_read_unchecked(v) atomic64_read(v)
5968+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5969+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5970+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5971+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5972+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5973+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5974+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5975+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5976+
5977 #endif /* __powerpc64__ */
5978
5979 #endif /* __KERNEL__ */
5980diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
5981index 9e495c9..b6878e5 100644
5982--- a/arch/powerpc/include/asm/cache.h
5983+++ b/arch/powerpc/include/asm/cache.h
5984@@ -3,6 +3,7 @@
5985
5986 #ifdef __KERNEL__
5987
5988+#include <linux/const.h>
5989
5990 /* bytes per L1 cache line */
5991 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
5992@@ -22,7 +23,7 @@
5993 #define L1_CACHE_SHIFT 7
5994 #endif
5995
5996-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5997+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5998
5999 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6000
6001diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6002index 6abf0a1..459d0f1 100644
6003--- a/arch/powerpc/include/asm/elf.h
6004+++ b/arch/powerpc/include/asm/elf.h
6005@@ -28,8 +28,19 @@
6006 the loader. We need to make sure that it is out of the way of the program
6007 that it will "exec", and that there is sufficient room for the brk. */
6008
6009-extern unsigned long randomize_et_dyn(unsigned long base);
6010-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6011+#define ELF_ET_DYN_BASE (0x20000000)
6012+
6013+#ifdef CONFIG_PAX_ASLR
6014+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6015+
6016+#ifdef __powerpc64__
6017+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6018+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6019+#else
6020+#define PAX_DELTA_MMAP_LEN 15
6021+#define PAX_DELTA_STACK_LEN 15
6022+#endif
6023+#endif
6024
6025 /*
6026 * Our registers are always unsigned longs, whether we're a 32 bit
6027@@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6028 (0x7ff >> (PAGE_SHIFT - 12)) : \
6029 (0x3ffff >> (PAGE_SHIFT - 12)))
6030
6031-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6032-#define arch_randomize_brk arch_randomize_brk
6033-
6034-
6035 #ifdef CONFIG_SPU_BASE
6036 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6037 #define NT_SPU 1
6038diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6039index 8196e9c..d83a9f3 100644
6040--- a/arch/powerpc/include/asm/exec.h
6041+++ b/arch/powerpc/include/asm/exec.h
6042@@ -4,6 +4,6 @@
6043 #ifndef _ASM_POWERPC_EXEC_H
6044 #define _ASM_POWERPC_EXEC_H
6045
6046-extern unsigned long arch_align_stack(unsigned long sp);
6047+#define arch_align_stack(x) ((x) & ~0xfUL)
6048
6049 #endif /* _ASM_POWERPC_EXEC_H */
6050diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6051index 5acabbd..7ea14fa 100644
6052--- a/arch/powerpc/include/asm/kmap_types.h
6053+++ b/arch/powerpc/include/asm/kmap_types.h
6054@@ -10,7 +10,7 @@
6055 * 2 of the License, or (at your option) any later version.
6056 */
6057
6058-#define KM_TYPE_NR 16
6059+#define KM_TYPE_NR 17
6060
6061 #endif /* __KERNEL__ */
6062 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6063diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6064index 8565c25..2865190 100644
6065--- a/arch/powerpc/include/asm/mman.h
6066+++ b/arch/powerpc/include/asm/mman.h
6067@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6068 }
6069 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6070
6071-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6072+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6073 {
6074 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6075 }
6076diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6077index f072e97..b436dee 100644
6078--- a/arch/powerpc/include/asm/page.h
6079+++ b/arch/powerpc/include/asm/page.h
6080@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6081 * and needs to be executable. This means the whole heap ends
6082 * up being executable.
6083 */
6084-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6085- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6086+#define VM_DATA_DEFAULT_FLAGS32 \
6087+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6088+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6089
6090 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6091 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6092@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6093 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6094 #endif
6095
6096+#define ktla_ktva(addr) (addr)
6097+#define ktva_ktla(addr) (addr)
6098+
6099 /*
6100 * Use the top bit of the higher-level page table entries to indicate whether
6101 * the entries we point to contain hugepages. This works because we know that
6102diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6103index cd915d6..c10cee8 100644
6104--- a/arch/powerpc/include/asm/page_64.h
6105+++ b/arch/powerpc/include/asm/page_64.h
6106@@ -154,15 +154,18 @@ do { \
6107 * stack by default, so in the absence of a PT_GNU_STACK program header
6108 * we turn execute permission off.
6109 */
6110-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6111- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6112+#define VM_STACK_DEFAULT_FLAGS32 \
6113+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6114+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6115
6116 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6117 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6118
6119+#ifndef CONFIG_PAX_PAGEEXEC
6120 #define VM_STACK_DEFAULT_FLAGS \
6121 (is_32bit_task() ? \
6122 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6123+#endif
6124
6125 #include <asm-generic/getorder.h>
6126
6127diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6128index 292725c..f87ae14 100644
6129--- a/arch/powerpc/include/asm/pgalloc-64.h
6130+++ b/arch/powerpc/include/asm/pgalloc-64.h
6131@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6132 #ifndef CONFIG_PPC_64K_PAGES
6133
6134 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6135+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6136
6137 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6138 {
6139@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6140 pud_set(pud, (unsigned long)pmd);
6141 }
6142
6143+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6144+{
6145+ pud_populate(mm, pud, pmd);
6146+}
6147+
6148 #define pmd_populate(mm, pmd, pte_page) \
6149 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6150 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6151@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6152 #else /* CONFIG_PPC_64K_PAGES */
6153
6154 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6155+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6156
6157 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6158 pte_t *pte)
6159diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6160index a9cbd3b..3b67efa 100644
6161--- a/arch/powerpc/include/asm/pgtable.h
6162+++ b/arch/powerpc/include/asm/pgtable.h
6163@@ -2,6 +2,7 @@
6164 #define _ASM_POWERPC_PGTABLE_H
6165 #ifdef __KERNEL__
6166
6167+#include <linux/const.h>
6168 #ifndef __ASSEMBLY__
6169 #include <asm/processor.h> /* For TASK_SIZE */
6170 #include <asm/mmu.h>
6171diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6172index 4aad413..85d86bf 100644
6173--- a/arch/powerpc/include/asm/pte-hash32.h
6174+++ b/arch/powerpc/include/asm/pte-hash32.h
6175@@ -21,6 +21,7 @@
6176 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6177 #define _PAGE_USER 0x004 /* usermode access allowed */
6178 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6179+#define _PAGE_EXEC _PAGE_GUARDED
6180 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6181 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6182 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6183diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6184index 3d5c9dc..62f8414 100644
6185--- a/arch/powerpc/include/asm/reg.h
6186+++ b/arch/powerpc/include/asm/reg.h
6187@@ -215,6 +215,7 @@
6188 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6189 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6190 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6191+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6192 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6193 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6194 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6195diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6196index 406b7b9..af63426 100644
6197--- a/arch/powerpc/include/asm/thread_info.h
6198+++ b/arch/powerpc/include/asm/thread_info.h
6199@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
6200 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6201 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6202 #define TIF_SINGLESTEP 8 /* singlestepping active */
6203-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
6204 #define TIF_SECCOMP 10 /* secure computing */
6205 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
6206 #define TIF_NOERROR 12 /* Force successful syscall return */
6207@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6208 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
6209 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6210 for stack store? */
6211+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6212+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
6213+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
6214
6215 /* as above, but as bit values */
6216 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6217@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
6218 #define _TIF_UPROBE (1<<TIF_UPROBE)
6219 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6220 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6221+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6222 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6223- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
6224+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6225+ _TIF_GRSEC_SETXID)
6226
6227 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6228 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6229diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6230index 4db4959..335e00c 100644
6231--- a/arch/powerpc/include/asm/uaccess.h
6232+++ b/arch/powerpc/include/asm/uaccess.h
6233@@ -13,6 +13,8 @@
6234 #define VERIFY_READ 0
6235 #define VERIFY_WRITE 1
6236
6237+extern void check_object_size(const void *ptr, unsigned long n, bool to);
6238+
6239 /*
6240 * The fs value determines whether argument validity checking should be
6241 * performed or not. If get_fs() == USER_DS, checking is performed, with
6242@@ -318,52 +320,6 @@ do { \
6243 extern unsigned long __copy_tofrom_user(void __user *to,
6244 const void __user *from, unsigned long size);
6245
6246-#ifndef __powerpc64__
6247-
6248-static inline unsigned long copy_from_user(void *to,
6249- const void __user *from, unsigned long n)
6250-{
6251- unsigned long over;
6252-
6253- if (access_ok(VERIFY_READ, from, n))
6254- return __copy_tofrom_user((__force void __user *)to, from, n);
6255- if ((unsigned long)from < TASK_SIZE) {
6256- over = (unsigned long)from + n - TASK_SIZE;
6257- return __copy_tofrom_user((__force void __user *)to, from,
6258- n - over) + over;
6259- }
6260- return n;
6261-}
6262-
6263-static inline unsigned long copy_to_user(void __user *to,
6264- const void *from, unsigned long n)
6265-{
6266- unsigned long over;
6267-
6268- if (access_ok(VERIFY_WRITE, to, n))
6269- return __copy_tofrom_user(to, (__force void __user *)from, n);
6270- if ((unsigned long)to < TASK_SIZE) {
6271- over = (unsigned long)to + n - TASK_SIZE;
6272- return __copy_tofrom_user(to, (__force void __user *)from,
6273- n - over) + over;
6274- }
6275- return n;
6276-}
6277-
6278-#else /* __powerpc64__ */
6279-
6280-#define __copy_in_user(to, from, size) \
6281- __copy_tofrom_user((to), (from), (size))
6282-
6283-extern unsigned long copy_from_user(void *to, const void __user *from,
6284- unsigned long n);
6285-extern unsigned long copy_to_user(void __user *to, const void *from,
6286- unsigned long n);
6287-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6288- unsigned long n);
6289-
6290-#endif /* __powerpc64__ */
6291-
6292 static inline unsigned long __copy_from_user_inatomic(void *to,
6293 const void __user *from, unsigned long n)
6294 {
6295@@ -387,6 +343,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6296 if (ret == 0)
6297 return 0;
6298 }
6299+
6300+ if (!__builtin_constant_p(n))
6301+ check_object_size(to, n, false);
6302+
6303 return __copy_tofrom_user((__force void __user *)to, from, n);
6304 }
6305
6306@@ -413,6 +373,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6307 if (ret == 0)
6308 return 0;
6309 }
6310+
6311+ if (!__builtin_constant_p(n))
6312+ check_object_size(from, n, true);
6313+
6314 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6315 }
6316
6317@@ -430,6 +394,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6318 return __copy_to_user_inatomic(to, from, size);
6319 }
6320
6321+#ifndef __powerpc64__
6322+
6323+static inline unsigned long __must_check copy_from_user(void *to,
6324+ const void __user *from, unsigned long n)
6325+{
6326+ unsigned long over;
6327+
6328+ if ((long)n < 0)
6329+ return n;
6330+
6331+ if (access_ok(VERIFY_READ, from, n)) {
6332+ if (!__builtin_constant_p(n))
6333+ check_object_size(to, n, false);
6334+ return __copy_tofrom_user((__force void __user *)to, from, n);
6335+ }
6336+ if ((unsigned long)from < TASK_SIZE) {
6337+ over = (unsigned long)from + n - TASK_SIZE;
6338+ if (!__builtin_constant_p(n - over))
6339+ check_object_size(to, n - over, false);
6340+ return __copy_tofrom_user((__force void __user *)to, from,
6341+ n - over) + over;
6342+ }
6343+ return n;
6344+}
6345+
6346+static inline unsigned long __must_check copy_to_user(void __user *to,
6347+ const void *from, unsigned long n)
6348+{
6349+ unsigned long over;
6350+
6351+ if ((long)n < 0)
6352+ return n;
6353+
6354+ if (access_ok(VERIFY_WRITE, to, n)) {
6355+ if (!__builtin_constant_p(n))
6356+ check_object_size(from, n, true);
6357+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6358+ }
6359+ if ((unsigned long)to < TASK_SIZE) {
6360+ over = (unsigned long)to + n - TASK_SIZE;
6361+ if (!__builtin_constant_p(n))
6362+ check_object_size(from, n - over, true);
6363+ return __copy_tofrom_user(to, (__force void __user *)from,
6364+ n - over) + over;
6365+ }
6366+ return n;
6367+}
6368+
6369+#else /* __powerpc64__ */
6370+
6371+#define __copy_in_user(to, from, size) \
6372+ __copy_tofrom_user((to), (from), (size))
6373+
6374+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6375+{
6376+ if ((long)n < 0 || n > INT_MAX)
6377+ return n;
6378+
6379+ if (!__builtin_constant_p(n))
6380+ check_object_size(to, n, false);
6381+
6382+ if (likely(access_ok(VERIFY_READ, from, n)))
6383+ n = __copy_from_user(to, from, n);
6384+ else
6385+ memset(to, 0, n);
6386+ return n;
6387+}
6388+
6389+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6390+{
6391+ if ((long)n < 0 || n > INT_MAX)
6392+ return n;
6393+
6394+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6395+ if (!__builtin_constant_p(n))
6396+ check_object_size(from, n, true);
6397+ n = __copy_to_user(to, from, n);
6398+ }
6399+ return n;
6400+}
6401+
6402+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6403+ unsigned long n);
6404+
6405+#endif /* __powerpc64__ */
6406+
6407 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6408
6409 static inline unsigned long clear_user(void __user *addr, unsigned long size)
6410diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6411index 4684e33..acc4d19e 100644
6412--- a/arch/powerpc/kernel/exceptions-64e.S
6413+++ b/arch/powerpc/kernel/exceptions-64e.S
6414@@ -715,6 +715,7 @@ storage_fault_common:
6415 std r14,_DAR(r1)
6416 std r15,_DSISR(r1)
6417 addi r3,r1,STACK_FRAME_OVERHEAD
6418+ bl .save_nvgprs
6419 mr r4,r14
6420 mr r5,r15
6421 ld r14,PACA_EXGEN+EX_R14(r13)
6422@@ -723,8 +724,7 @@ storage_fault_common:
6423 cmpdi r3,0
6424 bne- 1f
6425 b .ret_from_except_lite
6426-1: bl .save_nvgprs
6427- mr r5,r3
6428+1: mr r5,r3
6429 addi r3,r1,STACK_FRAME_OVERHEAD
6430 ld r4,_DAR(r1)
6431 bl .bad_page_fault
6432diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6433index 4665e82..080ea99 100644
6434--- a/arch/powerpc/kernel/exceptions-64s.S
6435+++ b/arch/powerpc/kernel/exceptions-64s.S
6436@@ -1206,10 +1206,10 @@ handle_page_fault:
6437 11: ld r4,_DAR(r1)
6438 ld r5,_DSISR(r1)
6439 addi r3,r1,STACK_FRAME_OVERHEAD
6440+ bl .save_nvgprs
6441 bl .do_page_fault
6442 cmpdi r3,0
6443 beq+ 12f
6444- bl .save_nvgprs
6445 mr r5,r3
6446 addi r3,r1,STACK_FRAME_OVERHEAD
6447 lwz r4,_DAR(r1)
6448diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6449index 2e3200c..72095ce 100644
6450--- a/arch/powerpc/kernel/module_32.c
6451+++ b/arch/powerpc/kernel/module_32.c
6452@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6453 me->arch.core_plt_section = i;
6454 }
6455 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6456- printk("Module doesn't contain .plt or .init.plt sections.\n");
6457+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6458 return -ENOEXEC;
6459 }
6460
6461@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6462
6463 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6464 /* Init, or core PLT? */
6465- if (location >= mod->module_core
6466- && location < mod->module_core + mod->core_size)
6467+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6468+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6469 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6470- else
6471+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6472+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6473 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6474+ else {
6475+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6476+ return ~0UL;
6477+ }
6478
6479 /* Find this entry, or if that fails, the next avail. entry */
6480 while (entry->jump[0]) {
6481diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6482index 8143067..21ae55b 100644
6483--- a/arch/powerpc/kernel/process.c
6484+++ b/arch/powerpc/kernel/process.c
6485@@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
6486 * Lookup NIP late so we have the best change of getting the
6487 * above info out without failing
6488 */
6489- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6490- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6491+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6492+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6493 #endif
6494 show_stack(current, (unsigned long *) regs->gpr[1]);
6495 if (!user_mode(regs))
6496@@ -1129,10 +1129,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6497 newsp = stack[0];
6498 ip = stack[STACK_FRAME_LR_SAVE];
6499 if (!firstframe || ip != lr) {
6500- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6501+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6502 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6503 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6504- printk(" (%pS)",
6505+ printk(" (%pA)",
6506 (void *)current->ret_stack[curr_frame].ret);
6507 curr_frame--;
6508 }
6509@@ -1152,7 +1152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6510 struct pt_regs *regs = (struct pt_regs *)
6511 (sp + STACK_FRAME_OVERHEAD);
6512 lr = regs->link;
6513- printk("--- Exception: %lx at %pS\n LR = %pS\n",
6514+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
6515 regs->trap, (void *)regs->nip, (void *)lr);
6516 firstframe = 1;
6517 }
6518@@ -1194,58 +1194,3 @@ void __ppc64_runlatch_off(void)
6519 mtspr(SPRN_CTRLT, ctrl);
6520 }
6521 #endif /* CONFIG_PPC64 */
6522-
6523-unsigned long arch_align_stack(unsigned long sp)
6524-{
6525- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6526- sp -= get_random_int() & ~PAGE_MASK;
6527- return sp & ~0xf;
6528-}
6529-
6530-static inline unsigned long brk_rnd(void)
6531-{
6532- unsigned long rnd = 0;
6533-
6534- /* 8MB for 32bit, 1GB for 64bit */
6535- if (is_32bit_task())
6536- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
6537- else
6538- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
6539-
6540- return rnd << PAGE_SHIFT;
6541-}
6542-
6543-unsigned long arch_randomize_brk(struct mm_struct *mm)
6544-{
6545- unsigned long base = mm->brk;
6546- unsigned long ret;
6547-
6548-#ifdef CONFIG_PPC_STD_MMU_64
6549- /*
6550- * If we are using 1TB segments and we are allowed to randomise
6551- * the heap, we can put it above 1TB so it is backed by a 1TB
6552- * segment. Otherwise the heap will be in the bottom 1TB
6553- * which always uses 256MB segments and this may result in a
6554- * performance penalty.
6555- */
6556- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
6557- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
6558-#endif
6559-
6560- ret = PAGE_ALIGN(base + brk_rnd());
6561-
6562- if (ret < mm->brk)
6563- return mm->brk;
6564-
6565- return ret;
6566-}
6567-
6568-unsigned long randomize_et_dyn(unsigned long base)
6569-{
6570- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
6571-
6572- if (ret < base)
6573- return base;
6574-
6575- return ret;
6576-}
6577diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
6578index c497000..8fde506 100644
6579--- a/arch/powerpc/kernel/ptrace.c
6580+++ b/arch/powerpc/kernel/ptrace.c
6581@@ -1737,6 +1737,10 @@ long arch_ptrace(struct task_struct *child, long request,
6582 return ret;
6583 }
6584
6585+#ifdef CONFIG_GRKERNSEC_SETXID
6586+extern void gr_delayed_cred_worker(void);
6587+#endif
6588+
6589 /*
6590 * We must return the syscall number to actually look up in the table.
6591 * This can be -1L to skip running any syscall at all.
6592@@ -1747,6 +1751,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
6593
6594 secure_computing_strict(regs->gpr[0]);
6595
6596+#ifdef CONFIG_GRKERNSEC_SETXID
6597+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6598+ gr_delayed_cred_worker();
6599+#endif
6600+
6601 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
6602 tracehook_report_syscall_entry(regs))
6603 /*
6604@@ -1781,6 +1790,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
6605 {
6606 int step;
6607
6608+#ifdef CONFIG_GRKERNSEC_SETXID
6609+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6610+ gr_delayed_cred_worker();
6611+#endif
6612+
6613 audit_syscall_exit(regs);
6614
6615 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6616diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
6617index 804e323..79181c1 100644
6618--- a/arch/powerpc/kernel/signal_32.c
6619+++ b/arch/powerpc/kernel/signal_32.c
6620@@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
6621 /* Save user registers on the stack */
6622 frame = &rt_sf->uc.uc_mcontext;
6623 addr = frame;
6624- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
6625+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6626 if (save_user_regs(regs, frame, 0, 1))
6627 goto badframe;
6628 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
6629diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
6630index 1ca045d..139c3f7 100644
6631--- a/arch/powerpc/kernel/signal_64.c
6632+++ b/arch/powerpc/kernel/signal_64.c
6633@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
6634 current->thread.fpscr.val = 0;
6635
6636 /* Set up to return from userspace. */
6637- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
6638+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6639 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
6640 } else {
6641 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
6642diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
6643index 3ce1f86..c30e629 100644
6644--- a/arch/powerpc/kernel/sysfs.c
6645+++ b/arch/powerpc/kernel/sysfs.c
6646@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
6647 return NOTIFY_OK;
6648 }
6649
6650-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
6651+static struct notifier_block sysfs_cpu_nb = {
6652 .notifier_call = sysfs_cpu_notify,
6653 };
6654
6655diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
6656index 3251840..3f7c77a 100644
6657--- a/arch/powerpc/kernel/traps.c
6658+++ b/arch/powerpc/kernel/traps.c
6659@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
6660 return flags;
6661 }
6662
6663+extern void gr_handle_kernel_exploit(void);
6664+
6665 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6666 int signr)
6667 {
6668@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6669 panic("Fatal exception in interrupt");
6670 if (panic_on_oops)
6671 panic("Fatal exception");
6672+
6673+ gr_handle_kernel_exploit();
6674+
6675 do_exit(signr);
6676 }
6677
6678diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
6679index 1b2076f..835e4be 100644
6680--- a/arch/powerpc/kernel/vdso.c
6681+++ b/arch/powerpc/kernel/vdso.c
6682@@ -34,6 +34,7 @@
6683 #include <asm/firmware.h>
6684 #include <asm/vdso.h>
6685 #include <asm/vdso_datapage.h>
6686+#include <asm/mman.h>
6687
6688 #include "setup.h"
6689
6690@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6691 vdso_base = VDSO32_MBASE;
6692 #endif
6693
6694- current->mm->context.vdso_base = 0;
6695+ current->mm->context.vdso_base = ~0UL;
6696
6697 /* vDSO has a problem and was disabled, just don't "enable" it for the
6698 * process
6699@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6700 vdso_base = get_unmapped_area(NULL, vdso_base,
6701 (vdso_pages << PAGE_SHIFT) +
6702 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
6703- 0, 0);
6704+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
6705 if (IS_ERR_VALUE(vdso_base)) {
6706 rc = vdso_base;
6707 goto fail_mmapsem;
6708diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
6709index 5eea6f3..5d10396 100644
6710--- a/arch/powerpc/lib/usercopy_64.c
6711+++ b/arch/powerpc/lib/usercopy_64.c
6712@@ -9,22 +9,6 @@
6713 #include <linux/module.h>
6714 #include <asm/uaccess.h>
6715
6716-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6717-{
6718- if (likely(access_ok(VERIFY_READ, from, n)))
6719- n = __copy_from_user(to, from, n);
6720- else
6721- memset(to, 0, n);
6722- return n;
6723-}
6724-
6725-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6726-{
6727- if (likely(access_ok(VERIFY_WRITE, to, n)))
6728- n = __copy_to_user(to, from, n);
6729- return n;
6730-}
6731-
6732 unsigned long copy_in_user(void __user *to, const void __user *from,
6733 unsigned long n)
6734 {
6735@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
6736 return n;
6737 }
6738
6739-EXPORT_SYMBOL(copy_from_user);
6740-EXPORT_SYMBOL(copy_to_user);
6741 EXPORT_SYMBOL(copy_in_user);
6742
6743diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
6744index 3a8489a..6a63b3b 100644
6745--- a/arch/powerpc/mm/fault.c
6746+++ b/arch/powerpc/mm/fault.c
6747@@ -32,6 +32,10 @@
6748 #include <linux/perf_event.h>
6749 #include <linux/magic.h>
6750 #include <linux/ratelimit.h>
6751+#include <linux/slab.h>
6752+#include <linux/pagemap.h>
6753+#include <linux/compiler.h>
6754+#include <linux/unistd.h>
6755
6756 #include <asm/firmware.h>
6757 #include <asm/page.h>
6758@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
6759 }
6760 #endif
6761
6762+#ifdef CONFIG_PAX_PAGEEXEC
6763+/*
6764+ * PaX: decide what to do with offenders (regs->nip = fault address)
6765+ *
6766+ * returns 1 when task should be killed
6767+ */
6768+static int pax_handle_fetch_fault(struct pt_regs *regs)
6769+{
6770+ return 1;
6771+}
6772+
6773+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6774+{
6775+ unsigned long i;
6776+
6777+ printk(KERN_ERR "PAX: bytes at PC: ");
6778+ for (i = 0; i < 5; i++) {
6779+ unsigned int c;
6780+ if (get_user(c, (unsigned int __user *)pc+i))
6781+ printk(KERN_CONT "???????? ");
6782+ else
6783+ printk(KERN_CONT "%08x ", c);
6784+ }
6785+ printk("\n");
6786+}
6787+#endif
6788+
6789 /*
6790 * Check whether the instruction at regs->nip is a store using
6791 * an update addressing form which will update r1.
6792@@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
6793 * indicate errors in DSISR but can validly be set in SRR1.
6794 */
6795 if (trap == 0x400)
6796- error_code &= 0x48200000;
6797+ error_code &= 0x58200000;
6798 else
6799 is_write = error_code & DSISR_ISSTORE;
6800 #else
6801@@ -364,7 +395,7 @@ good_area:
6802 * "undefined". Of those that can be set, this is the only
6803 * one which seems bad.
6804 */
6805- if (error_code & 0x10000000)
6806+ if (error_code & DSISR_GUARDED)
6807 /* Guarded storage error. */
6808 goto bad_area;
6809 #endif /* CONFIG_8xx */
6810@@ -379,7 +410,7 @@ good_area:
6811 * processors use the same I/D cache coherency mechanism
6812 * as embedded.
6813 */
6814- if (error_code & DSISR_PROTFAULT)
6815+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
6816 goto bad_area;
6817 #endif /* CONFIG_PPC_STD_MMU */
6818
6819@@ -462,6 +493,23 @@ bad_area:
6820 bad_area_nosemaphore:
6821 /* User mode accesses cause a SIGSEGV */
6822 if (user_mode(regs)) {
6823+
6824+#ifdef CONFIG_PAX_PAGEEXEC
6825+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
6826+#ifdef CONFIG_PPC_STD_MMU
6827+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
6828+#else
6829+ if (is_exec && regs->nip == address) {
6830+#endif
6831+ switch (pax_handle_fetch_fault(regs)) {
6832+ }
6833+
6834+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
6835+ do_group_exit(SIGKILL);
6836+ }
6837+ }
6838+#endif
6839+
6840 _exception(SIGSEGV, regs, code, address);
6841 return 0;
6842 }
6843diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
6844index 67a42ed..cd463e0 100644
6845--- a/arch/powerpc/mm/mmap_64.c
6846+++ b/arch/powerpc/mm/mmap_64.c
6847@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
6848 {
6849 unsigned long rnd = 0;
6850
6851+#ifdef CONFIG_PAX_RANDMMAP
6852+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6853+#endif
6854+
6855 if (current->flags & PF_RANDOMIZE) {
6856 /* 8MB for 32bit, 1GB for 64bit */
6857 if (is_32bit_task())
6858@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6859 */
6860 if (mmap_is_legacy()) {
6861 mm->mmap_base = TASK_UNMAPPED_BASE;
6862+
6863+#ifdef CONFIG_PAX_RANDMMAP
6864+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6865+ mm->mmap_base += mm->delta_mmap;
6866+#endif
6867+
6868 mm->get_unmapped_area = arch_get_unmapped_area;
6869 mm->unmap_area = arch_unmap_area;
6870 } else {
6871 mm->mmap_base = mmap_base();
6872+
6873+#ifdef CONFIG_PAX_RANDMMAP
6874+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6875+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6876+#endif
6877+
6878 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6879 mm->unmap_area = arch_unmap_area_topdown;
6880 }
6881diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
6882index e779642..e5bb889 100644
6883--- a/arch/powerpc/mm/mmu_context_nohash.c
6884+++ b/arch/powerpc/mm/mmu_context_nohash.c
6885@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
6886 return NOTIFY_OK;
6887 }
6888
6889-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
6890+static struct notifier_block mmu_context_cpu_nb = {
6891 .notifier_call = mmu_context_cpu_notify,
6892 };
6893
6894diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
6895index bba87ca..c346a33 100644
6896--- a/arch/powerpc/mm/numa.c
6897+++ b/arch/powerpc/mm/numa.c
6898@@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
6899 return ret;
6900 }
6901
6902-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
6903+static struct notifier_block ppc64_numa_nb = {
6904 .notifier_call = cpu_numa_callback,
6905 .priority = 1 /* Must run before sched domains notifier. */
6906 };
6907diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
6908index cf9dada..241529f 100644
6909--- a/arch/powerpc/mm/slice.c
6910+++ b/arch/powerpc/mm/slice.c
6911@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
6912 if ((mm->task_size - len) < addr)
6913 return 0;
6914 vma = find_vma(mm, addr);
6915- return (!vma || (addr + len) <= vma->vm_start);
6916+ return check_heap_stack_gap(vma, addr, len, 0);
6917 }
6918
6919 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
6920@@ -272,7 +272,7 @@ full_search:
6921 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
6922 continue;
6923 }
6924- if (!vma || addr + len <= vma->vm_start) {
6925+ if (check_heap_stack_gap(vma, addr, len, 0)) {
6926 /*
6927 * Remember the place where we stopped the search:
6928 */
6929@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
6930 }
6931 }
6932
6933- addr = mm->mmap_base;
6934- while (addr > len) {
6935+ if (mm->mmap_base < len)
6936+ addr = -ENOMEM;
6937+ else
6938+ addr = mm->mmap_base - len;
6939+
6940+ while (!IS_ERR_VALUE(addr)) {
6941 /* Go down by chunk size */
6942- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
6943+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
6944
6945 /* Check for hit with different page size */
6946 mask = slice_range_to_mask(addr, len);
6947@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
6948 * return with success:
6949 */
6950 vma = find_vma(mm, addr);
6951- if (!vma || (addr + len) <= vma->vm_start) {
6952+ if (check_heap_stack_gap(vma, addr, len, 0)) {
6953 /* remember the address as a hint for next time */
6954 if (use_cache)
6955 mm->free_area_cache = addr;
6956@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
6957 mm->cached_hole_size = vma->vm_start - addr;
6958
6959 /* try just below the current vma->vm_start */
6960- addr = vma->vm_start;
6961+ addr = skip_heap_stack_gap(vma, len, 0);
6962 }
6963
6964 /*
6965@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
6966 if (fixed && addr > (mm->task_size - len))
6967 return -EINVAL;
6968
6969+#ifdef CONFIG_PAX_RANDMMAP
6970+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
6971+ addr = 0;
6972+#endif
6973+
6974 /* If hint, make sure it matches our alignment restrictions */
6975 if (!fixed && addr) {
6976 addr = _ALIGN_UP(addr, 1ul << pshift);
6977diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
6978index bdb738a..49c9f95 100644
6979--- a/arch/powerpc/platforms/powermac/smp.c
6980+++ b/arch/powerpc/platforms/powermac/smp.c
6981@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
6982 return NOTIFY_OK;
6983 }
6984
6985-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
6986+static struct notifier_block smp_core99_cpu_nb = {
6987 .notifier_call = smp_core99_cpu_notify,
6988 };
6989 #endif /* CONFIG_HOTPLUG_CPU */
6990diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
6991index c797832..ce575c8 100644
6992--- a/arch/s390/include/asm/atomic.h
6993+++ b/arch/s390/include/asm/atomic.h
6994@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
6995 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
6996 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6997
6998+#define atomic64_read_unchecked(v) atomic64_read(v)
6999+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7000+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7001+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7002+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7003+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7004+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7005+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7006+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7007+
7008 #define smp_mb__before_atomic_dec() smp_mb()
7009 #define smp_mb__after_atomic_dec() smp_mb()
7010 #define smp_mb__before_atomic_inc() smp_mb()
7011diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7012index 4d7ccac..d03d0ad 100644
7013--- a/arch/s390/include/asm/cache.h
7014+++ b/arch/s390/include/asm/cache.h
7015@@ -9,8 +9,10 @@
7016 #ifndef __ARCH_S390_CACHE_H
7017 #define __ARCH_S390_CACHE_H
7018
7019-#define L1_CACHE_BYTES 256
7020+#include <linux/const.h>
7021+
7022 #define L1_CACHE_SHIFT 8
7023+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7024 #define NET_SKB_PAD 32
7025
7026 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7027diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7028index 178ff96..8c93bd1 100644
7029--- a/arch/s390/include/asm/elf.h
7030+++ b/arch/s390/include/asm/elf.h
7031@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
7032 the loader. We need to make sure that it is out of the way of the program
7033 that it will "exec", and that there is sufficient room for the brk. */
7034
7035-extern unsigned long randomize_et_dyn(unsigned long base);
7036-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7037+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7038+
7039+#ifdef CONFIG_PAX_ASLR
7040+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7041+
7042+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7043+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7044+#endif
7045
7046 /* This yields a mask that user programs can use to figure out what
7047 instruction set this CPU supports. */
7048@@ -210,9 +216,6 @@ struct linux_binprm;
7049 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7050 int arch_setup_additional_pages(struct linux_binprm *, int);
7051
7052-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7053-#define arch_randomize_brk arch_randomize_brk
7054-
7055 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7056
7057 #endif
7058diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7059index c4a93d6..4d2a9b4 100644
7060--- a/arch/s390/include/asm/exec.h
7061+++ b/arch/s390/include/asm/exec.h
7062@@ -7,6 +7,6 @@
7063 #ifndef __ASM_EXEC_H
7064 #define __ASM_EXEC_H
7065
7066-extern unsigned long arch_align_stack(unsigned long sp);
7067+#define arch_align_stack(x) ((x) & ~0xfUL)
7068
7069 #endif /* __ASM_EXEC_H */
7070diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7071index 34268df..ea97318 100644
7072--- a/arch/s390/include/asm/uaccess.h
7073+++ b/arch/s390/include/asm/uaccess.h
7074@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7075 copy_to_user(void __user *to, const void *from, unsigned long n)
7076 {
7077 might_fault();
7078+
7079+ if ((long)n < 0)
7080+ return n;
7081+
7082 if (access_ok(VERIFY_WRITE, to, n))
7083 n = __copy_to_user(to, from, n);
7084 return n;
7085@@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7086 static inline unsigned long __must_check
7087 __copy_from_user(void *to, const void __user *from, unsigned long n)
7088 {
7089+ if ((long)n < 0)
7090+ return n;
7091+
7092 if (__builtin_constant_p(n) && (n <= 256))
7093 return uaccess.copy_from_user_small(n, from, to);
7094 else
7095@@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7096 static inline unsigned long __must_check
7097 copy_from_user(void *to, const void __user *from, unsigned long n)
7098 {
7099- unsigned int sz = __compiletime_object_size(to);
7100+ size_t sz = __compiletime_object_size(to);
7101
7102 might_fault();
7103- if (unlikely(sz != -1 && sz < n)) {
7104+
7105+ if ((long)n < 0)
7106+ return n;
7107+
7108+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7109 copy_from_user_overflow();
7110 return n;
7111 }
7112diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7113index 4610dea..cf0af21 100644
7114--- a/arch/s390/kernel/module.c
7115+++ b/arch/s390/kernel/module.c
7116@@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7117
7118 /* Increase core size by size of got & plt and set start
7119 offsets for got and plt. */
7120- me->core_size = ALIGN(me->core_size, 4);
7121- me->arch.got_offset = me->core_size;
7122- me->core_size += me->arch.got_size;
7123- me->arch.plt_offset = me->core_size;
7124- me->core_size += me->arch.plt_size;
7125+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7126+ me->arch.got_offset = me->core_size_rw;
7127+ me->core_size_rw += me->arch.got_size;
7128+ me->arch.plt_offset = me->core_size_rx;
7129+ me->core_size_rx += me->arch.plt_size;
7130 return 0;
7131 }
7132
7133@@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7134 if (info->got_initialized == 0) {
7135 Elf_Addr *gotent;
7136
7137- gotent = me->module_core + me->arch.got_offset +
7138+ gotent = me->module_core_rw + me->arch.got_offset +
7139 info->got_offset;
7140 *gotent = val;
7141 info->got_initialized = 1;
7142@@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7143 else if (r_type == R_390_GOTENT ||
7144 r_type == R_390_GOTPLTENT)
7145 *(unsigned int *) loc =
7146- (val + (Elf_Addr) me->module_core - loc) >> 1;
7147+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
7148 else if (r_type == R_390_GOT64 ||
7149 r_type == R_390_GOTPLT64)
7150 *(unsigned long *) loc = val;
7151@@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7152 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7153 if (info->plt_initialized == 0) {
7154 unsigned int *ip;
7155- ip = me->module_core + me->arch.plt_offset +
7156+ ip = me->module_core_rx + me->arch.plt_offset +
7157 info->plt_offset;
7158 #ifndef CONFIG_64BIT
7159 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7160@@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7161 val - loc + 0xffffUL < 0x1ffffeUL) ||
7162 (r_type == R_390_PLT32DBL &&
7163 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7164- val = (Elf_Addr) me->module_core +
7165+ val = (Elf_Addr) me->module_core_rx +
7166 me->arch.plt_offset +
7167 info->plt_offset;
7168 val += rela->r_addend - loc;
7169@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7170 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7171 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7172 val = val + rela->r_addend -
7173- ((Elf_Addr) me->module_core + me->arch.got_offset);
7174+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7175 if (r_type == R_390_GOTOFF16)
7176 *(unsigned short *) loc = val;
7177 else if (r_type == R_390_GOTOFF32)
7178@@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7179 break;
7180 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7181 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7182- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7183+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7184 rela->r_addend - loc;
7185 if (r_type == R_390_GOTPC)
7186 *(unsigned int *) loc = val;
7187diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7188index 536d645..4a5bd9e 100644
7189--- a/arch/s390/kernel/process.c
7190+++ b/arch/s390/kernel/process.c
7191@@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
7192 }
7193 return 0;
7194 }
7195-
7196-unsigned long arch_align_stack(unsigned long sp)
7197-{
7198- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7199- sp -= get_random_int() & ~PAGE_MASK;
7200- return sp & ~0xf;
7201-}
7202-
7203-static inline unsigned long brk_rnd(void)
7204-{
7205- /* 8MB for 32bit, 1GB for 64bit */
7206- if (is_32bit_task())
7207- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7208- else
7209- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7210-}
7211-
7212-unsigned long arch_randomize_brk(struct mm_struct *mm)
7213-{
7214- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7215-
7216- if (ret < mm->brk)
7217- return mm->brk;
7218- return ret;
7219-}
7220-
7221-unsigned long randomize_et_dyn(unsigned long base)
7222-{
7223- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7224-
7225- if (!(current->flags & PF_RANDOMIZE))
7226- return base;
7227- if (ret < base)
7228- return base;
7229- return ret;
7230-}
7231diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7232index c59a5ef..3fae59c 100644
7233--- a/arch/s390/mm/mmap.c
7234+++ b/arch/s390/mm/mmap.c
7235@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7236 */
7237 if (mmap_is_legacy()) {
7238 mm->mmap_base = TASK_UNMAPPED_BASE;
7239+
7240+#ifdef CONFIG_PAX_RANDMMAP
7241+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7242+ mm->mmap_base += mm->delta_mmap;
7243+#endif
7244+
7245 mm->get_unmapped_area = arch_get_unmapped_area;
7246 mm->unmap_area = arch_unmap_area;
7247 } else {
7248 mm->mmap_base = mmap_base();
7249+
7250+#ifdef CONFIG_PAX_RANDMMAP
7251+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7252+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7253+#endif
7254+
7255 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7256 mm->unmap_area = arch_unmap_area_topdown;
7257 }
7258@@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7259 */
7260 if (mmap_is_legacy()) {
7261 mm->mmap_base = TASK_UNMAPPED_BASE;
7262+
7263+#ifdef CONFIG_PAX_RANDMMAP
7264+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7265+ mm->mmap_base += mm->delta_mmap;
7266+#endif
7267+
7268 mm->get_unmapped_area = s390_get_unmapped_area;
7269 mm->unmap_area = arch_unmap_area;
7270 } else {
7271 mm->mmap_base = mmap_base();
7272+
7273+#ifdef CONFIG_PAX_RANDMMAP
7274+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7275+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7276+#endif
7277+
7278 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7279 mm->unmap_area = arch_unmap_area_topdown;
7280 }
7281diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7282index ae3d59f..f65f075 100644
7283--- a/arch/score/include/asm/cache.h
7284+++ b/arch/score/include/asm/cache.h
7285@@ -1,7 +1,9 @@
7286 #ifndef _ASM_SCORE_CACHE_H
7287 #define _ASM_SCORE_CACHE_H
7288
7289+#include <linux/const.h>
7290+
7291 #define L1_CACHE_SHIFT 4
7292-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7293+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7294
7295 #endif /* _ASM_SCORE_CACHE_H */
7296diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7297index f9f3cd5..58ff438 100644
7298--- a/arch/score/include/asm/exec.h
7299+++ b/arch/score/include/asm/exec.h
7300@@ -1,6 +1,6 @@
7301 #ifndef _ASM_SCORE_EXEC_H
7302 #define _ASM_SCORE_EXEC_H
7303
7304-extern unsigned long arch_align_stack(unsigned long sp);
7305+#define arch_align_stack(x) (x)
7306
7307 #endif /* _ASM_SCORE_EXEC_H */
7308diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7309index 7956846..5f37677 100644
7310--- a/arch/score/kernel/process.c
7311+++ b/arch/score/kernel/process.c
7312@@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
7313
7314 return task_pt_regs(task)->cp0_epc;
7315 }
7316-
7317-unsigned long arch_align_stack(unsigned long sp)
7318-{
7319- return sp;
7320-}
7321diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7322index ef9e555..331bd29 100644
7323--- a/arch/sh/include/asm/cache.h
7324+++ b/arch/sh/include/asm/cache.h
7325@@ -9,10 +9,11 @@
7326 #define __ASM_SH_CACHE_H
7327 #ifdef __KERNEL__
7328
7329+#include <linux/const.h>
7330 #include <linux/init.h>
7331 #include <cpu/cache.h>
7332
7333-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7334+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7335
7336 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7337
7338diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7339index 03f2b55..b027032 100644
7340--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7341+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7342@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7343 return NOTIFY_OK;
7344 }
7345
7346-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7347+static struct notifier_block shx3_cpu_notifier = {
7348 .notifier_call = shx3_cpu_callback,
7349 };
7350
7351diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7352index 6777177..cb5e44f 100644
7353--- a/arch/sh/mm/mmap.c
7354+++ b/arch/sh/mm/mmap.c
7355@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7356 struct mm_struct *mm = current->mm;
7357 struct vm_area_struct *vma;
7358 int do_colour_align;
7359+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7360 struct vm_unmapped_area_info info;
7361
7362 if (flags & MAP_FIXED) {
7363@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7364 if (filp || (flags & MAP_SHARED))
7365 do_colour_align = 1;
7366
7367+#ifdef CONFIG_PAX_RANDMMAP
7368+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7369+#endif
7370+
7371 if (addr) {
7372 if (do_colour_align)
7373 addr = COLOUR_ALIGN(addr, pgoff);
7374@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7375 addr = PAGE_ALIGN(addr);
7376
7377 vma = find_vma(mm, addr);
7378- if (TASK_SIZE - len >= addr &&
7379- (!vma || addr + len <= vma->vm_start))
7380+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7381 return addr;
7382 }
7383
7384 info.flags = 0;
7385 info.length = len;
7386- info.low_limit = TASK_UNMAPPED_BASE;
7387+ info.low_limit = mm->mmap_base;
7388 info.high_limit = TASK_SIZE;
7389 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7390 info.align_offset = pgoff << PAGE_SHIFT;
7391@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7392 struct mm_struct *mm = current->mm;
7393 unsigned long addr = addr0;
7394 int do_colour_align;
7395+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7396 struct vm_unmapped_area_info info;
7397
7398 if (flags & MAP_FIXED) {
7399@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7400 if (filp || (flags & MAP_SHARED))
7401 do_colour_align = 1;
7402
7403+#ifdef CONFIG_PAX_RANDMMAP
7404+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7405+#endif
7406+
7407 /* requesting a specific address */
7408 if (addr) {
7409 if (do_colour_align)
7410@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7411 addr = PAGE_ALIGN(addr);
7412
7413 vma = find_vma(mm, addr);
7414- if (TASK_SIZE - len >= addr &&
7415- (!vma || addr + len <= vma->vm_start))
7416+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7417 return addr;
7418 }
7419
7420@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7421 VM_BUG_ON(addr != -ENOMEM);
7422 info.flags = 0;
7423 info.low_limit = TASK_UNMAPPED_BASE;
7424+
7425+#ifdef CONFIG_PAX_RANDMMAP
7426+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7427+ info.low_limit += mm->delta_mmap;
7428+#endif
7429+
7430 info.high_limit = TASK_SIZE;
7431 addr = vm_unmapped_area(&info);
7432 }
7433diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7434index be56a24..443328f 100644
7435--- a/arch/sparc/include/asm/atomic_64.h
7436+++ b/arch/sparc/include/asm/atomic_64.h
7437@@ -14,18 +14,40 @@
7438 #define ATOMIC64_INIT(i) { (i) }
7439
7440 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7441+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7442+{
7443+ return v->counter;
7444+}
7445 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7446+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7447+{
7448+ return v->counter;
7449+}
7450
7451 #define atomic_set(v, i) (((v)->counter) = i)
7452+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7453+{
7454+ v->counter = i;
7455+}
7456 #define atomic64_set(v, i) (((v)->counter) = i)
7457+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7458+{
7459+ v->counter = i;
7460+}
7461
7462 extern void atomic_add(int, atomic_t *);
7463+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7464 extern void atomic64_add(long, atomic64_t *);
7465+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7466 extern void atomic_sub(int, atomic_t *);
7467+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7468 extern void atomic64_sub(long, atomic64_t *);
7469+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7470
7471 extern int atomic_add_ret(int, atomic_t *);
7472+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7473 extern long atomic64_add_ret(long, atomic64_t *);
7474+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7475 extern int atomic_sub_ret(int, atomic_t *);
7476 extern long atomic64_sub_ret(long, atomic64_t *);
7477
7478@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7479 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7480
7481 #define atomic_inc_return(v) atomic_add_ret(1, v)
7482+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7483+{
7484+ return atomic_add_ret_unchecked(1, v);
7485+}
7486 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7487+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7488+{
7489+ return atomic64_add_ret_unchecked(1, v);
7490+}
7491
7492 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7493 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7494
7495 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7496+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7497+{
7498+ return atomic_add_ret_unchecked(i, v);
7499+}
7500 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7501+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7502+{
7503+ return atomic64_add_ret_unchecked(i, v);
7504+}
7505
7506 /*
7507 * atomic_inc_and_test - increment and test
7508@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7509 * other cases.
7510 */
7511 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7512+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7513+{
7514+ return atomic_inc_return_unchecked(v) == 0;
7515+}
7516 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7517
7518 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
7519@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7520 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
7521
7522 #define atomic_inc(v) atomic_add(1, v)
7523+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7524+{
7525+ atomic_add_unchecked(1, v);
7526+}
7527 #define atomic64_inc(v) atomic64_add(1, v)
7528+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7529+{
7530+ atomic64_add_unchecked(1, v);
7531+}
7532
7533 #define atomic_dec(v) atomic_sub(1, v)
7534+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7535+{
7536+ atomic_sub_unchecked(1, v);
7537+}
7538 #define atomic64_dec(v) atomic64_sub(1, v)
7539+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7540+{
7541+ atomic64_sub_unchecked(1, v);
7542+}
7543
7544 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
7545 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
7546
7547 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7548+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7549+{
7550+ return cmpxchg(&v->counter, old, new);
7551+}
7552 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7553+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7554+{
7555+ return xchg(&v->counter, new);
7556+}
7557
7558 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7559 {
7560- int c, old;
7561+ int c, old, new;
7562 c = atomic_read(v);
7563 for (;;) {
7564- if (unlikely(c == (u)))
7565+ if (unlikely(c == u))
7566 break;
7567- old = atomic_cmpxchg((v), c, c + (a));
7568+
7569+ asm volatile("addcc %2, %0, %0\n"
7570+
7571+#ifdef CONFIG_PAX_REFCOUNT
7572+ "tvs %%icc, 6\n"
7573+#endif
7574+
7575+ : "=r" (new)
7576+ : "0" (c), "ir" (a)
7577+ : "cc");
7578+
7579+ old = atomic_cmpxchg(v, c, new);
7580 if (likely(old == c))
7581 break;
7582 c = old;
7583@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7584 #define atomic64_cmpxchg(v, o, n) \
7585 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7586 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
7587+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7588+{
7589+ return xchg(&v->counter, new);
7590+}
7591
7592 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7593 {
7594- long c, old;
7595+ long c, old, new;
7596 c = atomic64_read(v);
7597 for (;;) {
7598- if (unlikely(c == (u)))
7599+ if (unlikely(c == u))
7600 break;
7601- old = atomic64_cmpxchg((v), c, c + (a));
7602+
7603+ asm volatile("addcc %2, %0, %0\n"
7604+
7605+#ifdef CONFIG_PAX_REFCOUNT
7606+ "tvs %%xcc, 6\n"
7607+#endif
7608+
7609+ : "=r" (new)
7610+ : "0" (c), "ir" (a)
7611+ : "cc");
7612+
7613+ old = atomic64_cmpxchg(v, c, new);
7614 if (likely(old == c))
7615 break;
7616 c = old;
7617 }
7618- return c != (u);
7619+ return c != u;
7620 }
7621
7622 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7623diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
7624index 5bb6991..5c2132e 100644
7625--- a/arch/sparc/include/asm/cache.h
7626+++ b/arch/sparc/include/asm/cache.h
7627@@ -7,10 +7,12 @@
7628 #ifndef _SPARC_CACHE_H
7629 #define _SPARC_CACHE_H
7630
7631+#include <linux/const.h>
7632+
7633 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
7634
7635 #define L1_CACHE_SHIFT 5
7636-#define L1_CACHE_BYTES 32
7637+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7638
7639 #ifdef CONFIG_SPARC32
7640 #define SMP_CACHE_BYTES_SHIFT 5
7641diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
7642index ac74a2c..a9e58af 100644
7643--- a/arch/sparc/include/asm/elf_32.h
7644+++ b/arch/sparc/include/asm/elf_32.h
7645@@ -114,6 +114,13 @@ typedef struct {
7646
7647 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
7648
7649+#ifdef CONFIG_PAX_ASLR
7650+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7651+
7652+#define PAX_DELTA_MMAP_LEN 16
7653+#define PAX_DELTA_STACK_LEN 16
7654+#endif
7655+
7656 /* This yields a mask that user programs can use to figure out what
7657 instruction set this cpu supports. This can NOT be done in userspace
7658 on Sparc. */
7659diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
7660index 370ca1e..d4f4a98 100644
7661--- a/arch/sparc/include/asm/elf_64.h
7662+++ b/arch/sparc/include/asm/elf_64.h
7663@@ -189,6 +189,13 @@ typedef struct {
7664 #define ELF_ET_DYN_BASE 0x0000010000000000UL
7665 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
7666
7667+#ifdef CONFIG_PAX_ASLR
7668+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
7669+
7670+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
7671+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
7672+#endif
7673+
7674 extern unsigned long sparc64_elf_hwcap;
7675 #define ELF_HWCAP sparc64_elf_hwcap
7676
7677diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
7678index 9b1c36d..209298b 100644
7679--- a/arch/sparc/include/asm/pgalloc_32.h
7680+++ b/arch/sparc/include/asm/pgalloc_32.h
7681@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
7682 }
7683
7684 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
7685+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
7686
7687 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
7688 unsigned long address)
7689diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
7690index bcfe063..b333142 100644
7691--- a/arch/sparc/include/asm/pgalloc_64.h
7692+++ b/arch/sparc/include/asm/pgalloc_64.h
7693@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7694 }
7695
7696 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
7697+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
7698
7699 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
7700 {
7701diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
7702index 6fc1348..390c50a 100644
7703--- a/arch/sparc/include/asm/pgtable_32.h
7704+++ b/arch/sparc/include/asm/pgtable_32.h
7705@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
7706 #define PAGE_SHARED SRMMU_PAGE_SHARED
7707 #define PAGE_COPY SRMMU_PAGE_COPY
7708 #define PAGE_READONLY SRMMU_PAGE_RDONLY
7709+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
7710+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
7711+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
7712 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
7713
7714 /* Top-level page directory - dummy used by init-mm.
7715@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
7716
7717 /* xwr */
7718 #define __P000 PAGE_NONE
7719-#define __P001 PAGE_READONLY
7720-#define __P010 PAGE_COPY
7721-#define __P011 PAGE_COPY
7722+#define __P001 PAGE_READONLY_NOEXEC
7723+#define __P010 PAGE_COPY_NOEXEC
7724+#define __P011 PAGE_COPY_NOEXEC
7725 #define __P100 PAGE_READONLY
7726 #define __P101 PAGE_READONLY
7727 #define __P110 PAGE_COPY
7728 #define __P111 PAGE_COPY
7729
7730 #define __S000 PAGE_NONE
7731-#define __S001 PAGE_READONLY
7732-#define __S010 PAGE_SHARED
7733-#define __S011 PAGE_SHARED
7734+#define __S001 PAGE_READONLY_NOEXEC
7735+#define __S010 PAGE_SHARED_NOEXEC
7736+#define __S011 PAGE_SHARED_NOEXEC
7737 #define __S100 PAGE_READONLY
7738 #define __S101 PAGE_READONLY
7739 #define __S110 PAGE_SHARED
7740diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
7741index 79da178..c2eede8 100644
7742--- a/arch/sparc/include/asm/pgtsrmmu.h
7743+++ b/arch/sparc/include/asm/pgtsrmmu.h
7744@@ -115,6 +115,11 @@
7745 SRMMU_EXEC | SRMMU_REF)
7746 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
7747 SRMMU_EXEC | SRMMU_REF)
7748+
7749+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
7750+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7751+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7752+
7753 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
7754 SRMMU_DIRTY | SRMMU_REF)
7755
7756diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
7757index 9689176..63c18ea 100644
7758--- a/arch/sparc/include/asm/spinlock_64.h
7759+++ b/arch/sparc/include/asm/spinlock_64.h
7760@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
7761
7762 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
7763
7764-static void inline arch_read_lock(arch_rwlock_t *lock)
7765+static inline void arch_read_lock(arch_rwlock_t *lock)
7766 {
7767 unsigned long tmp1, tmp2;
7768
7769 __asm__ __volatile__ (
7770 "1: ldsw [%2], %0\n"
7771 " brlz,pn %0, 2f\n"
7772-"4: add %0, 1, %1\n"
7773+"4: addcc %0, 1, %1\n"
7774+
7775+#ifdef CONFIG_PAX_REFCOUNT
7776+" tvs %%icc, 6\n"
7777+#endif
7778+
7779 " cas [%2], %0, %1\n"
7780 " cmp %0, %1\n"
7781 " bne,pn %%icc, 1b\n"
7782@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
7783 " .previous"
7784 : "=&r" (tmp1), "=&r" (tmp2)
7785 : "r" (lock)
7786- : "memory");
7787+ : "memory", "cc");
7788 }
7789
7790-static int inline arch_read_trylock(arch_rwlock_t *lock)
7791+static inline int arch_read_trylock(arch_rwlock_t *lock)
7792 {
7793 int tmp1, tmp2;
7794
7795@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
7796 "1: ldsw [%2], %0\n"
7797 " brlz,a,pn %0, 2f\n"
7798 " mov 0, %0\n"
7799-" add %0, 1, %1\n"
7800+" addcc %0, 1, %1\n"
7801+
7802+#ifdef CONFIG_PAX_REFCOUNT
7803+" tvs %%icc, 6\n"
7804+#endif
7805+
7806 " cas [%2], %0, %1\n"
7807 " cmp %0, %1\n"
7808 " bne,pn %%icc, 1b\n"
7809@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
7810 return tmp1;
7811 }
7812
7813-static void inline arch_read_unlock(arch_rwlock_t *lock)
7814+static inline void arch_read_unlock(arch_rwlock_t *lock)
7815 {
7816 unsigned long tmp1, tmp2;
7817
7818 __asm__ __volatile__(
7819 "1: lduw [%2], %0\n"
7820-" sub %0, 1, %1\n"
7821+" subcc %0, 1, %1\n"
7822+
7823+#ifdef CONFIG_PAX_REFCOUNT
7824+" tvs %%icc, 6\n"
7825+#endif
7826+
7827 " cas [%2], %0, %1\n"
7828 " cmp %0, %1\n"
7829 " bne,pn %%xcc, 1b\n"
7830@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
7831 : "memory");
7832 }
7833
7834-static void inline arch_write_lock(arch_rwlock_t *lock)
7835+static inline void arch_write_lock(arch_rwlock_t *lock)
7836 {
7837 unsigned long mask, tmp1, tmp2;
7838
7839@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
7840 : "memory");
7841 }
7842
7843-static void inline arch_write_unlock(arch_rwlock_t *lock)
7844+static inline void arch_write_unlock(arch_rwlock_t *lock)
7845 {
7846 __asm__ __volatile__(
7847 " stw %%g0, [%0]"
7848@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
7849 : "memory");
7850 }
7851
7852-static int inline arch_write_trylock(arch_rwlock_t *lock)
7853+static inline int arch_write_trylock(arch_rwlock_t *lock)
7854 {
7855 unsigned long mask, tmp1, tmp2, result;
7856
7857diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
7858index 25849ae..924c54b 100644
7859--- a/arch/sparc/include/asm/thread_info_32.h
7860+++ b/arch/sparc/include/asm/thread_info_32.h
7861@@ -49,6 +49,8 @@ struct thread_info {
7862 unsigned long w_saved;
7863
7864 struct restart_block restart_block;
7865+
7866+ unsigned long lowest_stack;
7867 };
7868
7869 /*
7870diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
7871index 269bd92..e46a9b8 100644
7872--- a/arch/sparc/include/asm/thread_info_64.h
7873+++ b/arch/sparc/include/asm/thread_info_64.h
7874@@ -63,6 +63,8 @@ struct thread_info {
7875 struct pt_regs *kern_una_regs;
7876 unsigned int kern_una_insn;
7877
7878+ unsigned long lowest_stack;
7879+
7880 unsigned long fpregs[0] __attribute__ ((aligned(64)));
7881 };
7882
7883@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
7884 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
7885 /* flag bit 6 is available */
7886 #define TIF_32BIT 7 /* 32-bit binary */
7887-/* flag bit 8 is available */
7888+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
7889 #define TIF_SECCOMP 9 /* secure computing */
7890 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
7891 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
7892+
7893 /* NOTE: Thread flags >= 12 should be ones we have no interest
7894 * in using in assembly, else we can't use the mask as
7895 * an immediate value in instructions such as andcc.
7896@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
7897 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
7898 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7899 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
7900+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7901
7902 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
7903 _TIF_DO_NOTIFY_RESUME_MASK | \
7904 _TIF_NEED_RESCHED)
7905 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
7906
7907+#define _TIF_WORK_SYSCALL \
7908+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
7909+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7910+
7911+
7912 /*
7913 * Thread-synchronous status.
7914 *
7915diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
7916index 0167d26..9acd8ed 100644
7917--- a/arch/sparc/include/asm/uaccess.h
7918+++ b/arch/sparc/include/asm/uaccess.h
7919@@ -1,5 +1,13 @@
7920 #ifndef ___ASM_SPARC_UACCESS_H
7921 #define ___ASM_SPARC_UACCESS_H
7922+
7923+#ifdef __KERNEL__
7924+#ifndef __ASSEMBLY__
7925+#include <linux/types.h>
7926+extern void check_object_size(const void *ptr, unsigned long n, bool to);
7927+#endif
7928+#endif
7929+
7930 #if defined(__sparc__) && defined(__arch64__)
7931 #include <asm/uaccess_64.h>
7932 #else
7933diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
7934index 53a28dd..50c38c3 100644
7935--- a/arch/sparc/include/asm/uaccess_32.h
7936+++ b/arch/sparc/include/asm/uaccess_32.h
7937@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
7938
7939 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
7940 {
7941- if (n && __access_ok((unsigned long) to, n))
7942+ if ((long)n < 0)
7943+ return n;
7944+
7945+ if (n && __access_ok((unsigned long) to, n)) {
7946+ if (!__builtin_constant_p(n))
7947+ check_object_size(from, n, true);
7948 return __copy_user(to, (__force void __user *) from, n);
7949- else
7950+ } else
7951 return n;
7952 }
7953
7954 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
7955 {
7956+ if ((long)n < 0)
7957+ return n;
7958+
7959+ if (!__builtin_constant_p(n))
7960+ check_object_size(from, n, true);
7961+
7962 return __copy_user(to, (__force void __user *) from, n);
7963 }
7964
7965 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
7966 {
7967- if (n && __access_ok((unsigned long) from, n))
7968+ if ((long)n < 0)
7969+ return n;
7970+
7971+ if (n && __access_ok((unsigned long) from, n)) {
7972+ if (!__builtin_constant_p(n))
7973+ check_object_size(to, n, false);
7974 return __copy_user((__force void __user *) to, from, n);
7975- else
7976+ } else
7977 return n;
7978 }
7979
7980 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
7981 {
7982+ if ((long)n < 0)
7983+ return n;
7984+
7985 return __copy_user((__force void __user *) to, from, n);
7986 }
7987
7988diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
7989index e562d3c..191f176 100644
7990--- a/arch/sparc/include/asm/uaccess_64.h
7991+++ b/arch/sparc/include/asm/uaccess_64.h
7992@@ -10,6 +10,7 @@
7993 #include <linux/compiler.h>
7994 #include <linux/string.h>
7995 #include <linux/thread_info.h>
7996+#include <linux/kernel.h>
7997 #include <asm/asi.h>
7998 #include <asm/spitfire.h>
7999 #include <asm-generic/uaccess-unaligned.h>
8000@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8001 static inline unsigned long __must_check
8002 copy_from_user(void *to, const void __user *from, unsigned long size)
8003 {
8004- unsigned long ret = ___copy_from_user(to, from, size);
8005+ unsigned long ret;
8006
8007+ if ((long)size < 0 || size > INT_MAX)
8008+ return size;
8009+
8010+ if (!__builtin_constant_p(size))
8011+ check_object_size(to, size, false);
8012+
8013+ ret = ___copy_from_user(to, from, size);
8014 if (unlikely(ret))
8015 ret = copy_from_user_fixup(to, from, size);
8016
8017@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8018 static inline unsigned long __must_check
8019 copy_to_user(void __user *to, const void *from, unsigned long size)
8020 {
8021- unsigned long ret = ___copy_to_user(to, from, size);
8022+ unsigned long ret;
8023
8024+ if ((long)size < 0 || size > INT_MAX)
8025+ return size;
8026+
8027+ if (!__builtin_constant_p(size))
8028+ check_object_size(from, size, true);
8029+
8030+ ret = ___copy_to_user(to, from, size);
8031 if (unlikely(ret))
8032 ret = copy_to_user_fixup(to, from, size);
8033 return ret;
8034diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8035index 6cf591b..b49e65a 100644
8036--- a/arch/sparc/kernel/Makefile
8037+++ b/arch/sparc/kernel/Makefile
8038@@ -3,7 +3,7 @@
8039 #
8040
8041 asflags-y := -ansi
8042-ccflags-y := -Werror
8043+#ccflags-y := -Werror
8044
8045 extra-y := head_$(BITS).o
8046
8047diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8048index be8e862..5b50b12 100644
8049--- a/arch/sparc/kernel/process_32.c
8050+++ b/arch/sparc/kernel/process_32.c
8051@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
8052
8053 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8054 r->psr, r->pc, r->npc, r->y, print_tainted());
8055- printk("PC: <%pS>\n", (void *) r->pc);
8056+ printk("PC: <%pA>\n", (void *) r->pc);
8057 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8058 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8059 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8060 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8061 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8062 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8063- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8064+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8065
8066 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8067 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8068@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8069 rw = (struct reg_window32 *) fp;
8070 pc = rw->ins[7];
8071 printk("[%08lx : ", pc);
8072- printk("%pS ] ", (void *) pc);
8073+ printk("%pA ] ", (void *) pc);
8074 fp = rw->ins[6];
8075 } while (++count < 16);
8076 printk("\n");
8077diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8078index cdb80b2..5ca141d 100644
8079--- a/arch/sparc/kernel/process_64.c
8080+++ b/arch/sparc/kernel/process_64.c
8081@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8082 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8083 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8084 if (regs->tstate & TSTATE_PRIV)
8085- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8086+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8087 }
8088
8089 void show_regs(struct pt_regs *regs)
8090 {
8091 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8092 regs->tpc, regs->tnpc, regs->y, print_tainted());
8093- printk("TPC: <%pS>\n", (void *) regs->tpc);
8094+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8095 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8096 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8097 regs->u_regs[3]);
8098@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
8099 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8100 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8101 regs->u_regs[15]);
8102- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8103+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8104 show_regwindow(regs);
8105 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8106 }
8107@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
8108 ((tp && tp->task) ? tp->task->pid : -1));
8109
8110 if (gp->tstate & TSTATE_PRIV) {
8111- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8112+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8113 (void *) gp->tpc,
8114 (void *) gp->o7,
8115 (void *) gp->i7,
8116diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8117index 7ff45e4..a58f271 100644
8118--- a/arch/sparc/kernel/ptrace_64.c
8119+++ b/arch/sparc/kernel/ptrace_64.c
8120@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8121 return ret;
8122 }
8123
8124+#ifdef CONFIG_GRKERNSEC_SETXID
8125+extern void gr_delayed_cred_worker(void);
8126+#endif
8127+
8128 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8129 {
8130 int ret = 0;
8131@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8132 /* do the secure computing check first */
8133 secure_computing_strict(regs->u_regs[UREG_G1]);
8134
8135+#ifdef CONFIG_GRKERNSEC_SETXID
8136+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8137+ gr_delayed_cred_worker();
8138+#endif
8139+
8140 if (test_thread_flag(TIF_SYSCALL_TRACE))
8141 ret = tracehook_report_syscall_entry(regs);
8142
8143@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8144
8145 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8146 {
8147+#ifdef CONFIG_GRKERNSEC_SETXID
8148+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8149+ gr_delayed_cred_worker();
8150+#endif
8151+
8152 audit_syscall_exit(regs);
8153
8154 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8155diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8156index 2da0bdc..79128d2 100644
8157--- a/arch/sparc/kernel/sys_sparc_32.c
8158+++ b/arch/sparc/kernel/sys_sparc_32.c
8159@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8160 if (len > TASK_SIZE - PAGE_SIZE)
8161 return -ENOMEM;
8162 if (!addr)
8163- addr = TASK_UNMAPPED_BASE;
8164+ addr = current->mm->mmap_base;
8165
8166 info.flags = 0;
8167 info.length = len;
8168diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8169index 708bc29..f0129cb 100644
8170--- a/arch/sparc/kernel/sys_sparc_64.c
8171+++ b/arch/sparc/kernel/sys_sparc_64.c
8172@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8173 struct vm_area_struct * vma;
8174 unsigned long task_size = TASK_SIZE;
8175 int do_color_align;
8176+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8177 struct vm_unmapped_area_info info;
8178
8179 if (flags & MAP_FIXED) {
8180 /* We do not accept a shared mapping if it would violate
8181 * cache aliasing constraints.
8182 */
8183- if ((flags & MAP_SHARED) &&
8184+ if ((filp || (flags & MAP_SHARED)) &&
8185 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8186 return -EINVAL;
8187 return addr;
8188@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8189 if (filp || (flags & MAP_SHARED))
8190 do_color_align = 1;
8191
8192+#ifdef CONFIG_PAX_RANDMMAP
8193+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8194+#endif
8195+
8196 if (addr) {
8197 if (do_color_align)
8198 addr = COLOR_ALIGN(addr, pgoff);
8199@@ -118,14 +123,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8200 addr = PAGE_ALIGN(addr);
8201
8202 vma = find_vma(mm, addr);
8203- if (task_size - len >= addr &&
8204- (!vma || addr + len <= vma->vm_start))
8205+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8206 return addr;
8207 }
8208
8209 info.flags = 0;
8210 info.length = len;
8211- info.low_limit = TASK_UNMAPPED_BASE;
8212+ info.low_limit = mm->mmap_base;
8213 info.high_limit = min(task_size, VA_EXCLUDE_START);
8214 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8215 info.align_offset = pgoff << PAGE_SHIFT;
8216@@ -134,6 +138,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8217 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8218 VM_BUG_ON(addr != -ENOMEM);
8219 info.low_limit = VA_EXCLUDE_END;
8220+
8221+#ifdef CONFIG_PAX_RANDMMAP
8222+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8223+ info.low_limit += mm->delta_mmap;
8224+#endif
8225+
8226 info.high_limit = task_size;
8227 addr = vm_unmapped_area(&info);
8228 }
8229@@ -151,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8230 unsigned long task_size = STACK_TOP32;
8231 unsigned long addr = addr0;
8232 int do_color_align;
8233+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8234 struct vm_unmapped_area_info info;
8235
8236 /* This should only ever run for 32-bit processes. */
8237@@ -160,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8238 /* We do not accept a shared mapping if it would violate
8239 * cache aliasing constraints.
8240 */
8241- if ((flags & MAP_SHARED) &&
8242+ if ((filp || (flags & MAP_SHARED)) &&
8243 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8244 return -EINVAL;
8245 return addr;
8246@@ -173,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8247 if (filp || (flags & MAP_SHARED))
8248 do_color_align = 1;
8249
8250+#ifdef CONFIG_PAX_RANDMMAP
8251+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8252+#endif
8253+
8254 /* requesting a specific address */
8255 if (addr) {
8256 if (do_color_align)
8257@@ -181,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8258 addr = PAGE_ALIGN(addr);
8259
8260 vma = find_vma(mm, addr);
8261- if (task_size - len >= addr &&
8262- (!vma || addr + len <= vma->vm_start))
8263+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8264 return addr;
8265 }
8266
8267@@ -204,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8268 VM_BUG_ON(addr != -ENOMEM);
8269 info.flags = 0;
8270 info.low_limit = TASK_UNMAPPED_BASE;
8271+
8272+#ifdef CONFIG_PAX_RANDMMAP
8273+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8274+ info.low_limit += mm->delta_mmap;
8275+#endif
8276+
8277 info.high_limit = STACK_TOP32;
8278 addr = vm_unmapped_area(&info);
8279 }
8280@@ -264,6 +284,10 @@ static unsigned long mmap_rnd(void)
8281 {
8282 unsigned long rnd = 0UL;
8283
8284+#ifdef CONFIG_PAX_RANDMMAP
8285+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8286+#endif
8287+
8288 if (current->flags & PF_RANDOMIZE) {
8289 unsigned long val = get_random_int();
8290 if (test_thread_flag(TIF_32BIT))
8291@@ -289,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8292 gap == RLIM_INFINITY ||
8293 sysctl_legacy_va_layout) {
8294 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8295+
8296+#ifdef CONFIG_PAX_RANDMMAP
8297+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8298+ mm->mmap_base += mm->delta_mmap;
8299+#endif
8300+
8301 mm->get_unmapped_area = arch_get_unmapped_area;
8302 mm->unmap_area = arch_unmap_area;
8303 } else {
8304@@ -301,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8305 gap = (task_size / 6 * 5);
8306
8307 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8308+
8309+#ifdef CONFIG_PAX_RANDMMAP
8310+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8311+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8312+#endif
8313+
8314 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8315 mm->unmap_area = arch_unmap_area_topdown;
8316 }
8317diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8318index e0fed77..604a7e5 100644
8319--- a/arch/sparc/kernel/syscalls.S
8320+++ b/arch/sparc/kernel/syscalls.S
8321@@ -58,7 +58,7 @@ sys32_rt_sigreturn:
8322 #endif
8323 .align 32
8324 1: ldx [%g6 + TI_FLAGS], %l5
8325- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8326+ andcc %l5, _TIF_WORK_SYSCALL, %g0
8327 be,pt %icc, rtrap
8328 nop
8329 call syscall_trace_leave
8330@@ -190,7 +190,7 @@ linux_sparc_syscall32:
8331
8332 srl %i5, 0, %o5 ! IEU1
8333 srl %i2, 0, %o2 ! IEU0 Group
8334- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8335+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8336 bne,pn %icc, linux_syscall_trace32 ! CTI
8337 mov %i0, %l5 ! IEU1
8338 call %l7 ! CTI Group brk forced
8339@@ -213,7 +213,7 @@ linux_sparc_syscall:
8340
8341 mov %i3, %o3 ! IEU1
8342 mov %i4, %o4 ! IEU0 Group
8343- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8344+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8345 bne,pn %icc, linux_syscall_trace ! CTI Group
8346 mov %i0, %l5 ! IEU0
8347 2: call %l7 ! CTI Group brk forced
8348@@ -229,7 +229,7 @@ ret_sys_call:
8349
8350 cmp %o0, -ERESTART_RESTARTBLOCK
8351 bgeu,pn %xcc, 1f
8352- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8353+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8354 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8355
8356 2:
8357diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8358index 654e8aa..45f431b 100644
8359--- a/arch/sparc/kernel/sysfs.c
8360+++ b/arch/sparc/kernel/sysfs.c
8361@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8362 return NOTIFY_OK;
8363 }
8364
8365-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8366+static struct notifier_block sysfs_cpu_nb = {
8367 .notifier_call = sysfs_cpu_notify,
8368 };
8369
8370diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8371index a5785ea..405c5f7 100644
8372--- a/arch/sparc/kernel/traps_32.c
8373+++ b/arch/sparc/kernel/traps_32.c
8374@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8375 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8376 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8377
8378+extern void gr_handle_kernel_exploit(void);
8379+
8380 void die_if_kernel(char *str, struct pt_regs *regs)
8381 {
8382 static int die_counter;
8383@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8384 count++ < 30 &&
8385 (((unsigned long) rw) >= PAGE_OFFSET) &&
8386 !(((unsigned long) rw) & 0x7)) {
8387- printk("Caller[%08lx]: %pS\n", rw->ins[7],
8388+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
8389 (void *) rw->ins[7]);
8390 rw = (struct reg_window32 *)rw->ins[6];
8391 }
8392 }
8393 printk("Instruction DUMP:");
8394 instruction_dump ((unsigned long *) regs->pc);
8395- if(regs->psr & PSR_PS)
8396+ if(regs->psr & PSR_PS) {
8397+ gr_handle_kernel_exploit();
8398 do_exit(SIGKILL);
8399+ }
8400 do_exit(SIGSEGV);
8401 }
8402
8403diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8404index e7ecf15..6520e65 100644
8405--- a/arch/sparc/kernel/traps_64.c
8406+++ b/arch/sparc/kernel/traps_64.c
8407@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8408 i + 1,
8409 p->trapstack[i].tstate, p->trapstack[i].tpc,
8410 p->trapstack[i].tnpc, p->trapstack[i].tt);
8411- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8412+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8413 }
8414 }
8415
8416@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8417
8418 lvl -= 0x100;
8419 if (regs->tstate & TSTATE_PRIV) {
8420+
8421+#ifdef CONFIG_PAX_REFCOUNT
8422+ if (lvl == 6)
8423+ pax_report_refcount_overflow(regs);
8424+#endif
8425+
8426 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8427 die_if_kernel(buffer, regs);
8428 }
8429@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8430 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8431 {
8432 char buffer[32];
8433-
8434+
8435 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8436 0, lvl, SIGTRAP) == NOTIFY_STOP)
8437 return;
8438
8439+#ifdef CONFIG_PAX_REFCOUNT
8440+ if (lvl == 6)
8441+ pax_report_refcount_overflow(regs);
8442+#endif
8443+
8444 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8445
8446 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8447@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8448 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8449 printk("%s" "ERROR(%d): ",
8450 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8451- printk("TPC<%pS>\n", (void *) regs->tpc);
8452+ printk("TPC<%pA>\n", (void *) regs->tpc);
8453 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8454 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8455 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8456@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8457 smp_processor_id(),
8458 (type & 0x1) ? 'I' : 'D',
8459 regs->tpc);
8460- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8461+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8462 panic("Irrecoverable Cheetah+ parity error.");
8463 }
8464
8465@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8466 smp_processor_id(),
8467 (type & 0x1) ? 'I' : 'D',
8468 regs->tpc);
8469- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8470+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8471 }
8472
8473 struct sun4v_error_entry {
8474@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8475
8476 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8477 regs->tpc, tl);
8478- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8479+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8480 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8481- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8482+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8483 (void *) regs->u_regs[UREG_I7]);
8484 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8485 "pte[%lx] error[%lx]\n",
8486@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8487
8488 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8489 regs->tpc, tl);
8490- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8491+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8492 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8493- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8494+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8495 (void *) regs->u_regs[UREG_I7]);
8496 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8497 "pte[%lx] error[%lx]\n",
8498@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8499 fp = (unsigned long)sf->fp + STACK_BIAS;
8500 }
8501
8502- printk(" [%016lx] %pS\n", pc, (void *) pc);
8503+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8504 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8505 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8506 int index = tsk->curr_ret_stack;
8507 if (tsk->ret_stack && index >= graph) {
8508 pc = tsk->ret_stack[index - graph].ret;
8509- printk(" [%016lx] %pS\n", pc, (void *) pc);
8510+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8511 graph++;
8512 }
8513 }
8514@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
8515 return (struct reg_window *) (fp + STACK_BIAS);
8516 }
8517
8518+extern void gr_handle_kernel_exploit(void);
8519+
8520 void die_if_kernel(char *str, struct pt_regs *regs)
8521 {
8522 static int die_counter;
8523@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8524 while (rw &&
8525 count++ < 30 &&
8526 kstack_valid(tp, (unsigned long) rw)) {
8527- printk("Caller[%016lx]: %pS\n", rw->ins[7],
8528+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
8529 (void *) rw->ins[7]);
8530
8531 rw = kernel_stack_up(rw);
8532@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8533 }
8534 user_instruction_dump ((unsigned int __user *) regs->tpc);
8535 }
8536- if (regs->tstate & TSTATE_PRIV)
8537+ if (regs->tstate & TSTATE_PRIV) {
8538+ gr_handle_kernel_exploit();
8539 do_exit(SIGKILL);
8540+ }
8541 do_exit(SIGSEGV);
8542 }
8543 EXPORT_SYMBOL(die_if_kernel);
8544diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
8545index 8201c25e..072a2a7 100644
8546--- a/arch/sparc/kernel/unaligned_64.c
8547+++ b/arch/sparc/kernel/unaligned_64.c
8548@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
8549 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
8550
8551 if (__ratelimit(&ratelimit)) {
8552- printk("Kernel unaligned access at TPC[%lx] %pS\n",
8553+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
8554 regs->tpc, (void *) regs->tpc);
8555 }
8556 }
8557diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
8558index 8410065f2..4fd4ca22 100644
8559--- a/arch/sparc/lib/Makefile
8560+++ b/arch/sparc/lib/Makefile
8561@@ -2,7 +2,7 @@
8562 #
8563
8564 asflags-y := -ansi -DST_DIV0=0x02
8565-ccflags-y := -Werror
8566+#ccflags-y := -Werror
8567
8568 lib-$(CONFIG_SPARC32) += ashrdi3.o
8569 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
8570diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
8571index 85c233d..68500e0 100644
8572--- a/arch/sparc/lib/atomic_64.S
8573+++ b/arch/sparc/lib/atomic_64.S
8574@@ -17,7 +17,12 @@
8575 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8576 BACKOFF_SETUP(%o2)
8577 1: lduw [%o1], %g1
8578- add %g1, %o0, %g7
8579+ addcc %g1, %o0, %g7
8580+
8581+#ifdef CONFIG_PAX_REFCOUNT
8582+ tvs %icc, 6
8583+#endif
8584+
8585 cas [%o1], %g1, %g7
8586 cmp %g1, %g7
8587 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8588@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8589 2: BACKOFF_SPIN(%o2, %o3, 1b)
8590 ENDPROC(atomic_add)
8591
8592+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8593+ BACKOFF_SETUP(%o2)
8594+1: lduw [%o1], %g1
8595+ add %g1, %o0, %g7
8596+ cas [%o1], %g1, %g7
8597+ cmp %g1, %g7
8598+ bne,pn %icc, 2f
8599+ nop
8600+ retl
8601+ nop
8602+2: BACKOFF_SPIN(%o2, %o3, 1b)
8603+ENDPROC(atomic_add_unchecked)
8604+
8605 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8606 BACKOFF_SETUP(%o2)
8607 1: lduw [%o1], %g1
8608- sub %g1, %o0, %g7
8609+ subcc %g1, %o0, %g7
8610+
8611+#ifdef CONFIG_PAX_REFCOUNT
8612+ tvs %icc, 6
8613+#endif
8614+
8615 cas [%o1], %g1, %g7
8616 cmp %g1, %g7
8617 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8618@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8619 2: BACKOFF_SPIN(%o2, %o3, 1b)
8620 ENDPROC(atomic_sub)
8621
8622+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8623+ BACKOFF_SETUP(%o2)
8624+1: lduw [%o1], %g1
8625+ sub %g1, %o0, %g7
8626+ cas [%o1], %g1, %g7
8627+ cmp %g1, %g7
8628+ bne,pn %icc, 2f
8629+ nop
8630+ retl
8631+ nop
8632+2: BACKOFF_SPIN(%o2, %o3, 1b)
8633+ENDPROC(atomic_sub_unchecked)
8634+
8635 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8636 BACKOFF_SETUP(%o2)
8637 1: lduw [%o1], %g1
8638- add %g1, %o0, %g7
8639+ addcc %g1, %o0, %g7
8640+
8641+#ifdef CONFIG_PAX_REFCOUNT
8642+ tvs %icc, 6
8643+#endif
8644+
8645 cas [%o1], %g1, %g7
8646 cmp %g1, %g7
8647 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8648@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8649 2: BACKOFF_SPIN(%o2, %o3, 1b)
8650 ENDPROC(atomic_add_ret)
8651
8652+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8653+ BACKOFF_SETUP(%o2)
8654+1: lduw [%o1], %g1
8655+ addcc %g1, %o0, %g7
8656+ cas [%o1], %g1, %g7
8657+ cmp %g1, %g7
8658+ bne,pn %icc, 2f
8659+ add %g7, %o0, %g7
8660+ sra %g7, 0, %o0
8661+ retl
8662+ nop
8663+2: BACKOFF_SPIN(%o2, %o3, 1b)
8664+ENDPROC(atomic_add_ret_unchecked)
8665+
8666 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
8667 BACKOFF_SETUP(%o2)
8668 1: lduw [%o1], %g1
8669- sub %g1, %o0, %g7
8670+ subcc %g1, %o0, %g7
8671+
8672+#ifdef CONFIG_PAX_REFCOUNT
8673+ tvs %icc, 6
8674+#endif
8675+
8676 cas [%o1], %g1, %g7
8677 cmp %g1, %g7
8678 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8679@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
8680 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8681 BACKOFF_SETUP(%o2)
8682 1: ldx [%o1], %g1
8683- add %g1, %o0, %g7
8684+ addcc %g1, %o0, %g7
8685+
8686+#ifdef CONFIG_PAX_REFCOUNT
8687+ tvs %xcc, 6
8688+#endif
8689+
8690 casx [%o1], %g1, %g7
8691 cmp %g1, %g7
8692 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8693@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8694 2: BACKOFF_SPIN(%o2, %o3, 1b)
8695 ENDPROC(atomic64_add)
8696
8697+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8698+ BACKOFF_SETUP(%o2)
8699+1: ldx [%o1], %g1
8700+ addcc %g1, %o0, %g7
8701+ casx [%o1], %g1, %g7
8702+ cmp %g1, %g7
8703+ bne,pn %xcc, 2f
8704+ nop
8705+ retl
8706+ nop
8707+2: BACKOFF_SPIN(%o2, %o3, 1b)
8708+ENDPROC(atomic64_add_unchecked)
8709+
8710 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8711 BACKOFF_SETUP(%o2)
8712 1: ldx [%o1], %g1
8713- sub %g1, %o0, %g7
8714+ subcc %g1, %o0, %g7
8715+
8716+#ifdef CONFIG_PAX_REFCOUNT
8717+ tvs %xcc, 6
8718+#endif
8719+
8720 casx [%o1], %g1, %g7
8721 cmp %g1, %g7
8722 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8723@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8724 2: BACKOFF_SPIN(%o2, %o3, 1b)
8725 ENDPROC(atomic64_sub)
8726
8727+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8728+ BACKOFF_SETUP(%o2)
8729+1: ldx [%o1], %g1
8730+ subcc %g1, %o0, %g7
8731+ casx [%o1], %g1, %g7
8732+ cmp %g1, %g7
8733+ bne,pn %xcc, 2f
8734+ nop
8735+ retl
8736+ nop
8737+2: BACKOFF_SPIN(%o2, %o3, 1b)
8738+ENDPROC(atomic64_sub_unchecked)
8739+
8740 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8741 BACKOFF_SETUP(%o2)
8742 1: ldx [%o1], %g1
8743- add %g1, %o0, %g7
8744+ addcc %g1, %o0, %g7
8745+
8746+#ifdef CONFIG_PAX_REFCOUNT
8747+ tvs %xcc, 6
8748+#endif
8749+
8750 casx [%o1], %g1, %g7
8751 cmp %g1, %g7
8752 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8753@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8754 2: BACKOFF_SPIN(%o2, %o3, 1b)
8755 ENDPROC(atomic64_add_ret)
8756
8757+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8758+ BACKOFF_SETUP(%o2)
8759+1: ldx [%o1], %g1
8760+ addcc %g1, %o0, %g7
8761+ casx [%o1], %g1, %g7
8762+ cmp %g1, %g7
8763+ bne,pn %xcc, 2f
8764+ add %g7, %o0, %g7
8765+ mov %g7, %o0
8766+ retl
8767+ nop
8768+2: BACKOFF_SPIN(%o2, %o3, 1b)
8769+ENDPROC(atomic64_add_ret_unchecked)
8770+
8771 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
8772 BACKOFF_SETUP(%o2)
8773 1: ldx [%o1], %g1
8774- sub %g1, %o0, %g7
8775+ subcc %g1, %o0, %g7
8776+
8777+#ifdef CONFIG_PAX_REFCOUNT
8778+ tvs %xcc, 6
8779+#endif
8780+
8781 casx [%o1], %g1, %g7
8782 cmp %g1, %g7
8783 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8784diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
8785index 0c4e35e..745d3e4 100644
8786--- a/arch/sparc/lib/ksyms.c
8787+++ b/arch/sparc/lib/ksyms.c
8788@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
8789
8790 /* Atomic counter implementation. */
8791 EXPORT_SYMBOL(atomic_add);
8792+EXPORT_SYMBOL(atomic_add_unchecked);
8793 EXPORT_SYMBOL(atomic_add_ret);
8794+EXPORT_SYMBOL(atomic_add_ret_unchecked);
8795 EXPORT_SYMBOL(atomic_sub);
8796+EXPORT_SYMBOL(atomic_sub_unchecked);
8797 EXPORT_SYMBOL(atomic_sub_ret);
8798 EXPORT_SYMBOL(atomic64_add);
8799+EXPORT_SYMBOL(atomic64_add_unchecked);
8800 EXPORT_SYMBOL(atomic64_add_ret);
8801+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
8802 EXPORT_SYMBOL(atomic64_sub);
8803+EXPORT_SYMBOL(atomic64_sub_unchecked);
8804 EXPORT_SYMBOL(atomic64_sub_ret);
8805 EXPORT_SYMBOL(atomic64_dec_if_positive);
8806
8807diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
8808index 30c3ecc..736f015 100644
8809--- a/arch/sparc/mm/Makefile
8810+++ b/arch/sparc/mm/Makefile
8811@@ -2,7 +2,7 @@
8812 #
8813
8814 asflags-y := -ansi
8815-ccflags-y := -Werror
8816+#ccflags-y := -Werror
8817
8818 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
8819 obj-y += fault_$(BITS).o
8820diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
8821index e98bfda..ea8d221 100644
8822--- a/arch/sparc/mm/fault_32.c
8823+++ b/arch/sparc/mm/fault_32.c
8824@@ -21,6 +21,9 @@
8825 #include <linux/perf_event.h>
8826 #include <linux/interrupt.h>
8827 #include <linux/kdebug.h>
8828+#include <linux/slab.h>
8829+#include <linux/pagemap.h>
8830+#include <linux/compiler.h>
8831
8832 #include <asm/page.h>
8833 #include <asm/pgtable.h>
8834@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
8835 return safe_compute_effective_address(regs, insn);
8836 }
8837
8838+#ifdef CONFIG_PAX_PAGEEXEC
8839+#ifdef CONFIG_PAX_DLRESOLVE
8840+static void pax_emuplt_close(struct vm_area_struct *vma)
8841+{
8842+ vma->vm_mm->call_dl_resolve = 0UL;
8843+}
8844+
8845+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8846+{
8847+ unsigned int *kaddr;
8848+
8849+ vmf->page = alloc_page(GFP_HIGHUSER);
8850+ if (!vmf->page)
8851+ return VM_FAULT_OOM;
8852+
8853+ kaddr = kmap(vmf->page);
8854+ memset(kaddr, 0, PAGE_SIZE);
8855+ kaddr[0] = 0x9DE3BFA8U; /* save */
8856+ flush_dcache_page(vmf->page);
8857+ kunmap(vmf->page);
8858+ return VM_FAULT_MAJOR;
8859+}
8860+
8861+static const struct vm_operations_struct pax_vm_ops = {
8862+ .close = pax_emuplt_close,
8863+ .fault = pax_emuplt_fault
8864+};
8865+
8866+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
8867+{
8868+ int ret;
8869+
8870+ INIT_LIST_HEAD(&vma->anon_vma_chain);
8871+ vma->vm_mm = current->mm;
8872+ vma->vm_start = addr;
8873+ vma->vm_end = addr + PAGE_SIZE;
8874+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
8875+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
8876+ vma->vm_ops = &pax_vm_ops;
8877+
8878+ ret = insert_vm_struct(current->mm, vma);
8879+ if (ret)
8880+ return ret;
8881+
8882+ ++current->mm->total_vm;
8883+ return 0;
8884+}
8885+#endif
8886+
8887+/*
8888+ * PaX: decide what to do with offenders (regs->pc = fault address)
8889+ *
8890+ * returns 1 when task should be killed
8891+ * 2 when patched PLT trampoline was detected
8892+ * 3 when unpatched PLT trampoline was detected
8893+ */
8894+static int pax_handle_fetch_fault(struct pt_regs *regs)
8895+{
8896+
8897+#ifdef CONFIG_PAX_EMUPLT
8898+ int err;
8899+
8900+ do { /* PaX: patched PLT emulation #1 */
8901+ unsigned int sethi1, sethi2, jmpl;
8902+
8903+ err = get_user(sethi1, (unsigned int *)regs->pc);
8904+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
8905+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
8906+
8907+ if (err)
8908+ break;
8909+
8910+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
8911+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
8912+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
8913+ {
8914+ unsigned int addr;
8915+
8916+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
8917+ addr = regs->u_regs[UREG_G1];
8918+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
8919+ regs->pc = addr;
8920+ regs->npc = addr+4;
8921+ return 2;
8922+ }
8923+ } while (0);
8924+
8925+ do { /* PaX: patched PLT emulation #2 */
8926+ unsigned int ba;
8927+
8928+ err = get_user(ba, (unsigned int *)regs->pc);
8929+
8930+ if (err)
8931+ break;
8932+
8933+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
8934+ unsigned int addr;
8935+
8936+ if ((ba & 0xFFC00000U) == 0x30800000U)
8937+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
8938+ else
8939+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
8940+ regs->pc = addr;
8941+ regs->npc = addr+4;
8942+ return 2;
8943+ }
8944+ } while (0);
8945+
8946+ do { /* PaX: patched PLT emulation #3 */
8947+ unsigned int sethi, bajmpl, nop;
8948+
8949+ err = get_user(sethi, (unsigned int *)regs->pc);
8950+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
8951+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
8952+
8953+ if (err)
8954+ break;
8955+
8956+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8957+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
8958+ nop == 0x01000000U)
8959+ {
8960+ unsigned int addr;
8961+
8962+ addr = (sethi & 0x003FFFFFU) << 10;
8963+ regs->u_regs[UREG_G1] = addr;
8964+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
8965+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
8966+ else
8967+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
8968+ regs->pc = addr;
8969+ regs->npc = addr+4;
8970+ return 2;
8971+ }
8972+ } while (0);
8973+
8974+ do { /* PaX: unpatched PLT emulation step 1 */
8975+ unsigned int sethi, ba, nop;
8976+
8977+ err = get_user(sethi, (unsigned int *)regs->pc);
8978+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
8979+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
8980+
8981+ if (err)
8982+ break;
8983+
8984+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8985+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
8986+ nop == 0x01000000U)
8987+ {
8988+ unsigned int addr, save, call;
8989+
8990+ if ((ba & 0xFFC00000U) == 0x30800000U)
8991+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
8992+ else
8993+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
8994+
8995+ err = get_user(save, (unsigned int *)addr);
8996+ err |= get_user(call, (unsigned int *)(addr+4));
8997+ err |= get_user(nop, (unsigned int *)(addr+8));
8998+ if (err)
8999+ break;
9000+
9001+#ifdef CONFIG_PAX_DLRESOLVE
9002+ if (save == 0x9DE3BFA8U &&
9003+ (call & 0xC0000000U) == 0x40000000U &&
9004+ nop == 0x01000000U)
9005+ {
9006+ struct vm_area_struct *vma;
9007+ unsigned long call_dl_resolve;
9008+
9009+ down_read(&current->mm->mmap_sem);
9010+ call_dl_resolve = current->mm->call_dl_resolve;
9011+ up_read(&current->mm->mmap_sem);
9012+ if (likely(call_dl_resolve))
9013+ goto emulate;
9014+
9015+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9016+
9017+ down_write(&current->mm->mmap_sem);
9018+ if (current->mm->call_dl_resolve) {
9019+ call_dl_resolve = current->mm->call_dl_resolve;
9020+ up_write(&current->mm->mmap_sem);
9021+ if (vma)
9022+ kmem_cache_free(vm_area_cachep, vma);
9023+ goto emulate;
9024+ }
9025+
9026+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9027+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9028+ up_write(&current->mm->mmap_sem);
9029+ if (vma)
9030+ kmem_cache_free(vm_area_cachep, vma);
9031+ return 1;
9032+ }
9033+
9034+ if (pax_insert_vma(vma, call_dl_resolve)) {
9035+ up_write(&current->mm->mmap_sem);
9036+ kmem_cache_free(vm_area_cachep, vma);
9037+ return 1;
9038+ }
9039+
9040+ current->mm->call_dl_resolve = call_dl_resolve;
9041+ up_write(&current->mm->mmap_sem);
9042+
9043+emulate:
9044+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9045+ regs->pc = call_dl_resolve;
9046+ regs->npc = addr+4;
9047+ return 3;
9048+ }
9049+#endif
9050+
9051+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9052+ if ((save & 0xFFC00000U) == 0x05000000U &&
9053+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9054+ nop == 0x01000000U)
9055+ {
9056+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9057+ regs->u_regs[UREG_G2] = addr + 4;
9058+ addr = (save & 0x003FFFFFU) << 10;
9059+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9060+ regs->pc = addr;
9061+ regs->npc = addr+4;
9062+ return 3;
9063+ }
9064+ }
9065+ } while (0);
9066+
9067+ do { /* PaX: unpatched PLT emulation step 2 */
9068+ unsigned int save, call, nop;
9069+
9070+ err = get_user(save, (unsigned int *)(regs->pc-4));
9071+ err |= get_user(call, (unsigned int *)regs->pc);
9072+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9073+ if (err)
9074+ break;
9075+
9076+ if (save == 0x9DE3BFA8U &&
9077+ (call & 0xC0000000U) == 0x40000000U &&
9078+ nop == 0x01000000U)
9079+ {
9080+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9081+
9082+ regs->u_regs[UREG_RETPC] = regs->pc;
9083+ regs->pc = dl_resolve;
9084+ regs->npc = dl_resolve+4;
9085+ return 3;
9086+ }
9087+ } while (0);
9088+#endif
9089+
9090+ return 1;
9091+}
9092+
9093+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9094+{
9095+ unsigned long i;
9096+
9097+ printk(KERN_ERR "PAX: bytes at PC: ");
9098+ for (i = 0; i < 8; i++) {
9099+ unsigned int c;
9100+ if (get_user(c, (unsigned int *)pc+i))
9101+ printk(KERN_CONT "???????? ");
9102+ else
9103+ printk(KERN_CONT "%08x ", c);
9104+ }
9105+ printk("\n");
9106+}
9107+#endif
9108+
9109 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9110 int text_fault)
9111 {
9112@@ -230,6 +504,24 @@ good_area:
9113 if (!(vma->vm_flags & VM_WRITE))
9114 goto bad_area;
9115 } else {
9116+
9117+#ifdef CONFIG_PAX_PAGEEXEC
9118+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9119+ up_read(&mm->mmap_sem);
9120+ switch (pax_handle_fetch_fault(regs)) {
9121+
9122+#ifdef CONFIG_PAX_EMUPLT
9123+ case 2:
9124+ case 3:
9125+ return;
9126+#endif
9127+
9128+ }
9129+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9130+ do_group_exit(SIGKILL);
9131+ }
9132+#endif
9133+
9134 /* Allow reads even for write-only mappings */
9135 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9136 goto bad_area;
9137diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9138index 5062ff3..e0b75f3 100644
9139--- a/arch/sparc/mm/fault_64.c
9140+++ b/arch/sparc/mm/fault_64.c
9141@@ -21,6 +21,9 @@
9142 #include <linux/kprobes.h>
9143 #include <linux/kdebug.h>
9144 #include <linux/percpu.h>
9145+#include <linux/slab.h>
9146+#include <linux/pagemap.h>
9147+#include <linux/compiler.h>
9148
9149 #include <asm/page.h>
9150 #include <asm/pgtable.h>
9151@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9152 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9153 regs->tpc);
9154 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9155- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9156+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9157 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9158 dump_stack();
9159 unhandled_fault(regs->tpc, current, regs);
9160@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9161 show_regs(regs);
9162 }
9163
9164+#ifdef CONFIG_PAX_PAGEEXEC
9165+#ifdef CONFIG_PAX_DLRESOLVE
9166+static void pax_emuplt_close(struct vm_area_struct *vma)
9167+{
9168+ vma->vm_mm->call_dl_resolve = 0UL;
9169+}
9170+
9171+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9172+{
9173+ unsigned int *kaddr;
9174+
9175+ vmf->page = alloc_page(GFP_HIGHUSER);
9176+ if (!vmf->page)
9177+ return VM_FAULT_OOM;
9178+
9179+ kaddr = kmap(vmf->page);
9180+ memset(kaddr, 0, PAGE_SIZE);
9181+ kaddr[0] = 0x9DE3BFA8U; /* save */
9182+ flush_dcache_page(vmf->page);
9183+ kunmap(vmf->page);
9184+ return VM_FAULT_MAJOR;
9185+}
9186+
9187+static const struct vm_operations_struct pax_vm_ops = {
9188+ .close = pax_emuplt_close,
9189+ .fault = pax_emuplt_fault
9190+};
9191+
9192+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9193+{
9194+ int ret;
9195+
9196+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9197+ vma->vm_mm = current->mm;
9198+ vma->vm_start = addr;
9199+ vma->vm_end = addr + PAGE_SIZE;
9200+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9201+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9202+ vma->vm_ops = &pax_vm_ops;
9203+
9204+ ret = insert_vm_struct(current->mm, vma);
9205+ if (ret)
9206+ return ret;
9207+
9208+ ++current->mm->total_vm;
9209+ return 0;
9210+}
9211+#endif
9212+
9213+/*
9214+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9215+ *
9216+ * returns 1 when task should be killed
9217+ * 2 when patched PLT trampoline was detected
9218+ * 3 when unpatched PLT trampoline was detected
9219+ */
9220+static int pax_handle_fetch_fault(struct pt_regs *regs)
9221+{
9222+
9223+#ifdef CONFIG_PAX_EMUPLT
9224+ int err;
9225+
9226+ do { /* PaX: patched PLT emulation #1 */
9227+ unsigned int sethi1, sethi2, jmpl;
9228+
9229+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9230+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9231+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9232+
9233+ if (err)
9234+ break;
9235+
9236+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9237+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9238+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9239+ {
9240+ unsigned long addr;
9241+
9242+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9243+ addr = regs->u_regs[UREG_G1];
9244+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9245+
9246+ if (test_thread_flag(TIF_32BIT))
9247+ addr &= 0xFFFFFFFFUL;
9248+
9249+ regs->tpc = addr;
9250+ regs->tnpc = addr+4;
9251+ return 2;
9252+ }
9253+ } while (0);
9254+
9255+ do { /* PaX: patched PLT emulation #2 */
9256+ unsigned int ba;
9257+
9258+ err = get_user(ba, (unsigned int *)regs->tpc);
9259+
9260+ if (err)
9261+ break;
9262+
9263+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9264+ unsigned long addr;
9265+
9266+ if ((ba & 0xFFC00000U) == 0x30800000U)
9267+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9268+ else
9269+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9270+
9271+ if (test_thread_flag(TIF_32BIT))
9272+ addr &= 0xFFFFFFFFUL;
9273+
9274+ regs->tpc = addr;
9275+ regs->tnpc = addr+4;
9276+ return 2;
9277+ }
9278+ } while (0);
9279+
9280+ do { /* PaX: patched PLT emulation #3 */
9281+ unsigned int sethi, bajmpl, nop;
9282+
9283+ err = get_user(sethi, (unsigned int *)regs->tpc);
9284+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9285+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9286+
9287+ if (err)
9288+ break;
9289+
9290+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9291+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9292+ nop == 0x01000000U)
9293+ {
9294+ unsigned long addr;
9295+
9296+ addr = (sethi & 0x003FFFFFU) << 10;
9297+ regs->u_regs[UREG_G1] = addr;
9298+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9299+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9300+ else
9301+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9302+
9303+ if (test_thread_flag(TIF_32BIT))
9304+ addr &= 0xFFFFFFFFUL;
9305+
9306+ regs->tpc = addr;
9307+ regs->tnpc = addr+4;
9308+ return 2;
9309+ }
9310+ } while (0);
9311+
9312+ do { /* PaX: patched PLT emulation #4 */
9313+ unsigned int sethi, mov1, call, mov2;
9314+
9315+ err = get_user(sethi, (unsigned int *)regs->tpc);
9316+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9317+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9318+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9319+
9320+ if (err)
9321+ break;
9322+
9323+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9324+ mov1 == 0x8210000FU &&
9325+ (call & 0xC0000000U) == 0x40000000U &&
9326+ mov2 == 0x9E100001U)
9327+ {
9328+ unsigned long addr;
9329+
9330+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9331+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9332+
9333+ if (test_thread_flag(TIF_32BIT))
9334+ addr &= 0xFFFFFFFFUL;
9335+
9336+ regs->tpc = addr;
9337+ regs->tnpc = addr+4;
9338+ return 2;
9339+ }
9340+ } while (0);
9341+
9342+ do { /* PaX: patched PLT emulation #5 */
9343+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9344+
9345+ err = get_user(sethi, (unsigned int *)regs->tpc);
9346+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9347+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9348+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9349+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9350+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9351+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9352+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9353+
9354+ if (err)
9355+ break;
9356+
9357+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9358+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9359+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9360+ (or1 & 0xFFFFE000U) == 0x82106000U &&
9361+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9362+ sllx == 0x83287020U &&
9363+ jmpl == 0x81C04005U &&
9364+ nop == 0x01000000U)
9365+ {
9366+ unsigned long addr;
9367+
9368+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9369+ regs->u_regs[UREG_G1] <<= 32;
9370+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9371+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9372+ regs->tpc = addr;
9373+ regs->tnpc = addr+4;
9374+ return 2;
9375+ }
9376+ } while (0);
9377+
9378+ do { /* PaX: patched PLT emulation #6 */
9379+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9380+
9381+ err = get_user(sethi, (unsigned int *)regs->tpc);
9382+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9383+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9384+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9385+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
9386+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9387+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9388+
9389+ if (err)
9390+ break;
9391+
9392+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9393+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9394+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9395+ sllx == 0x83287020U &&
9396+ (or & 0xFFFFE000U) == 0x8A116000U &&
9397+ jmpl == 0x81C04005U &&
9398+ nop == 0x01000000U)
9399+ {
9400+ unsigned long addr;
9401+
9402+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
9403+ regs->u_regs[UREG_G1] <<= 32;
9404+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
9405+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9406+ regs->tpc = addr;
9407+ regs->tnpc = addr+4;
9408+ return 2;
9409+ }
9410+ } while (0);
9411+
9412+ do { /* PaX: unpatched PLT emulation step 1 */
9413+ unsigned int sethi, ba, nop;
9414+
9415+ err = get_user(sethi, (unsigned int *)regs->tpc);
9416+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9417+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9418+
9419+ if (err)
9420+ break;
9421+
9422+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9423+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9424+ nop == 0x01000000U)
9425+ {
9426+ unsigned long addr;
9427+ unsigned int save, call;
9428+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
9429+
9430+ if ((ba & 0xFFC00000U) == 0x30800000U)
9431+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9432+ else
9433+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9434+
9435+ if (test_thread_flag(TIF_32BIT))
9436+ addr &= 0xFFFFFFFFUL;
9437+
9438+ err = get_user(save, (unsigned int *)addr);
9439+ err |= get_user(call, (unsigned int *)(addr+4));
9440+ err |= get_user(nop, (unsigned int *)(addr+8));
9441+ if (err)
9442+ break;
9443+
9444+#ifdef CONFIG_PAX_DLRESOLVE
9445+ if (save == 0x9DE3BFA8U &&
9446+ (call & 0xC0000000U) == 0x40000000U &&
9447+ nop == 0x01000000U)
9448+ {
9449+ struct vm_area_struct *vma;
9450+ unsigned long call_dl_resolve;
9451+
9452+ down_read(&current->mm->mmap_sem);
9453+ call_dl_resolve = current->mm->call_dl_resolve;
9454+ up_read(&current->mm->mmap_sem);
9455+ if (likely(call_dl_resolve))
9456+ goto emulate;
9457+
9458+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9459+
9460+ down_write(&current->mm->mmap_sem);
9461+ if (current->mm->call_dl_resolve) {
9462+ call_dl_resolve = current->mm->call_dl_resolve;
9463+ up_write(&current->mm->mmap_sem);
9464+ if (vma)
9465+ kmem_cache_free(vm_area_cachep, vma);
9466+ goto emulate;
9467+ }
9468+
9469+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9470+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9471+ up_write(&current->mm->mmap_sem);
9472+ if (vma)
9473+ kmem_cache_free(vm_area_cachep, vma);
9474+ return 1;
9475+ }
9476+
9477+ if (pax_insert_vma(vma, call_dl_resolve)) {
9478+ up_write(&current->mm->mmap_sem);
9479+ kmem_cache_free(vm_area_cachep, vma);
9480+ return 1;
9481+ }
9482+
9483+ current->mm->call_dl_resolve = call_dl_resolve;
9484+ up_write(&current->mm->mmap_sem);
9485+
9486+emulate:
9487+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9488+ regs->tpc = call_dl_resolve;
9489+ regs->tnpc = addr+4;
9490+ return 3;
9491+ }
9492+#endif
9493+
9494+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9495+ if ((save & 0xFFC00000U) == 0x05000000U &&
9496+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9497+ nop == 0x01000000U)
9498+ {
9499+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9500+ regs->u_regs[UREG_G2] = addr + 4;
9501+ addr = (save & 0x003FFFFFU) << 10;
9502+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9503+
9504+ if (test_thread_flag(TIF_32BIT))
9505+ addr &= 0xFFFFFFFFUL;
9506+
9507+ regs->tpc = addr;
9508+ regs->tnpc = addr+4;
9509+ return 3;
9510+ }
9511+
9512+ /* PaX: 64-bit PLT stub */
9513+ err = get_user(sethi1, (unsigned int *)addr);
9514+ err |= get_user(sethi2, (unsigned int *)(addr+4));
9515+ err |= get_user(or1, (unsigned int *)(addr+8));
9516+ err |= get_user(or2, (unsigned int *)(addr+12));
9517+ err |= get_user(sllx, (unsigned int *)(addr+16));
9518+ err |= get_user(add, (unsigned int *)(addr+20));
9519+ err |= get_user(jmpl, (unsigned int *)(addr+24));
9520+ err |= get_user(nop, (unsigned int *)(addr+28));
9521+ if (err)
9522+ break;
9523+
9524+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
9525+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9526+ (or1 & 0xFFFFE000U) == 0x88112000U &&
9527+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9528+ sllx == 0x89293020U &&
9529+ add == 0x8A010005U &&
9530+ jmpl == 0x89C14000U &&
9531+ nop == 0x01000000U)
9532+ {
9533+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9534+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9535+ regs->u_regs[UREG_G4] <<= 32;
9536+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9537+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
9538+ regs->u_regs[UREG_G4] = addr + 24;
9539+ addr = regs->u_regs[UREG_G5];
9540+ regs->tpc = addr;
9541+ regs->tnpc = addr+4;
9542+ return 3;
9543+ }
9544+ }
9545+ } while (0);
9546+
9547+#ifdef CONFIG_PAX_DLRESOLVE
9548+ do { /* PaX: unpatched PLT emulation step 2 */
9549+ unsigned int save, call, nop;
9550+
9551+ err = get_user(save, (unsigned int *)(regs->tpc-4));
9552+ err |= get_user(call, (unsigned int *)regs->tpc);
9553+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
9554+ if (err)
9555+ break;
9556+
9557+ if (save == 0x9DE3BFA8U &&
9558+ (call & 0xC0000000U) == 0x40000000U &&
9559+ nop == 0x01000000U)
9560+ {
9561+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9562+
9563+ if (test_thread_flag(TIF_32BIT))
9564+ dl_resolve &= 0xFFFFFFFFUL;
9565+
9566+ regs->u_regs[UREG_RETPC] = regs->tpc;
9567+ regs->tpc = dl_resolve;
9568+ regs->tnpc = dl_resolve+4;
9569+ return 3;
9570+ }
9571+ } while (0);
9572+#endif
9573+
9574+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
9575+ unsigned int sethi, ba, nop;
9576+
9577+ err = get_user(sethi, (unsigned int *)regs->tpc);
9578+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9579+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9580+
9581+ if (err)
9582+ break;
9583+
9584+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9585+ (ba & 0xFFF00000U) == 0x30600000U &&
9586+ nop == 0x01000000U)
9587+ {
9588+ unsigned long addr;
9589+
9590+ addr = (sethi & 0x003FFFFFU) << 10;
9591+ regs->u_regs[UREG_G1] = addr;
9592+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9593+
9594+ if (test_thread_flag(TIF_32BIT))
9595+ addr &= 0xFFFFFFFFUL;
9596+
9597+ regs->tpc = addr;
9598+ regs->tnpc = addr+4;
9599+ return 2;
9600+ }
9601+ } while (0);
9602+
9603+#endif
9604+
9605+ return 1;
9606+}
9607+
9608+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9609+{
9610+ unsigned long i;
9611+
9612+ printk(KERN_ERR "PAX: bytes at PC: ");
9613+ for (i = 0; i < 8; i++) {
9614+ unsigned int c;
9615+ if (get_user(c, (unsigned int *)pc+i))
9616+ printk(KERN_CONT "???????? ");
9617+ else
9618+ printk(KERN_CONT "%08x ", c);
9619+ }
9620+ printk("\n");
9621+}
9622+#endif
9623+
9624 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
9625 {
9626 struct mm_struct *mm = current->mm;
9627@@ -341,6 +804,29 @@ retry:
9628 if (!vma)
9629 goto bad_area;
9630
9631+#ifdef CONFIG_PAX_PAGEEXEC
9632+ /* PaX: detect ITLB misses on non-exec pages */
9633+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
9634+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
9635+ {
9636+ if (address != regs->tpc)
9637+ goto good_area;
9638+
9639+ up_read(&mm->mmap_sem);
9640+ switch (pax_handle_fetch_fault(regs)) {
9641+
9642+#ifdef CONFIG_PAX_EMUPLT
9643+ case 2:
9644+ case 3:
9645+ return;
9646+#endif
9647+
9648+ }
9649+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
9650+ do_group_exit(SIGKILL);
9651+ }
9652+#endif
9653+
9654 /* Pure DTLB misses do not tell us whether the fault causing
9655 * load/store/atomic was a write or not, it only says that there
9656 * was no match. So in such a case we (carefully) read the
9657diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
9658index d2b5944..bd813f2 100644
9659--- a/arch/sparc/mm/hugetlbpage.c
9660+++ b/arch/sparc/mm/hugetlbpage.c
9661@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9662
9663 info.flags = 0;
9664 info.length = len;
9665- info.low_limit = TASK_UNMAPPED_BASE;
9666+ info.low_limit = mm->mmap_base;
9667 info.high_limit = min(task_size, VA_EXCLUDE_START);
9668 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
9669 info.align_offset = 0;
9670@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9671 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9672 VM_BUG_ON(addr != -ENOMEM);
9673 info.low_limit = VA_EXCLUDE_END;
9674+
9675+#ifdef CONFIG_PAX_RANDMMAP
9676+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9677+ info.low_limit += mm->delta_mmap;
9678+#endif
9679+
9680 info.high_limit = task_size;
9681 addr = vm_unmapped_area(&info);
9682 }
9683@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9684 VM_BUG_ON(addr != -ENOMEM);
9685 info.flags = 0;
9686 info.low_limit = TASK_UNMAPPED_BASE;
9687+
9688+#ifdef CONFIG_PAX_RANDMMAP
9689+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9690+ info.low_limit += mm->delta_mmap;
9691+#endif
9692+
9693 info.high_limit = STACK_TOP32;
9694 addr = vm_unmapped_area(&info);
9695 }
9696@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9697 struct mm_struct *mm = current->mm;
9698 struct vm_area_struct *vma;
9699 unsigned long task_size = TASK_SIZE;
9700+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
9701
9702 if (test_thread_flag(TIF_32BIT))
9703 task_size = STACK_TOP32;
9704@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9705 return addr;
9706 }
9707
9708+#ifdef CONFIG_PAX_RANDMMAP
9709+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9710+#endif
9711+
9712 if (addr) {
9713 addr = ALIGN(addr, HPAGE_SIZE);
9714 vma = find_vma(mm, addr);
9715- if (task_size - len >= addr &&
9716- (!vma || addr + len <= vma->vm_start))
9717+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9718 return addr;
9719 }
9720 if (mm->get_unmapped_area == arch_get_unmapped_area)
9721diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
9722index f4500c6..889656c 100644
9723--- a/arch/tile/include/asm/atomic_64.h
9724+++ b/arch/tile/include/asm/atomic_64.h
9725@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9726
9727 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9728
9729+#define atomic64_read_unchecked(v) atomic64_read(v)
9730+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9731+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9732+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9733+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9734+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9735+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9736+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9737+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9738+
9739 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
9740 #define smp_mb__before_atomic_dec() smp_mb()
9741 #define smp_mb__after_atomic_dec() smp_mb()
9742diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
9743index a9a5299..0fce79e 100644
9744--- a/arch/tile/include/asm/cache.h
9745+++ b/arch/tile/include/asm/cache.h
9746@@ -15,11 +15,12 @@
9747 #ifndef _ASM_TILE_CACHE_H
9748 #define _ASM_TILE_CACHE_H
9749
9750+#include <linux/const.h>
9751 #include <arch/chip.h>
9752
9753 /* bytes per L1 data cache line */
9754 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
9755-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9756+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9757
9758 /* bytes per L2 cache line */
9759 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
9760diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
9761index 9ab078a..d6635c2 100644
9762--- a/arch/tile/include/asm/uaccess.h
9763+++ b/arch/tile/include/asm/uaccess.h
9764@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
9765 const void __user *from,
9766 unsigned long n)
9767 {
9768- int sz = __compiletime_object_size(to);
9769+ size_t sz = __compiletime_object_size(to);
9770
9771- if (likely(sz == -1 || sz >= n))
9772+ if (likely(sz == (size_t)-1 || sz >= n))
9773 n = _copy_from_user(to, from, n);
9774 else
9775 copy_from_user_overflow();
9776diff --git a/arch/um/Makefile b/arch/um/Makefile
9777index 133f7de..1d6f2f1 100644
9778--- a/arch/um/Makefile
9779+++ b/arch/um/Makefile
9780@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
9781 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
9782 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
9783
9784+ifdef CONSTIFY_PLUGIN
9785+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
9786+endif
9787+
9788 #This will adjust *FLAGS accordingly to the platform.
9789 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
9790
9791diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
9792index 19e1bdd..3665b77 100644
9793--- a/arch/um/include/asm/cache.h
9794+++ b/arch/um/include/asm/cache.h
9795@@ -1,6 +1,7 @@
9796 #ifndef __UM_CACHE_H
9797 #define __UM_CACHE_H
9798
9799+#include <linux/const.h>
9800
9801 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
9802 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9803@@ -12,6 +13,6 @@
9804 # define L1_CACHE_SHIFT 5
9805 #endif
9806
9807-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9808+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9809
9810 #endif
9811diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
9812index 2e0a6b1..a64d0f5 100644
9813--- a/arch/um/include/asm/kmap_types.h
9814+++ b/arch/um/include/asm/kmap_types.h
9815@@ -8,6 +8,6 @@
9816
9817 /* No more #include "asm/arch/kmap_types.h" ! */
9818
9819-#define KM_TYPE_NR 14
9820+#define KM_TYPE_NR 15
9821
9822 #endif
9823diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
9824index 5ff53d9..5850cdf 100644
9825--- a/arch/um/include/asm/page.h
9826+++ b/arch/um/include/asm/page.h
9827@@ -14,6 +14,9 @@
9828 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
9829 #define PAGE_MASK (~(PAGE_SIZE-1))
9830
9831+#define ktla_ktva(addr) (addr)
9832+#define ktva_ktla(addr) (addr)
9833+
9834 #ifndef __ASSEMBLY__
9835
9836 struct page;
9837diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
9838index 0032f92..cd151e0 100644
9839--- a/arch/um/include/asm/pgtable-3level.h
9840+++ b/arch/um/include/asm/pgtable-3level.h
9841@@ -58,6 +58,7 @@
9842 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
9843 #define pud_populate(mm, pud, pmd) \
9844 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
9845+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
9846
9847 #ifdef CONFIG_64BIT
9848 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
9849diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
9850index b462b13..e7a19aa 100644
9851--- a/arch/um/kernel/process.c
9852+++ b/arch/um/kernel/process.c
9853@@ -386,22 +386,6 @@ int singlestepping(void * t)
9854 return 2;
9855 }
9856
9857-/*
9858- * Only x86 and x86_64 have an arch_align_stack().
9859- * All other arches have "#define arch_align_stack(x) (x)"
9860- * in their asm/system.h
9861- * As this is included in UML from asm-um/system-generic.h,
9862- * we can use it to behave as the subarch does.
9863- */
9864-#ifndef arch_align_stack
9865-unsigned long arch_align_stack(unsigned long sp)
9866-{
9867- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9868- sp -= get_random_int() % 8192;
9869- return sp & ~0xf;
9870-}
9871-#endif
9872-
9873 unsigned long get_wchan(struct task_struct *p)
9874 {
9875 unsigned long stack_page, sp, ip;
9876diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
9877index ad8f795..2c7eec6 100644
9878--- a/arch/unicore32/include/asm/cache.h
9879+++ b/arch/unicore32/include/asm/cache.h
9880@@ -12,8 +12,10 @@
9881 #ifndef __UNICORE_CACHE_H__
9882 #define __UNICORE_CACHE_H__
9883
9884-#define L1_CACHE_SHIFT (5)
9885-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9886+#include <linux/const.h>
9887+
9888+#define L1_CACHE_SHIFT 5
9889+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9890
9891 /*
9892 * Memory returned by kmalloc() may be used for DMA, so we must make
9893diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
9894index 0694d09..b58b3aa 100644
9895--- a/arch/x86/Kconfig
9896+++ b/arch/x86/Kconfig
9897@@ -238,7 +238,7 @@ config X86_HT
9898
9899 config X86_32_LAZY_GS
9900 def_bool y
9901- depends on X86_32 && !CC_STACKPROTECTOR
9902+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
9903
9904 config ARCH_HWEIGHT_CFLAGS
9905 string
9906@@ -1031,6 +1031,7 @@ config MICROCODE_OLD_INTERFACE
9907
9908 config X86_MSR
9909 tristate "/dev/cpu/*/msr - Model-specific register support"
9910+ depends on !GRKERNSEC_KMEM
9911 ---help---
9912 This device gives privileged processes access to the x86
9913 Model-Specific Registers (MSRs). It is a character device with
9914@@ -1054,7 +1055,7 @@ choice
9915
9916 config NOHIGHMEM
9917 bool "off"
9918- depends on !X86_NUMAQ
9919+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
9920 ---help---
9921 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
9922 However, the address space of 32-bit x86 processors is only 4
9923@@ -1091,7 +1092,7 @@ config NOHIGHMEM
9924
9925 config HIGHMEM4G
9926 bool "4GB"
9927- depends on !X86_NUMAQ
9928+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
9929 ---help---
9930 Select this if you have a 32-bit processor and between 1 and 4
9931 gigabytes of physical RAM.
9932@@ -1145,7 +1146,7 @@ config PAGE_OFFSET
9933 hex
9934 default 0xB0000000 if VMSPLIT_3G_OPT
9935 default 0x80000000 if VMSPLIT_2G
9936- default 0x78000000 if VMSPLIT_2G_OPT
9937+ default 0x70000000 if VMSPLIT_2G_OPT
9938 default 0x40000000 if VMSPLIT_1G
9939 default 0xC0000000
9940 depends on X86_32
9941@@ -1542,6 +1543,7 @@ config SECCOMP
9942
9943 config CC_STACKPROTECTOR
9944 bool "Enable -fstack-protector buffer overflow detection"
9945+ depends on X86_64 || !PAX_MEMORY_UDEREF
9946 ---help---
9947 This option turns on the -fstack-protector GCC feature. This
9948 feature puts, at the beginning of functions, a canary value on
9949@@ -1599,6 +1601,7 @@ config KEXEC_JUMP
9950 config PHYSICAL_START
9951 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
9952 default "0x1000000"
9953+ range 0x400000 0x40000000
9954 ---help---
9955 This gives the physical address where the kernel is loaded.
9956
9957@@ -1662,6 +1665,7 @@ config X86_NEED_RELOCS
9958 config PHYSICAL_ALIGN
9959 hex "Alignment value to which kernel should be aligned" if X86_32
9960 default "0x1000000"
9961+ range 0x400000 0x1000000 if PAX_KERNEXEC
9962 range 0x2000 0x1000000
9963 ---help---
9964 This value puts the alignment restrictions on physical address
9965@@ -1737,9 +1741,10 @@ config DEBUG_HOTPLUG_CPU0
9966 If unsure, say N.
9967
9968 config COMPAT_VDSO
9969- def_bool y
9970+ def_bool n
9971 prompt "Compat VDSO support"
9972 depends on X86_32 || IA32_EMULATION
9973+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
9974 ---help---
9975 Map the 32-bit VDSO to the predictable old-style address too.
9976
9977diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
9978index c026cca..14657ae 100644
9979--- a/arch/x86/Kconfig.cpu
9980+++ b/arch/x86/Kconfig.cpu
9981@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
9982
9983 config X86_F00F_BUG
9984 def_bool y
9985- depends on M586MMX || M586TSC || M586 || M486
9986+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
9987
9988 config X86_INVD_BUG
9989 def_bool y
9990@@ -327,7 +327,7 @@ config X86_INVD_BUG
9991
9992 config X86_ALIGNMENT_16
9993 def_bool y
9994- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
9995+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
9996
9997 config X86_INTEL_USERCOPY
9998 def_bool y
9999@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10000 # generates cmov.
10001 config X86_CMOV
10002 def_bool y
10003- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10004+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10005
10006 config X86_MINIMUM_CPU_FAMILY
10007 int
10008diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10009index b322f12..652d0d9 100644
10010--- a/arch/x86/Kconfig.debug
10011+++ b/arch/x86/Kconfig.debug
10012@@ -84,7 +84,7 @@ config X86_PTDUMP
10013 config DEBUG_RODATA
10014 bool "Write protect kernel read-only data structures"
10015 default y
10016- depends on DEBUG_KERNEL
10017+ depends on DEBUG_KERNEL && BROKEN
10018 ---help---
10019 Mark the kernel read-only data as write-protected in the pagetables,
10020 in order to catch accidental (and incorrect) writes to such const
10021@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10022
10023 config DEBUG_SET_MODULE_RONX
10024 bool "Set loadable kernel module data as NX and text as RO"
10025- depends on MODULES
10026+ depends on MODULES && BROKEN
10027 ---help---
10028 This option helps catch unintended modifications to loadable
10029 kernel module's text and read-only data. It also prevents execution
10030@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
10031
10032 config DEBUG_STRICT_USER_COPY_CHECKS
10033 bool "Strict copy size checks"
10034- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
10035+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
10036 ---help---
10037 Enabling this option turns a certain set of sanity checks for user
10038 copy operations into compile time failures.
10039diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10040index e71fc42..7829607 100644
10041--- a/arch/x86/Makefile
10042+++ b/arch/x86/Makefile
10043@@ -50,6 +50,7 @@ else
10044 UTS_MACHINE := x86_64
10045 CHECKFLAGS += -D__x86_64__ -m64
10046
10047+ biarch := $(call cc-option,-m64)
10048 KBUILD_AFLAGS += -m64
10049 KBUILD_CFLAGS += -m64
10050
10051@@ -230,3 +231,12 @@ define archhelp
10052 echo ' FDARGS="..." arguments for the booted kernel'
10053 echo ' FDINITRD=file initrd for the booted kernel'
10054 endef
10055+
10056+define OLD_LD
10057+
10058+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10059+*** Please upgrade your binutils to 2.18 or newer
10060+endef
10061+
10062+archprepare:
10063+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10064diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10065index 379814b..add62ce 100644
10066--- a/arch/x86/boot/Makefile
10067+++ b/arch/x86/boot/Makefile
10068@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10069 $(call cc-option, -fno-stack-protector) \
10070 $(call cc-option, -mpreferred-stack-boundary=2)
10071 KBUILD_CFLAGS += $(call cc-option, -m32)
10072+ifdef CONSTIFY_PLUGIN
10073+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10074+endif
10075 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10076 GCOV_PROFILE := n
10077
10078diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10079index 878e4b9..20537ab 100644
10080--- a/arch/x86/boot/bitops.h
10081+++ b/arch/x86/boot/bitops.h
10082@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10083 u8 v;
10084 const u32 *p = (const u32 *)addr;
10085
10086- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10087+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10088 return v;
10089 }
10090
10091@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10092
10093 static inline void set_bit(int nr, void *addr)
10094 {
10095- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10096+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10097 }
10098
10099 #endif /* BOOT_BITOPS_H */
10100diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10101index 18997e5..83d9c67 100644
10102--- a/arch/x86/boot/boot.h
10103+++ b/arch/x86/boot/boot.h
10104@@ -85,7 +85,7 @@ static inline void io_delay(void)
10105 static inline u16 ds(void)
10106 {
10107 u16 seg;
10108- asm("movw %%ds,%0" : "=rm" (seg));
10109+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10110 return seg;
10111 }
10112
10113@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10114 static inline int memcmp(const void *s1, const void *s2, size_t len)
10115 {
10116 u8 diff;
10117- asm("repe; cmpsb; setnz %0"
10118+ asm volatile("repe; cmpsb; setnz %0"
10119 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10120 return diff;
10121 }
10122diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10123index 8a84501..b2d165f 100644
10124--- a/arch/x86/boot/compressed/Makefile
10125+++ b/arch/x86/boot/compressed/Makefile
10126@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10127 KBUILD_CFLAGS += $(cflags-y)
10128 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10129 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10130+ifdef CONSTIFY_PLUGIN
10131+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10132+endif
10133
10134 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10135 GCOV_PROFILE := n
10136diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10137index c205035..5853587 100644
10138--- a/arch/x86/boot/compressed/eboot.c
10139+++ b/arch/x86/boot/compressed/eboot.c
10140@@ -150,7 +150,6 @@ again:
10141 *addr = max_addr;
10142 }
10143
10144-free_pool:
10145 efi_call_phys1(sys_table->boottime->free_pool, map);
10146
10147 fail:
10148@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10149 if (i == map_size / desc_size)
10150 status = EFI_NOT_FOUND;
10151
10152-free_pool:
10153 efi_call_phys1(sys_table->boottime->free_pool, map);
10154 fail:
10155 return status;
10156diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10157index 1e3184f..0d11e2e 100644
10158--- a/arch/x86/boot/compressed/head_32.S
10159+++ b/arch/x86/boot/compressed/head_32.S
10160@@ -118,7 +118,7 @@ preferred_addr:
10161 notl %eax
10162 andl %eax, %ebx
10163 #else
10164- movl $LOAD_PHYSICAL_ADDR, %ebx
10165+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10166 #endif
10167
10168 /* Target address to relocate to for decompression */
10169@@ -204,7 +204,7 @@ relocated:
10170 * and where it was actually loaded.
10171 */
10172 movl %ebp, %ebx
10173- subl $LOAD_PHYSICAL_ADDR, %ebx
10174+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10175 jz 2f /* Nothing to be done if loaded at compiled addr. */
10176 /*
10177 * Process relocations.
10178@@ -212,8 +212,7 @@ relocated:
10179
10180 1: subl $4, %edi
10181 movl (%edi), %ecx
10182- testl %ecx, %ecx
10183- jz 2f
10184+ jecxz 2f
10185 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10186 jmp 1b
10187 2:
10188diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10189index f5d1aaa..cce11dc 100644
10190--- a/arch/x86/boot/compressed/head_64.S
10191+++ b/arch/x86/boot/compressed/head_64.S
10192@@ -91,7 +91,7 @@ ENTRY(startup_32)
10193 notl %eax
10194 andl %eax, %ebx
10195 #else
10196- movl $LOAD_PHYSICAL_ADDR, %ebx
10197+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10198 #endif
10199
10200 /* Target address to relocate to for decompression */
10201@@ -273,7 +273,7 @@ preferred_addr:
10202 notq %rax
10203 andq %rax, %rbp
10204 #else
10205- movq $LOAD_PHYSICAL_ADDR, %rbp
10206+ movq $____LOAD_PHYSICAL_ADDR, %rbp
10207 #endif
10208
10209 /* Target address to relocate to for decompression */
10210diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10211index 88f7ff6..ed695dd 100644
10212--- a/arch/x86/boot/compressed/misc.c
10213+++ b/arch/x86/boot/compressed/misc.c
10214@@ -303,7 +303,7 @@ static void parse_elf(void *output)
10215 case PT_LOAD:
10216 #ifdef CONFIG_RELOCATABLE
10217 dest = output;
10218- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10219+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10220 #else
10221 dest = (void *)(phdr->p_paddr);
10222 #endif
10223@@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10224 error("Destination address too large");
10225 #endif
10226 #ifndef CONFIG_RELOCATABLE
10227- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10228+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10229 error("Wrong destination address");
10230 #endif
10231
10232diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10233index 4d3ff03..e4972ff 100644
10234--- a/arch/x86/boot/cpucheck.c
10235+++ b/arch/x86/boot/cpucheck.c
10236@@ -74,7 +74,7 @@ static int has_fpu(void)
10237 u16 fcw = -1, fsw = -1;
10238 u32 cr0;
10239
10240- asm("movl %%cr0,%0" : "=r" (cr0));
10241+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
10242 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10243 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10244 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10245@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10246 {
10247 u32 f0, f1;
10248
10249- asm("pushfl ; "
10250+ asm volatile("pushfl ; "
10251 "pushfl ; "
10252 "popl %0 ; "
10253 "movl %0,%1 ; "
10254@@ -115,7 +115,7 @@ static void get_flags(void)
10255 set_bit(X86_FEATURE_FPU, cpu.flags);
10256
10257 if (has_eflag(X86_EFLAGS_ID)) {
10258- asm("cpuid"
10259+ asm volatile("cpuid"
10260 : "=a" (max_intel_level),
10261 "=b" (cpu_vendor[0]),
10262 "=d" (cpu_vendor[1]),
10263@@ -124,7 +124,7 @@ static void get_flags(void)
10264
10265 if (max_intel_level >= 0x00000001 &&
10266 max_intel_level <= 0x0000ffff) {
10267- asm("cpuid"
10268+ asm volatile("cpuid"
10269 : "=a" (tfms),
10270 "=c" (cpu.flags[4]),
10271 "=d" (cpu.flags[0])
10272@@ -136,7 +136,7 @@ static void get_flags(void)
10273 cpu.model += ((tfms >> 16) & 0xf) << 4;
10274 }
10275
10276- asm("cpuid"
10277+ asm volatile("cpuid"
10278 : "=a" (max_amd_level)
10279 : "a" (0x80000000)
10280 : "ebx", "ecx", "edx");
10281@@ -144,7 +144,7 @@ static void get_flags(void)
10282 if (max_amd_level >= 0x80000001 &&
10283 max_amd_level <= 0x8000ffff) {
10284 u32 eax = 0x80000001;
10285- asm("cpuid"
10286+ asm volatile("cpuid"
10287 : "+a" (eax),
10288 "=c" (cpu.flags[6]),
10289 "=d" (cpu.flags[1])
10290@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10291 u32 ecx = MSR_K7_HWCR;
10292 u32 eax, edx;
10293
10294- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10295+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10296 eax &= ~(1 << 15);
10297- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10298+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10299
10300 get_flags(); /* Make sure it really did something */
10301 err = check_flags();
10302@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10303 u32 ecx = MSR_VIA_FCR;
10304 u32 eax, edx;
10305
10306- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10307+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10308 eax |= (1<<1)|(1<<7);
10309- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10310+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10311
10312 set_bit(X86_FEATURE_CX8, cpu.flags);
10313 err = check_flags();
10314@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10315 u32 eax, edx;
10316 u32 level = 1;
10317
10318- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10319- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10320- asm("cpuid"
10321+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10322+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10323+ asm volatile("cpuid"
10324 : "+a" (level), "=d" (cpu.flags[0])
10325 : : "ecx", "ebx");
10326- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10327+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10328
10329 err = check_flags();
10330 }
10331diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10332index 944ce59..87ee37a 100644
10333--- a/arch/x86/boot/header.S
10334+++ b/arch/x86/boot/header.S
10335@@ -401,10 +401,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10336 # single linked list of
10337 # struct setup_data
10338
10339-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10340+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10341
10342 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10343+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10344+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10345+#else
10346 #define VO_INIT_SIZE (VO__end - VO__text)
10347+#endif
10348 #if ZO_INIT_SIZE > VO_INIT_SIZE
10349 #define INIT_SIZE ZO_INIT_SIZE
10350 #else
10351diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10352index db75d07..8e6d0af 100644
10353--- a/arch/x86/boot/memory.c
10354+++ b/arch/x86/boot/memory.c
10355@@ -19,7 +19,7 @@
10356
10357 static int detect_memory_e820(void)
10358 {
10359- int count = 0;
10360+ unsigned int count = 0;
10361 struct biosregs ireg, oreg;
10362 struct e820entry *desc = boot_params.e820_map;
10363 static struct e820entry buf; /* static so it is zeroed */
10364diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10365index 11e8c6e..fdbb1ed 100644
10366--- a/arch/x86/boot/video-vesa.c
10367+++ b/arch/x86/boot/video-vesa.c
10368@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10369
10370 boot_params.screen_info.vesapm_seg = oreg.es;
10371 boot_params.screen_info.vesapm_off = oreg.di;
10372+ boot_params.screen_info.vesapm_size = oreg.cx;
10373 }
10374
10375 /*
10376diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10377index 43eda28..5ab5fdb 100644
10378--- a/arch/x86/boot/video.c
10379+++ b/arch/x86/boot/video.c
10380@@ -96,7 +96,7 @@ static void store_mode_params(void)
10381 static unsigned int get_entry(void)
10382 {
10383 char entry_buf[4];
10384- int i, len = 0;
10385+ unsigned int i, len = 0;
10386 int key;
10387 unsigned int v;
10388
10389diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10390index 5b577d5..3c1fed4 100644
10391--- a/arch/x86/crypto/aes-x86_64-asm_64.S
10392+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10393@@ -8,6 +8,8 @@
10394 * including this sentence is retained in full.
10395 */
10396
10397+#include <asm/alternative-asm.h>
10398+
10399 .extern crypto_ft_tab
10400 .extern crypto_it_tab
10401 .extern crypto_fl_tab
10402@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
10403 je B192; \
10404 leaq 32(r9),r9;
10405
10406+#define ret pax_force_retaddr 0, 1; ret
10407+
10408 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
10409 movq r1,r2; \
10410 movq r3,r4; \
10411diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
10412index 3470624..201259d 100644
10413--- a/arch/x86/crypto/aesni-intel_asm.S
10414+++ b/arch/x86/crypto/aesni-intel_asm.S
10415@@ -31,6 +31,7 @@
10416
10417 #include <linux/linkage.h>
10418 #include <asm/inst.h>
10419+#include <asm/alternative-asm.h>
10420
10421 #ifdef __x86_64__
10422 .data
10423@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
10424 pop %r14
10425 pop %r13
10426 pop %r12
10427+ pax_force_retaddr 0, 1
10428 ret
10429+ENDPROC(aesni_gcm_dec)
10430
10431
10432 /*****************************************************************************
10433@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
10434 pop %r14
10435 pop %r13
10436 pop %r12
10437+ pax_force_retaddr 0, 1
10438 ret
10439+ENDPROC(aesni_gcm_enc)
10440
10441 #endif
10442
10443@@ -1714,6 +1719,7 @@ _key_expansion_256a:
10444 pxor %xmm1, %xmm0
10445 movaps %xmm0, (TKEYP)
10446 add $0x10, TKEYP
10447+ pax_force_retaddr_bts
10448 ret
10449
10450 .align 4
10451@@ -1738,6 +1744,7 @@ _key_expansion_192a:
10452 shufps $0b01001110, %xmm2, %xmm1
10453 movaps %xmm1, 0x10(TKEYP)
10454 add $0x20, TKEYP
10455+ pax_force_retaddr_bts
10456 ret
10457
10458 .align 4
10459@@ -1757,6 +1764,7 @@ _key_expansion_192b:
10460
10461 movaps %xmm0, (TKEYP)
10462 add $0x10, TKEYP
10463+ pax_force_retaddr_bts
10464 ret
10465
10466 .align 4
10467@@ -1769,6 +1777,7 @@ _key_expansion_256b:
10468 pxor %xmm1, %xmm2
10469 movaps %xmm2, (TKEYP)
10470 add $0x10, TKEYP
10471+ pax_force_retaddr_bts
10472 ret
10473
10474 /*
10475@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
10476 #ifndef __x86_64__
10477 popl KEYP
10478 #endif
10479+ pax_force_retaddr 0, 1
10480 ret
10481+ENDPROC(aesni_set_key)
10482
10483 /*
10484 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
10485@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
10486 popl KLEN
10487 popl KEYP
10488 #endif
10489+ pax_force_retaddr 0, 1
10490 ret
10491+ENDPROC(aesni_enc)
10492
10493 /*
10494 * _aesni_enc1: internal ABI
10495@@ -1959,6 +1972,7 @@ _aesni_enc1:
10496 AESENC KEY STATE
10497 movaps 0x70(TKEYP), KEY
10498 AESENCLAST KEY STATE
10499+ pax_force_retaddr_bts
10500 ret
10501
10502 /*
10503@@ -2067,6 +2081,7 @@ _aesni_enc4:
10504 AESENCLAST KEY STATE2
10505 AESENCLAST KEY STATE3
10506 AESENCLAST KEY STATE4
10507+ pax_force_retaddr_bts
10508 ret
10509
10510 /*
10511@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
10512 popl KLEN
10513 popl KEYP
10514 #endif
10515+ pax_force_retaddr 0, 1
10516 ret
10517+ENDPROC(aesni_dec)
10518
10519 /*
10520 * _aesni_dec1: internal ABI
10521@@ -2146,6 +2163,7 @@ _aesni_dec1:
10522 AESDEC KEY STATE
10523 movaps 0x70(TKEYP), KEY
10524 AESDECLAST KEY STATE
10525+ pax_force_retaddr_bts
10526 ret
10527
10528 /*
10529@@ -2254,6 +2272,7 @@ _aesni_dec4:
10530 AESDECLAST KEY STATE2
10531 AESDECLAST KEY STATE3
10532 AESDECLAST KEY STATE4
10533+ pax_force_retaddr_bts
10534 ret
10535
10536 /*
10537@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
10538 popl KEYP
10539 popl LEN
10540 #endif
10541+ pax_force_retaddr 0, 1
10542 ret
10543+ENDPROC(aesni_ecb_enc)
10544
10545 /*
10546 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10547@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
10548 popl KEYP
10549 popl LEN
10550 #endif
10551+ pax_force_retaddr 0, 1
10552 ret
10553+ENDPROC(aesni_ecb_dec)
10554
10555 /*
10556 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10557@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
10558 popl LEN
10559 popl IVP
10560 #endif
10561+ pax_force_retaddr 0, 1
10562 ret
10563+ENDPROC(aesni_cbc_enc)
10564
10565 /*
10566 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10567@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
10568 popl LEN
10569 popl IVP
10570 #endif
10571+ pax_force_retaddr 0, 1
10572 ret
10573+ENDPROC(aesni_cbc_dec)
10574
10575 #ifdef __x86_64__
10576 .align 16
10577@@ -2526,6 +2553,7 @@ _aesni_inc_init:
10578 mov $1, TCTR_LOW
10579 MOVQ_R64_XMM TCTR_LOW INC
10580 MOVQ_R64_XMM CTR TCTR_LOW
10581+ pax_force_retaddr_bts
10582 ret
10583
10584 /*
10585@@ -2554,6 +2582,7 @@ _aesni_inc:
10586 .Linc_low:
10587 movaps CTR, IV
10588 PSHUFB_XMM BSWAP_MASK IV
10589+ pax_force_retaddr_bts
10590 ret
10591
10592 /*
10593@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
10594 .Lctr_enc_ret:
10595 movups IV, (IVP)
10596 .Lctr_enc_just_ret:
10597+ pax_force_retaddr 0, 1
10598 ret
10599+ENDPROC(aesni_ctr_enc)
10600 #endif
10601diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10602index 391d245..67f35c2 100644
10603--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
10604+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10605@@ -20,6 +20,8 @@
10606 *
10607 */
10608
10609+#include <asm/alternative-asm.h>
10610+
10611 .file "blowfish-x86_64-asm.S"
10612 .text
10613
10614@@ -151,9 +153,11 @@ __blowfish_enc_blk:
10615 jnz __enc_xor;
10616
10617 write_block();
10618+ pax_force_retaddr 0, 1
10619 ret;
10620 __enc_xor:
10621 xor_block();
10622+ pax_force_retaddr 0, 1
10623 ret;
10624
10625 .align 8
10626@@ -188,6 +192,7 @@ blowfish_dec_blk:
10627
10628 movq %r11, %rbp;
10629
10630+ pax_force_retaddr 0, 1
10631 ret;
10632
10633 /**********************************************************************
10634@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
10635
10636 popq %rbx;
10637 popq %rbp;
10638+ pax_force_retaddr 0, 1
10639 ret;
10640
10641 __enc_xor4:
10642@@ -349,6 +355,7 @@ __enc_xor4:
10643
10644 popq %rbx;
10645 popq %rbp;
10646+ pax_force_retaddr 0, 1
10647 ret;
10648
10649 .align 8
10650@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
10651 popq %rbx;
10652 popq %rbp;
10653
10654+ pax_force_retaddr 0, 1
10655 ret;
10656
10657diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
10658index 0b33743..7a56206 100644
10659--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
10660+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
10661@@ -20,6 +20,8 @@
10662 *
10663 */
10664
10665+#include <asm/alternative-asm.h>
10666+
10667 .file "camellia-x86_64-asm_64.S"
10668 .text
10669
10670@@ -229,12 +231,14 @@ __enc_done:
10671 enc_outunpack(mov, RT1);
10672
10673 movq RRBP, %rbp;
10674+ pax_force_retaddr 0, 1
10675 ret;
10676
10677 __enc_xor:
10678 enc_outunpack(xor, RT1);
10679
10680 movq RRBP, %rbp;
10681+ pax_force_retaddr 0, 1
10682 ret;
10683
10684 .global camellia_dec_blk;
10685@@ -275,6 +279,7 @@ __dec_rounds16:
10686 dec_outunpack();
10687
10688 movq RRBP, %rbp;
10689+ pax_force_retaddr 0, 1
10690 ret;
10691
10692 /**********************************************************************
10693@@ -468,6 +473,7 @@ __enc2_done:
10694
10695 movq RRBP, %rbp;
10696 popq %rbx;
10697+ pax_force_retaddr 0, 1
10698 ret;
10699
10700 __enc2_xor:
10701@@ -475,6 +481,7 @@ __enc2_xor:
10702
10703 movq RRBP, %rbp;
10704 popq %rbx;
10705+ pax_force_retaddr 0, 1
10706 ret;
10707
10708 .global camellia_dec_blk_2way;
10709@@ -517,4 +524,5 @@ __dec2_rounds16:
10710
10711 movq RRBP, %rbp;
10712 movq RXOR, %rbx;
10713+ pax_force_retaddr 0, 1
10714 ret;
10715diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10716index 15b00ac..2071784 100644
10717--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10718+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10719@@ -23,6 +23,8 @@
10720 *
10721 */
10722
10723+#include <asm/alternative-asm.h>
10724+
10725 .file "cast5-avx-x86_64-asm_64.S"
10726
10727 .extern cast_s1
10728@@ -281,6 +283,7 @@ __skip_enc:
10729 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
10730 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
10731
10732+ pax_force_retaddr 0, 1
10733 ret;
10734
10735 .align 16
10736@@ -353,6 +356,7 @@ __dec_tail:
10737 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
10738 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
10739
10740+ pax_force_retaddr 0, 1
10741 ret;
10742
10743 __skip_dec:
10744@@ -392,6 +396,7 @@ cast5_ecb_enc_16way:
10745 vmovdqu RR4, (6*4*4)(%r11);
10746 vmovdqu RL4, (7*4*4)(%r11);
10747
10748+ pax_force_retaddr
10749 ret;
10750
10751 .align 16
10752@@ -427,6 +432,7 @@ cast5_ecb_dec_16way:
10753 vmovdqu RR4, (6*4*4)(%r11);
10754 vmovdqu RL4, (7*4*4)(%r11);
10755
10756+ pax_force_retaddr
10757 ret;
10758
10759 .align 16
10760@@ -479,6 +485,7 @@ cast5_cbc_dec_16way:
10761
10762 popq %r12;
10763
10764+ pax_force_retaddr
10765 ret;
10766
10767 .align 16
10768@@ -555,4 +562,5 @@ cast5_ctr_16way:
10769
10770 popq %r12;
10771
10772+ pax_force_retaddr
10773 ret;
10774diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
10775index 2569d0d..637c289 100644
10776--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
10777+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
10778@@ -23,6 +23,8 @@
10779 *
10780 */
10781
10782+#include <asm/alternative-asm.h>
10783+
10784 #include "glue_helper-asm-avx.S"
10785
10786 .file "cast6-avx-x86_64-asm_64.S"
10787@@ -294,6 +296,7 @@ __cast6_enc_blk8:
10788 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
10789 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
10790
10791+ pax_force_retaddr 0, 1
10792 ret;
10793
10794 .align 8
10795@@ -340,6 +343,7 @@ __cast6_dec_blk8:
10796 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
10797 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
10798
10799+ pax_force_retaddr 0, 1
10800 ret;
10801
10802 .align 8
10803@@ -361,6 +365,7 @@ cast6_ecb_enc_8way:
10804
10805 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10806
10807+ pax_force_retaddr
10808 ret;
10809
10810 .align 8
10811@@ -382,6 +387,7 @@ cast6_ecb_dec_8way:
10812
10813 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10814
10815+ pax_force_retaddr
10816 ret;
10817
10818 .align 8
10819@@ -408,6 +414,7 @@ cast6_cbc_dec_8way:
10820
10821 popq %r12;
10822
10823+ pax_force_retaddr
10824 ret;
10825
10826 .align 8
10827@@ -436,4 +443,5 @@ cast6_ctr_8way:
10828
10829 popq %r12;
10830
10831+ pax_force_retaddr
10832 ret;
10833diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
10834index 6214a9b..1f4fc9a 100644
10835--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
10836+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
10837@@ -1,3 +1,5 @@
10838+#include <asm/alternative-asm.h>
10839+
10840 # enter ECRYPT_encrypt_bytes
10841 .text
10842 .p2align 5
10843@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
10844 add %r11,%rsp
10845 mov %rdi,%rax
10846 mov %rsi,%rdx
10847+ pax_force_retaddr 0, 1
10848 ret
10849 # bytesatleast65:
10850 ._bytesatleast65:
10851@@ -891,6 +894,7 @@ ECRYPT_keysetup:
10852 add %r11,%rsp
10853 mov %rdi,%rax
10854 mov %rsi,%rdx
10855+ pax_force_retaddr
10856 ret
10857 # enter ECRYPT_ivsetup
10858 .text
10859@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
10860 add %r11,%rsp
10861 mov %rdi,%rax
10862 mov %rsi,%rdx
10863+ pax_force_retaddr
10864 ret
10865diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
10866index 02b0e9f..cf4cf5c 100644
10867--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
10868+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
10869@@ -24,6 +24,8 @@
10870 *
10871 */
10872
10873+#include <asm/alternative-asm.h>
10874+
10875 #include "glue_helper-asm-avx.S"
10876
10877 .file "serpent-avx-x86_64-asm_64.S"
10878@@ -618,6 +620,7 @@ __serpent_enc_blk8_avx:
10879 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
10880 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
10881
10882+ pax_force_retaddr
10883 ret;
10884
10885 .align 8
10886@@ -673,6 +676,7 @@ __serpent_dec_blk8_avx:
10887 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
10888 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
10889
10890+ pax_force_retaddr
10891 ret;
10892
10893 .align 8
10894@@ -692,6 +696,7 @@ serpent_ecb_enc_8way_avx:
10895
10896 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10897
10898+ pax_force_retaddr
10899 ret;
10900
10901 .align 8
10902@@ -711,6 +716,7 @@ serpent_ecb_dec_8way_avx:
10903
10904 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
10905
10906+ pax_force_retaddr
10907 ret;
10908
10909 .align 8
10910@@ -730,6 +736,7 @@ serpent_cbc_dec_8way_avx:
10911
10912 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
10913
10914+ pax_force_retaddr
10915 ret;
10916
10917 .align 8
10918@@ -751,4 +758,5 @@ serpent_ctr_8way_avx:
10919
10920 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10921
10922+ pax_force_retaddr
10923 ret;
10924diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
10925index 3ee1ff0..cbc568b 100644
10926--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
10927+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
10928@@ -24,6 +24,8 @@
10929 *
10930 */
10931
10932+#include <asm/alternative-asm.h>
10933+
10934 .file "serpent-sse2-x86_64-asm_64.S"
10935 .text
10936
10937@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
10938 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
10939 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
10940
10941+ pax_force_retaddr
10942 ret;
10943
10944 __enc_xor8:
10945 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
10946 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
10947
10948+ pax_force_retaddr
10949 ret;
10950
10951 .align 8
10952@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
10953 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
10954 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
10955
10956+ pax_force_retaddr
10957 ret;
10958diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
10959index 49d6987..df66bd4 100644
10960--- a/arch/x86/crypto/sha1_ssse3_asm.S
10961+++ b/arch/x86/crypto/sha1_ssse3_asm.S
10962@@ -28,6 +28,8 @@
10963 * (at your option) any later version.
10964 */
10965
10966+#include <asm/alternative-asm.h>
10967+
10968 #define CTX %rdi // arg1
10969 #define BUF %rsi // arg2
10970 #define CNT %rdx // arg3
10971@@ -104,6 +106,7 @@
10972 pop %r12
10973 pop %rbp
10974 pop %rbx
10975+ pax_force_retaddr 0, 1
10976 ret
10977
10978 .size \name, .-\name
10979diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
10980index ebac16b..8092eb9 100644
10981--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
10982+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
10983@@ -23,6 +23,8 @@
10984 *
10985 */
10986
10987+#include <asm/alternative-asm.h>
10988+
10989 #include "glue_helper-asm-avx.S"
10990
10991 .file "twofish-avx-x86_64-asm_64.S"
10992@@ -283,6 +285,7 @@ __twofish_enc_blk8:
10993 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
10994 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
10995
10996+ pax_force_retaddr 0, 1
10997 ret;
10998
10999 .align 8
11000@@ -324,6 +327,7 @@ __twofish_dec_blk8:
11001 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
11002 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
11003
11004+ pax_force_retaddr 0, 1
11005 ret;
11006
11007 .align 8
11008@@ -345,6 +349,7 @@ twofish_ecb_enc_8way:
11009
11010 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11011
11012+ pax_force_retaddr 0, 1
11013 ret;
11014
11015 .align 8
11016@@ -366,6 +371,7 @@ twofish_ecb_dec_8way:
11017
11018 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11019
11020+ pax_force_retaddr 0, 1
11021 ret;
11022
11023 .align 8
11024@@ -392,6 +398,7 @@ twofish_cbc_dec_8way:
11025
11026 popq %r12;
11027
11028+ pax_force_retaddr 0, 1
11029 ret;
11030
11031 .align 8
11032@@ -420,4 +427,5 @@ twofish_ctr_8way:
11033
11034 popq %r12;
11035
11036+ pax_force_retaddr 0, 1
11037 ret;
11038diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11039index 5b012a2..36d5364 100644
11040--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11041+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11042@@ -20,6 +20,8 @@
11043 *
11044 */
11045
11046+#include <asm/alternative-asm.h>
11047+
11048 .file "twofish-x86_64-asm-3way.S"
11049 .text
11050
11051@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
11052 popq %r13;
11053 popq %r14;
11054 popq %r15;
11055+ pax_force_retaddr 0, 1
11056 ret;
11057
11058 __enc_xor3:
11059@@ -271,6 +274,7 @@ __enc_xor3:
11060 popq %r13;
11061 popq %r14;
11062 popq %r15;
11063+ pax_force_retaddr 0, 1
11064 ret;
11065
11066 .global twofish_dec_blk_3way
11067@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
11068 popq %r13;
11069 popq %r14;
11070 popq %r15;
11071+ pax_force_retaddr 0, 1
11072 ret;
11073
11074diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11075index 7bcf3fc..f53832f 100644
11076--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11077+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11078@@ -21,6 +21,7 @@
11079 .text
11080
11081 #include <asm/asm-offsets.h>
11082+#include <asm/alternative-asm.h>
11083
11084 #define a_offset 0
11085 #define b_offset 4
11086@@ -268,6 +269,7 @@ twofish_enc_blk:
11087
11088 popq R1
11089 movq $1,%rax
11090+ pax_force_retaddr 0, 1
11091 ret
11092
11093 twofish_dec_blk:
11094@@ -319,4 +321,5 @@ twofish_dec_blk:
11095
11096 popq R1
11097 movq $1,%rax
11098+ pax_force_retaddr 0, 1
11099 ret
11100diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11101index a703af1..f5b9c36 100644
11102--- a/arch/x86/ia32/ia32_aout.c
11103+++ b/arch/x86/ia32/ia32_aout.c
11104@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11105 unsigned long dump_start, dump_size;
11106 struct user32 dump;
11107
11108+ memset(&dump, 0, sizeof(dump));
11109+
11110 fs = get_fs();
11111 set_fs(KERNEL_DS);
11112 has_dumped = 1;
11113diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11114index a1daf4a..f8c4537 100644
11115--- a/arch/x86/ia32/ia32_signal.c
11116+++ b/arch/x86/ia32/ia32_signal.c
11117@@ -348,7 +348,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
11118 sp -= frame_size;
11119 /* Align the stack pointer according to the i386 ABI,
11120 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11121- sp = ((sp + 4) & -16ul) - 4;
11122+ sp = ((sp - 12) & -16ul) - 4;
11123 return (void __user *) sp;
11124 }
11125
11126@@ -406,7 +406,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
11127 * These are actually not used anymore, but left because some
11128 * gdb versions depend on them as a marker.
11129 */
11130- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11131+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11132 } put_user_catch(err);
11133
11134 if (err)
11135@@ -448,7 +448,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11136 0xb8,
11137 __NR_ia32_rt_sigreturn,
11138 0x80cd,
11139- 0,
11140+ 0
11141 };
11142
11143 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
11144@@ -471,16 +471,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11145
11146 if (ka->sa.sa_flags & SA_RESTORER)
11147 restorer = ka->sa.sa_restorer;
11148+ else if (current->mm->context.vdso)
11149+ /* Return stub is in 32bit vsyscall page */
11150+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11151 else
11152- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11153- rt_sigreturn);
11154+ restorer = &frame->retcode;
11155 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11156
11157 /*
11158 * Not actually used anymore, but left because some gdb
11159 * versions need it.
11160 */
11161- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11162+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11163 } put_user_catch(err);
11164
11165 err |= copy_siginfo_to_user32(&frame->info, info);
11166diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11167index 142c4ce..19b683f 100644
11168--- a/arch/x86/ia32/ia32entry.S
11169+++ b/arch/x86/ia32/ia32entry.S
11170@@ -15,8 +15,10 @@
11171 #include <asm/irqflags.h>
11172 #include <asm/asm.h>
11173 #include <asm/smap.h>
11174+#include <asm/pgtable.h>
11175 #include <linux/linkage.h>
11176 #include <linux/err.h>
11177+#include <asm/alternative-asm.h>
11178
11179 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11180 #include <linux/elf-em.h>
11181@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11182 ENDPROC(native_irq_enable_sysexit)
11183 #endif
11184
11185+ .macro pax_enter_kernel_user
11186+ pax_set_fptr_mask
11187+#ifdef CONFIG_PAX_MEMORY_UDEREF
11188+ call pax_enter_kernel_user
11189+#endif
11190+ .endm
11191+
11192+ .macro pax_exit_kernel_user
11193+#ifdef CONFIG_PAX_MEMORY_UDEREF
11194+ call pax_exit_kernel_user
11195+#endif
11196+#ifdef CONFIG_PAX_RANDKSTACK
11197+ pushq %rax
11198+ pushq %r11
11199+ call pax_randomize_kstack
11200+ popq %r11
11201+ popq %rax
11202+#endif
11203+ .endm
11204+
11205+.macro pax_erase_kstack
11206+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11207+ call pax_erase_kstack
11208+#endif
11209+.endm
11210+
11211 /*
11212 * 32bit SYSENTER instruction entry.
11213 *
11214@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11215 CFI_REGISTER rsp,rbp
11216 SWAPGS_UNSAFE_STACK
11217 movq PER_CPU_VAR(kernel_stack), %rsp
11218- addq $(KERNEL_STACK_OFFSET),%rsp
11219- /*
11220- * No need to follow this irqs on/off section: the syscall
11221- * disabled irqs, here we enable it straight after entry:
11222- */
11223- ENABLE_INTERRUPTS(CLBR_NONE)
11224 movl %ebp,%ebp /* zero extension */
11225 pushq_cfi $__USER32_DS
11226 /*CFI_REL_OFFSET ss,0*/
11227@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11228 CFI_REL_OFFSET rsp,0
11229 pushfq_cfi
11230 /*CFI_REL_OFFSET rflags,0*/
11231- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11232- CFI_REGISTER rip,r10
11233+ orl $X86_EFLAGS_IF,(%rsp)
11234+ GET_THREAD_INFO(%r11)
11235+ movl TI_sysenter_return(%r11), %r11d
11236+ CFI_REGISTER rip,r11
11237 pushq_cfi $__USER32_CS
11238 /*CFI_REL_OFFSET cs,0*/
11239 movl %eax, %eax
11240- pushq_cfi %r10
11241+ pushq_cfi %r11
11242 CFI_REL_OFFSET rip,0
11243 pushq_cfi %rax
11244 cld
11245 SAVE_ARGS 0,1,0
11246+ pax_enter_kernel_user
11247+
11248+#ifdef CONFIG_PAX_RANDKSTACK
11249+ pax_erase_kstack
11250+#endif
11251+
11252+ /*
11253+ * No need to follow this irqs on/off section: the syscall
11254+ * disabled irqs, here we enable it straight after entry:
11255+ */
11256+ ENABLE_INTERRUPTS(CLBR_NONE)
11257 /* no need to do an access_ok check here because rbp has been
11258 32bit zero extended */
11259+
11260+#ifdef CONFIG_PAX_MEMORY_UDEREF
11261+ mov $PAX_USER_SHADOW_BASE,%r11
11262+ add %r11,%rbp
11263+#endif
11264+
11265 ASM_STAC
11266 1: movl (%rbp),%ebp
11267 _ASM_EXTABLE(1b,ia32_badarg)
11268 ASM_CLAC
11269- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11270- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11271+ GET_THREAD_INFO(%r11)
11272+ orl $TS_COMPAT,TI_status(%r11)
11273+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11274 CFI_REMEMBER_STATE
11275 jnz sysenter_tracesys
11276 cmpq $(IA32_NR_syscalls-1),%rax
11277@@ -162,12 +204,15 @@ sysenter_do_call:
11278 sysenter_dispatch:
11279 call *ia32_sys_call_table(,%rax,8)
11280 movq %rax,RAX-ARGOFFSET(%rsp)
11281+ GET_THREAD_INFO(%r11)
11282 DISABLE_INTERRUPTS(CLBR_NONE)
11283 TRACE_IRQS_OFF
11284- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11285+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11286 jnz sysexit_audit
11287 sysexit_from_sys_call:
11288- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11289+ pax_exit_kernel_user
11290+ pax_erase_kstack
11291+ andl $~TS_COMPAT,TI_status(%r11)
11292 /* clear IF, that popfq doesn't enable interrupts early */
11293 andl $~0x200,EFLAGS-R11(%rsp)
11294 movl RIP-R11(%rsp),%edx /* User %eip */
11295@@ -193,6 +238,9 @@ sysexit_from_sys_call:
11296 movl %eax,%esi /* 2nd arg: syscall number */
11297 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11298 call __audit_syscall_entry
11299+
11300+ pax_erase_kstack
11301+
11302 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11303 cmpq $(IA32_NR_syscalls-1),%rax
11304 ja ia32_badsys
11305@@ -204,7 +252,7 @@ sysexit_from_sys_call:
11306 .endm
11307
11308 .macro auditsys_exit exit
11309- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11310+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11311 jnz ia32_ret_from_sys_call
11312 TRACE_IRQS_ON
11313 ENABLE_INTERRUPTS(CLBR_NONE)
11314@@ -215,11 +263,12 @@ sysexit_from_sys_call:
11315 1: setbe %al /* 1 if error, 0 if not */
11316 movzbl %al,%edi /* zero-extend that into %edi */
11317 call __audit_syscall_exit
11318+ GET_THREAD_INFO(%r11)
11319 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11320 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11321 DISABLE_INTERRUPTS(CLBR_NONE)
11322 TRACE_IRQS_OFF
11323- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11324+ testl %edi,TI_flags(%r11)
11325 jz \exit
11326 CLEAR_RREGS -ARGOFFSET
11327 jmp int_with_check
11328@@ -237,7 +286,7 @@ sysexit_audit:
11329
11330 sysenter_tracesys:
11331 #ifdef CONFIG_AUDITSYSCALL
11332- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11333+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11334 jz sysenter_auditsys
11335 #endif
11336 SAVE_REST
11337@@ -249,6 +298,9 @@ sysenter_tracesys:
11338 RESTORE_REST
11339 cmpq $(IA32_NR_syscalls-1),%rax
11340 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11341+
11342+ pax_erase_kstack
11343+
11344 jmp sysenter_do_call
11345 CFI_ENDPROC
11346 ENDPROC(ia32_sysenter_target)
11347@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11348 ENTRY(ia32_cstar_target)
11349 CFI_STARTPROC32 simple
11350 CFI_SIGNAL_FRAME
11351- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11352+ CFI_DEF_CFA rsp,0
11353 CFI_REGISTER rip,rcx
11354 /*CFI_REGISTER rflags,r11*/
11355 SWAPGS_UNSAFE_STACK
11356 movl %esp,%r8d
11357 CFI_REGISTER rsp,r8
11358 movq PER_CPU_VAR(kernel_stack),%rsp
11359+ SAVE_ARGS 8*6,0,0
11360+ pax_enter_kernel_user
11361+
11362+#ifdef CONFIG_PAX_RANDKSTACK
11363+ pax_erase_kstack
11364+#endif
11365+
11366 /*
11367 * No need to follow this irqs on/off section: the syscall
11368 * disabled irqs and here we enable it straight after entry:
11369 */
11370 ENABLE_INTERRUPTS(CLBR_NONE)
11371- SAVE_ARGS 8,0,0
11372 movl %eax,%eax /* zero extension */
11373 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11374 movq %rcx,RIP-ARGOFFSET(%rsp)
11375@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11376 /* no need to do an access_ok check here because r8 has been
11377 32bit zero extended */
11378 /* hardware stack frame is complete now */
11379+
11380+#ifdef CONFIG_PAX_MEMORY_UDEREF
11381+ mov $PAX_USER_SHADOW_BASE,%r11
11382+ add %r11,%r8
11383+#endif
11384+
11385 ASM_STAC
11386 1: movl (%r8),%r9d
11387 _ASM_EXTABLE(1b,ia32_badarg)
11388 ASM_CLAC
11389- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11390- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11391+ GET_THREAD_INFO(%r11)
11392+ orl $TS_COMPAT,TI_status(%r11)
11393+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11394 CFI_REMEMBER_STATE
11395 jnz cstar_tracesys
11396 cmpq $IA32_NR_syscalls-1,%rax
11397@@ -319,12 +384,15 @@ cstar_do_call:
11398 cstar_dispatch:
11399 call *ia32_sys_call_table(,%rax,8)
11400 movq %rax,RAX-ARGOFFSET(%rsp)
11401+ GET_THREAD_INFO(%r11)
11402 DISABLE_INTERRUPTS(CLBR_NONE)
11403 TRACE_IRQS_OFF
11404- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11405+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11406 jnz sysretl_audit
11407 sysretl_from_sys_call:
11408- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11409+ pax_exit_kernel_user
11410+ pax_erase_kstack
11411+ andl $~TS_COMPAT,TI_status(%r11)
11412 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
11413 movl RIP-ARGOFFSET(%rsp),%ecx
11414 CFI_REGISTER rip,rcx
11415@@ -352,7 +420,7 @@ sysretl_audit:
11416
11417 cstar_tracesys:
11418 #ifdef CONFIG_AUDITSYSCALL
11419- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11420+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11421 jz cstar_auditsys
11422 #endif
11423 xchgl %r9d,%ebp
11424@@ -366,6 +434,9 @@ cstar_tracesys:
11425 xchgl %ebp,%r9d
11426 cmpq $(IA32_NR_syscalls-1),%rax
11427 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
11428+
11429+ pax_erase_kstack
11430+
11431 jmp cstar_do_call
11432 END(ia32_cstar_target)
11433
11434@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
11435 CFI_REL_OFFSET rip,RIP-RIP
11436 PARAVIRT_ADJUST_EXCEPTION_FRAME
11437 SWAPGS
11438- /*
11439- * No need to follow this irqs on/off section: the syscall
11440- * disabled irqs and here we enable it straight after entry:
11441- */
11442- ENABLE_INTERRUPTS(CLBR_NONE)
11443 movl %eax,%eax
11444 pushq_cfi %rax
11445 cld
11446 /* note the registers are not zero extended to the sf.
11447 this could be a problem. */
11448 SAVE_ARGS 0,1,0
11449- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11450- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11451+ pax_enter_kernel_user
11452+
11453+#ifdef CONFIG_PAX_RANDKSTACK
11454+ pax_erase_kstack
11455+#endif
11456+
11457+ /*
11458+ * No need to follow this irqs on/off section: the syscall
11459+ * disabled irqs and here we enable it straight after entry:
11460+ */
11461+ ENABLE_INTERRUPTS(CLBR_NONE)
11462+ GET_THREAD_INFO(%r11)
11463+ orl $TS_COMPAT,TI_status(%r11)
11464+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11465 jnz ia32_tracesys
11466 cmpq $(IA32_NR_syscalls-1),%rax
11467 ja ia32_badsys
11468@@ -442,6 +520,9 @@ ia32_tracesys:
11469 RESTORE_REST
11470 cmpq $(IA32_NR_syscalls-1),%rax
11471 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
11472+
11473+ pax_erase_kstack
11474+
11475 jmp ia32_do_call
11476 END(ia32_syscall)
11477
11478diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
11479index d0b689b..34be51d 100644
11480--- a/arch/x86/ia32/sys_ia32.c
11481+++ b/arch/x86/ia32/sys_ia32.c
11482@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
11483 */
11484 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
11485 {
11486- typeof(ubuf->st_uid) uid = 0;
11487- typeof(ubuf->st_gid) gid = 0;
11488+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
11489+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
11490 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
11491 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
11492 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
11493@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
11494 mm_segment_t old_fs = get_fs();
11495
11496 set_fs(KERNEL_DS);
11497- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
11498+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
11499 set_fs(old_fs);
11500 if (put_compat_timespec(&t, interval))
11501 return -EFAULT;
11502@@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
11503 mm_segment_t old_fs = get_fs();
11504
11505 set_fs(KERNEL_DS);
11506- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
11507+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
11508 set_fs(old_fs);
11509 if (!ret) {
11510 switch (_NSIG_WORDS) {
11511@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
11512 if (copy_siginfo_from_user32(&info, uinfo))
11513 return -EFAULT;
11514 set_fs(KERNEL_DS);
11515- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
11516+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
11517 set_fs(old_fs);
11518 return ret;
11519 }
11520@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
11521 return -EFAULT;
11522
11523 set_fs(KERNEL_DS);
11524- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
11525+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
11526 count);
11527 set_fs(old_fs);
11528
11529diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
11530index 372231c..a5aa1a1 100644
11531--- a/arch/x86/include/asm/alternative-asm.h
11532+++ b/arch/x86/include/asm/alternative-asm.h
11533@@ -18,6 +18,45 @@
11534 .endm
11535 #endif
11536
11537+#ifdef KERNEXEC_PLUGIN
11538+ .macro pax_force_retaddr_bts rip=0
11539+ btsq $63,\rip(%rsp)
11540+ .endm
11541+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11542+ .macro pax_force_retaddr rip=0, reload=0
11543+ btsq $63,\rip(%rsp)
11544+ .endm
11545+ .macro pax_force_fptr ptr
11546+ btsq $63,\ptr
11547+ .endm
11548+ .macro pax_set_fptr_mask
11549+ .endm
11550+#endif
11551+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
11552+ .macro pax_force_retaddr rip=0, reload=0
11553+ .if \reload
11554+ pax_set_fptr_mask
11555+ .endif
11556+ orq %r10,\rip(%rsp)
11557+ .endm
11558+ .macro pax_force_fptr ptr
11559+ orq %r10,\ptr
11560+ .endm
11561+ .macro pax_set_fptr_mask
11562+ movabs $0x8000000000000000,%r10
11563+ .endm
11564+#endif
11565+#else
11566+ .macro pax_force_retaddr rip=0, reload=0
11567+ .endm
11568+ .macro pax_force_fptr ptr
11569+ .endm
11570+ .macro pax_force_retaddr_bts rip=0
11571+ .endm
11572+ .macro pax_set_fptr_mask
11573+ .endm
11574+#endif
11575+
11576 .macro altinstruction_entry orig alt feature orig_len alt_len
11577 .long \orig - .
11578 .long \alt - .
11579diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
11580index 58ed6d9..f1cbe58 100644
11581--- a/arch/x86/include/asm/alternative.h
11582+++ b/arch/x86/include/asm/alternative.h
11583@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11584 ".pushsection .discard,\"aw\",@progbits\n" \
11585 DISCARD_ENTRY(1) \
11586 ".popsection\n" \
11587- ".pushsection .altinstr_replacement, \"ax\"\n" \
11588+ ".pushsection .altinstr_replacement, \"a\"\n" \
11589 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
11590 ".popsection"
11591
11592@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11593 DISCARD_ENTRY(1) \
11594 DISCARD_ENTRY(2) \
11595 ".popsection\n" \
11596- ".pushsection .altinstr_replacement, \"ax\"\n" \
11597+ ".pushsection .altinstr_replacement, \"a\"\n" \
11598 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
11599 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
11600 ".popsection"
11601diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
11602index 3388034..050f0b9 100644
11603--- a/arch/x86/include/asm/apic.h
11604+++ b/arch/x86/include/asm/apic.h
11605@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
11606
11607 #ifdef CONFIG_X86_LOCAL_APIC
11608
11609-extern unsigned int apic_verbosity;
11610+extern int apic_verbosity;
11611 extern int local_apic_timer_c2_ok;
11612
11613 extern int disable_apic;
11614diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
11615index 20370c6..a2eb9b0 100644
11616--- a/arch/x86/include/asm/apm.h
11617+++ b/arch/x86/include/asm/apm.h
11618@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
11619 __asm__ __volatile__(APM_DO_ZERO_SEGS
11620 "pushl %%edi\n\t"
11621 "pushl %%ebp\n\t"
11622- "lcall *%%cs:apm_bios_entry\n\t"
11623+ "lcall *%%ss:apm_bios_entry\n\t"
11624 "setc %%al\n\t"
11625 "popl %%ebp\n\t"
11626 "popl %%edi\n\t"
11627@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
11628 __asm__ __volatile__(APM_DO_ZERO_SEGS
11629 "pushl %%edi\n\t"
11630 "pushl %%ebp\n\t"
11631- "lcall *%%cs:apm_bios_entry\n\t"
11632+ "lcall *%%ss:apm_bios_entry\n\t"
11633 "setc %%bl\n\t"
11634 "popl %%ebp\n\t"
11635 "popl %%edi\n\t"
11636diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
11637index 722aa3b..3a0bb27 100644
11638--- a/arch/x86/include/asm/atomic.h
11639+++ b/arch/x86/include/asm/atomic.h
11640@@ -22,7 +22,18 @@
11641 */
11642 static inline int atomic_read(const atomic_t *v)
11643 {
11644- return (*(volatile int *)&(v)->counter);
11645+ return (*(volatile const int *)&(v)->counter);
11646+}
11647+
11648+/**
11649+ * atomic_read_unchecked - read atomic variable
11650+ * @v: pointer of type atomic_unchecked_t
11651+ *
11652+ * Atomically reads the value of @v.
11653+ */
11654+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
11655+{
11656+ return (*(volatile const int *)&(v)->counter);
11657 }
11658
11659 /**
11660@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
11661 }
11662
11663 /**
11664+ * atomic_set_unchecked - set atomic variable
11665+ * @v: pointer of type atomic_unchecked_t
11666+ * @i: required value
11667+ *
11668+ * Atomically sets the value of @v to @i.
11669+ */
11670+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
11671+{
11672+ v->counter = i;
11673+}
11674+
11675+/**
11676 * atomic_add - add integer to atomic variable
11677 * @i: integer value to add
11678 * @v: pointer of type atomic_t
11679@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
11680 */
11681 static inline void atomic_add(int i, atomic_t *v)
11682 {
11683- asm volatile(LOCK_PREFIX "addl %1,%0"
11684+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
11685+
11686+#ifdef CONFIG_PAX_REFCOUNT
11687+ "jno 0f\n"
11688+ LOCK_PREFIX "subl %1,%0\n"
11689+ "int $4\n0:\n"
11690+ _ASM_EXTABLE(0b, 0b)
11691+#endif
11692+
11693+ : "+m" (v->counter)
11694+ : "ir" (i));
11695+}
11696+
11697+/**
11698+ * atomic_add_unchecked - add integer to atomic variable
11699+ * @i: integer value to add
11700+ * @v: pointer of type atomic_unchecked_t
11701+ *
11702+ * Atomically adds @i to @v.
11703+ */
11704+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
11705+{
11706+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
11707 : "+m" (v->counter)
11708 : "ir" (i));
11709 }
11710@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
11711 */
11712 static inline void atomic_sub(int i, atomic_t *v)
11713 {
11714- asm volatile(LOCK_PREFIX "subl %1,%0"
11715+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
11716+
11717+#ifdef CONFIG_PAX_REFCOUNT
11718+ "jno 0f\n"
11719+ LOCK_PREFIX "addl %1,%0\n"
11720+ "int $4\n0:\n"
11721+ _ASM_EXTABLE(0b, 0b)
11722+#endif
11723+
11724+ : "+m" (v->counter)
11725+ : "ir" (i));
11726+}
11727+
11728+/**
11729+ * atomic_sub_unchecked - subtract integer from atomic variable
11730+ * @i: integer value to subtract
11731+ * @v: pointer of type atomic_unchecked_t
11732+ *
11733+ * Atomically subtracts @i from @v.
11734+ */
11735+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
11736+{
11737+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
11738 : "+m" (v->counter)
11739 : "ir" (i));
11740 }
11741@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
11742 {
11743 unsigned char c;
11744
11745- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
11746+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
11747+
11748+#ifdef CONFIG_PAX_REFCOUNT
11749+ "jno 0f\n"
11750+ LOCK_PREFIX "addl %2,%0\n"
11751+ "int $4\n0:\n"
11752+ _ASM_EXTABLE(0b, 0b)
11753+#endif
11754+
11755+ "sete %1\n"
11756 : "+m" (v->counter), "=qm" (c)
11757 : "ir" (i) : "memory");
11758 return c;
11759@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
11760 */
11761 static inline void atomic_inc(atomic_t *v)
11762 {
11763- asm volatile(LOCK_PREFIX "incl %0"
11764+ asm volatile(LOCK_PREFIX "incl %0\n"
11765+
11766+#ifdef CONFIG_PAX_REFCOUNT
11767+ "jno 0f\n"
11768+ LOCK_PREFIX "decl %0\n"
11769+ "int $4\n0:\n"
11770+ _ASM_EXTABLE(0b, 0b)
11771+#endif
11772+
11773+ : "+m" (v->counter));
11774+}
11775+
11776+/**
11777+ * atomic_inc_unchecked - increment atomic variable
11778+ * @v: pointer of type atomic_unchecked_t
11779+ *
11780+ * Atomically increments @v by 1.
11781+ */
11782+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
11783+{
11784+ asm volatile(LOCK_PREFIX "incl %0\n"
11785 : "+m" (v->counter));
11786 }
11787
11788@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
11789 */
11790 static inline void atomic_dec(atomic_t *v)
11791 {
11792- asm volatile(LOCK_PREFIX "decl %0"
11793+ asm volatile(LOCK_PREFIX "decl %0\n"
11794+
11795+#ifdef CONFIG_PAX_REFCOUNT
11796+ "jno 0f\n"
11797+ LOCK_PREFIX "incl %0\n"
11798+ "int $4\n0:\n"
11799+ _ASM_EXTABLE(0b, 0b)
11800+#endif
11801+
11802+ : "+m" (v->counter));
11803+}
11804+
11805+/**
11806+ * atomic_dec_unchecked - decrement atomic variable
11807+ * @v: pointer of type atomic_unchecked_t
11808+ *
11809+ * Atomically decrements @v by 1.
11810+ */
11811+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
11812+{
11813+ asm volatile(LOCK_PREFIX "decl %0\n"
11814 : "+m" (v->counter));
11815 }
11816
11817@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
11818 {
11819 unsigned char c;
11820
11821- asm volatile(LOCK_PREFIX "decl %0; sete %1"
11822+ asm volatile(LOCK_PREFIX "decl %0\n"
11823+
11824+#ifdef CONFIG_PAX_REFCOUNT
11825+ "jno 0f\n"
11826+ LOCK_PREFIX "incl %0\n"
11827+ "int $4\n0:\n"
11828+ _ASM_EXTABLE(0b, 0b)
11829+#endif
11830+
11831+ "sete %1\n"
11832 : "+m" (v->counter), "=qm" (c)
11833 : : "memory");
11834 return c != 0;
11835@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
11836 {
11837 unsigned char c;
11838
11839- asm volatile(LOCK_PREFIX "incl %0; sete %1"
11840+ asm volatile(LOCK_PREFIX "incl %0\n"
11841+
11842+#ifdef CONFIG_PAX_REFCOUNT
11843+ "jno 0f\n"
11844+ LOCK_PREFIX "decl %0\n"
11845+ "int $4\n0:\n"
11846+ _ASM_EXTABLE(0b, 0b)
11847+#endif
11848+
11849+ "sete %1\n"
11850+ : "+m" (v->counter), "=qm" (c)
11851+ : : "memory");
11852+ return c != 0;
11853+}
11854+
11855+/**
11856+ * atomic_inc_and_test_unchecked - increment and test
11857+ * @v: pointer of type atomic_unchecked_t
11858+ *
11859+ * Atomically increments @v by 1
11860+ * and returns true if the result is zero, or false for all
11861+ * other cases.
11862+ */
11863+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
11864+{
11865+ unsigned char c;
11866+
11867+ asm volatile(LOCK_PREFIX "incl %0\n"
11868+ "sete %1\n"
11869 : "+m" (v->counter), "=qm" (c)
11870 : : "memory");
11871 return c != 0;
11872@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
11873 {
11874 unsigned char c;
11875
11876- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
11877+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
11878+
11879+#ifdef CONFIG_PAX_REFCOUNT
11880+ "jno 0f\n"
11881+ LOCK_PREFIX "subl %2,%0\n"
11882+ "int $4\n0:\n"
11883+ _ASM_EXTABLE(0b, 0b)
11884+#endif
11885+
11886+ "sets %1\n"
11887 : "+m" (v->counter), "=qm" (c)
11888 : "ir" (i) : "memory");
11889 return c;
11890@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
11891 */
11892 static inline int atomic_add_return(int i, atomic_t *v)
11893 {
11894+ return i + xadd_check_overflow(&v->counter, i);
11895+}
11896+
11897+/**
11898+ * atomic_add_return_unchecked - add integer and return
11899+ * @i: integer value to add
11900+ * @v: pointer of type atomic_unchecked_t
11901+ *
11902+ * Atomically adds @i to @v and returns @i + @v
11903+ */
11904+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
11905+{
11906 return i + xadd(&v->counter, i);
11907 }
11908
11909@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
11910 }
11911
11912 #define atomic_inc_return(v) (atomic_add_return(1, v))
11913+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
11914+{
11915+ return atomic_add_return_unchecked(1, v);
11916+}
11917 #define atomic_dec_return(v) (atomic_sub_return(1, v))
11918
11919 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
11920@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
11921 return cmpxchg(&v->counter, old, new);
11922 }
11923
11924+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
11925+{
11926+ return cmpxchg(&v->counter, old, new);
11927+}
11928+
11929 static inline int atomic_xchg(atomic_t *v, int new)
11930 {
11931 return xchg(&v->counter, new);
11932 }
11933
11934+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
11935+{
11936+ return xchg(&v->counter, new);
11937+}
11938+
11939 /**
11940 * __atomic_add_unless - add unless the number is already a given value
11941 * @v: pointer of type atomic_t
11942@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
11943 */
11944 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
11945 {
11946- int c, old;
11947+ int c, old, new;
11948 c = atomic_read(v);
11949 for (;;) {
11950- if (unlikely(c == (u)))
11951+ if (unlikely(c == u))
11952 break;
11953- old = atomic_cmpxchg((v), c, c + (a));
11954+
11955+ asm volatile("addl %2,%0\n"
11956+
11957+#ifdef CONFIG_PAX_REFCOUNT
11958+ "jno 0f\n"
11959+ "subl %2,%0\n"
11960+ "int $4\n0:\n"
11961+ _ASM_EXTABLE(0b, 0b)
11962+#endif
11963+
11964+ : "=r" (new)
11965+ : "0" (c), "ir" (a));
11966+
11967+ old = atomic_cmpxchg(v, c, new);
11968 if (likely(old == c))
11969 break;
11970 c = old;
11971@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
11972 }
11973
11974 /**
11975+ * atomic_inc_not_zero_hint - increment if not null
11976+ * @v: pointer of type atomic_t
11977+ * @hint: probable value of the atomic before the increment
11978+ *
11979+ * This version of atomic_inc_not_zero() gives a hint of probable
11980+ * value of the atomic. This helps processor to not read the memory
11981+ * before doing the atomic read/modify/write cycle, lowering
11982+ * number of bus transactions on some arches.
11983+ *
11984+ * Returns: 0 if increment was not done, 1 otherwise.
11985+ */
11986+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
11987+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
11988+{
11989+ int val, c = hint, new;
11990+
11991+ /* sanity test, should be removed by compiler if hint is a constant */
11992+ if (!hint)
11993+ return __atomic_add_unless(v, 1, 0);
11994+
11995+ do {
11996+ asm volatile("incl %0\n"
11997+
11998+#ifdef CONFIG_PAX_REFCOUNT
11999+ "jno 0f\n"
12000+ "decl %0\n"
12001+ "int $4\n0:\n"
12002+ _ASM_EXTABLE(0b, 0b)
12003+#endif
12004+
12005+ : "=r" (new)
12006+ : "0" (c));
12007+
12008+ val = atomic_cmpxchg(v, c, new);
12009+ if (val == c)
12010+ return 1;
12011+ c = val;
12012+ } while (c);
12013+
12014+ return 0;
12015+}
12016+
12017+/**
12018 * atomic_inc_short - increment of a short integer
12019 * @v: pointer to type int
12020 *
12021@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
12022 #endif
12023
12024 /* These are x86-specific, used by some header files */
12025-#define atomic_clear_mask(mask, addr) \
12026- asm volatile(LOCK_PREFIX "andl %0,%1" \
12027- : : "r" (~(mask)), "m" (*(addr)) : "memory")
12028+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
12029+{
12030+ asm volatile(LOCK_PREFIX "andl %1,%0"
12031+ : "+m" (v->counter)
12032+ : "r" (~(mask))
12033+ : "memory");
12034+}
12035
12036-#define atomic_set_mask(mask, addr) \
12037- asm volatile(LOCK_PREFIX "orl %0,%1" \
12038- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12039- : "memory")
12040+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12041+{
12042+ asm volatile(LOCK_PREFIX "andl %1,%0"
12043+ : "+m" (v->counter)
12044+ : "r" (~(mask))
12045+ : "memory");
12046+}
12047+
12048+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12049+{
12050+ asm volatile(LOCK_PREFIX "orl %1,%0"
12051+ : "+m" (v->counter)
12052+ : "r" (mask)
12053+ : "memory");
12054+}
12055+
12056+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12057+{
12058+ asm volatile(LOCK_PREFIX "orl %1,%0"
12059+ : "+m" (v->counter)
12060+ : "r" (mask)
12061+ : "memory");
12062+}
12063
12064 /* Atomic operations are already serializing on x86 */
12065 #define smp_mb__before_atomic_dec() barrier()
12066diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12067index b154de7..aadebd8 100644
12068--- a/arch/x86/include/asm/atomic64_32.h
12069+++ b/arch/x86/include/asm/atomic64_32.h
12070@@ -12,6 +12,14 @@ typedef struct {
12071 u64 __aligned(8) counter;
12072 } atomic64_t;
12073
12074+#ifdef CONFIG_PAX_REFCOUNT
12075+typedef struct {
12076+ u64 __aligned(8) counter;
12077+} atomic64_unchecked_t;
12078+#else
12079+typedef atomic64_t atomic64_unchecked_t;
12080+#endif
12081+
12082 #define ATOMIC64_INIT(val) { (val) }
12083
12084 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12085@@ -37,21 +45,31 @@ typedef struct {
12086 ATOMIC64_DECL_ONE(sym##_386)
12087
12088 ATOMIC64_DECL_ONE(add_386);
12089+ATOMIC64_DECL_ONE(add_unchecked_386);
12090 ATOMIC64_DECL_ONE(sub_386);
12091+ATOMIC64_DECL_ONE(sub_unchecked_386);
12092 ATOMIC64_DECL_ONE(inc_386);
12093+ATOMIC64_DECL_ONE(inc_unchecked_386);
12094 ATOMIC64_DECL_ONE(dec_386);
12095+ATOMIC64_DECL_ONE(dec_unchecked_386);
12096 #endif
12097
12098 #define alternative_atomic64(f, out, in...) \
12099 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12100
12101 ATOMIC64_DECL(read);
12102+ATOMIC64_DECL(read_unchecked);
12103 ATOMIC64_DECL(set);
12104+ATOMIC64_DECL(set_unchecked);
12105 ATOMIC64_DECL(xchg);
12106 ATOMIC64_DECL(add_return);
12107+ATOMIC64_DECL(add_return_unchecked);
12108 ATOMIC64_DECL(sub_return);
12109+ATOMIC64_DECL(sub_return_unchecked);
12110 ATOMIC64_DECL(inc_return);
12111+ATOMIC64_DECL(inc_return_unchecked);
12112 ATOMIC64_DECL(dec_return);
12113+ATOMIC64_DECL(dec_return_unchecked);
12114 ATOMIC64_DECL(dec_if_positive);
12115 ATOMIC64_DECL(inc_not_zero);
12116 ATOMIC64_DECL(add_unless);
12117@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12118 }
12119
12120 /**
12121+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12122+ * @p: pointer to type atomic64_unchecked_t
12123+ * @o: expected value
12124+ * @n: new value
12125+ *
12126+ * Atomically sets @v to @n if it was equal to @o and returns
12127+ * the old value.
12128+ */
12129+
12130+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12131+{
12132+ return cmpxchg64(&v->counter, o, n);
12133+}
12134+
12135+/**
12136 * atomic64_xchg - xchg atomic64 variable
12137 * @v: pointer to type atomic64_t
12138 * @n: value to assign
12139@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12140 }
12141
12142 /**
12143+ * atomic64_set_unchecked - set atomic64 variable
12144+ * @v: pointer to type atomic64_unchecked_t
12145+ * @n: value to assign
12146+ *
12147+ * Atomically sets the value of @v to @n.
12148+ */
12149+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12150+{
12151+ unsigned high = (unsigned)(i >> 32);
12152+ unsigned low = (unsigned)i;
12153+ alternative_atomic64(set, /* no output */,
12154+ "S" (v), "b" (low), "c" (high)
12155+ : "eax", "edx", "memory");
12156+}
12157+
12158+/**
12159 * atomic64_read - read atomic64 variable
12160 * @v: pointer to type atomic64_t
12161 *
12162@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12163 }
12164
12165 /**
12166+ * atomic64_read_unchecked - read atomic64 variable
12167+ * @v: pointer to type atomic64_unchecked_t
12168+ *
12169+ * Atomically reads the value of @v and returns it.
12170+ */
12171+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12172+{
12173+ long long r;
12174+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12175+ return r;
12176+ }
12177+
12178+/**
12179 * atomic64_add_return - add and return
12180 * @i: integer value to add
12181 * @v: pointer to type atomic64_t
12182@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12183 return i;
12184 }
12185
12186+/**
12187+ * atomic64_add_return_unchecked - add and return
12188+ * @i: integer value to add
12189+ * @v: pointer to type atomic64_unchecked_t
12190+ *
12191+ * Atomically adds @i to @v and returns @i + *@v
12192+ */
12193+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12194+{
12195+ alternative_atomic64(add_return_unchecked,
12196+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12197+ ASM_NO_INPUT_CLOBBER("memory"));
12198+ return i;
12199+}
12200+
12201 /*
12202 * Other variants with different arithmetic operators:
12203 */
12204@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12205 return a;
12206 }
12207
12208+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12209+{
12210+ long long a;
12211+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
12212+ "S" (v) : "memory", "ecx");
12213+ return a;
12214+}
12215+
12216 static inline long long atomic64_dec_return(atomic64_t *v)
12217 {
12218 long long a;
12219@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12220 }
12221
12222 /**
12223+ * atomic64_add_unchecked - add integer to atomic64 variable
12224+ * @i: integer value to add
12225+ * @v: pointer to type atomic64_unchecked_t
12226+ *
12227+ * Atomically adds @i to @v.
12228+ */
12229+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12230+{
12231+ __alternative_atomic64(add_unchecked, add_return_unchecked,
12232+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12233+ ASM_NO_INPUT_CLOBBER("memory"));
12234+ return i;
12235+}
12236+
12237+/**
12238 * atomic64_sub - subtract the atomic64 variable
12239 * @i: integer value to subtract
12240 * @v: pointer to type atomic64_t
12241diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12242index 0e1cbfc..5623683 100644
12243--- a/arch/x86/include/asm/atomic64_64.h
12244+++ b/arch/x86/include/asm/atomic64_64.h
12245@@ -18,7 +18,19 @@
12246 */
12247 static inline long atomic64_read(const atomic64_t *v)
12248 {
12249- return (*(volatile long *)&(v)->counter);
12250+ return (*(volatile const long *)&(v)->counter);
12251+}
12252+
12253+/**
12254+ * atomic64_read_unchecked - read atomic64 variable
12255+ * @v: pointer of type atomic64_unchecked_t
12256+ *
12257+ * Atomically reads the value of @v.
12258+ * Doesn't imply a read memory barrier.
12259+ */
12260+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12261+{
12262+ return (*(volatile const long *)&(v)->counter);
12263 }
12264
12265 /**
12266@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12267 }
12268
12269 /**
12270+ * atomic64_set_unchecked - set atomic64 variable
12271+ * @v: pointer to type atomic64_unchecked_t
12272+ * @i: required value
12273+ *
12274+ * Atomically sets the value of @v to @i.
12275+ */
12276+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12277+{
12278+ v->counter = i;
12279+}
12280+
12281+/**
12282 * atomic64_add - add integer to atomic64 variable
12283 * @i: integer value to add
12284 * @v: pointer to type atomic64_t
12285@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12286 */
12287 static inline void atomic64_add(long i, atomic64_t *v)
12288 {
12289+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
12290+
12291+#ifdef CONFIG_PAX_REFCOUNT
12292+ "jno 0f\n"
12293+ LOCK_PREFIX "subq %1,%0\n"
12294+ "int $4\n0:\n"
12295+ _ASM_EXTABLE(0b, 0b)
12296+#endif
12297+
12298+ : "=m" (v->counter)
12299+ : "er" (i), "m" (v->counter));
12300+}
12301+
12302+/**
12303+ * atomic64_add_unchecked - add integer to atomic64 variable
12304+ * @i: integer value to add
12305+ * @v: pointer to type atomic64_unchecked_t
12306+ *
12307+ * Atomically adds @i to @v.
12308+ */
12309+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12310+{
12311 asm volatile(LOCK_PREFIX "addq %1,%0"
12312 : "=m" (v->counter)
12313 : "er" (i), "m" (v->counter));
12314@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12315 */
12316 static inline void atomic64_sub(long i, atomic64_t *v)
12317 {
12318- asm volatile(LOCK_PREFIX "subq %1,%0"
12319+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12320+
12321+#ifdef CONFIG_PAX_REFCOUNT
12322+ "jno 0f\n"
12323+ LOCK_PREFIX "addq %1,%0\n"
12324+ "int $4\n0:\n"
12325+ _ASM_EXTABLE(0b, 0b)
12326+#endif
12327+
12328+ : "=m" (v->counter)
12329+ : "er" (i), "m" (v->counter));
12330+}
12331+
12332+/**
12333+ * atomic64_sub_unchecked - subtract the atomic64 variable
12334+ * @i: integer value to subtract
12335+ * @v: pointer to type atomic64_unchecked_t
12336+ *
12337+ * Atomically subtracts @i from @v.
12338+ */
12339+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12340+{
12341+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12342 : "=m" (v->counter)
12343 : "er" (i), "m" (v->counter));
12344 }
12345@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12346 {
12347 unsigned char c;
12348
12349- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12350+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
12351+
12352+#ifdef CONFIG_PAX_REFCOUNT
12353+ "jno 0f\n"
12354+ LOCK_PREFIX "addq %2,%0\n"
12355+ "int $4\n0:\n"
12356+ _ASM_EXTABLE(0b, 0b)
12357+#endif
12358+
12359+ "sete %1\n"
12360 : "=m" (v->counter), "=qm" (c)
12361 : "er" (i), "m" (v->counter) : "memory");
12362 return c;
12363@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12364 */
12365 static inline void atomic64_inc(atomic64_t *v)
12366 {
12367+ asm volatile(LOCK_PREFIX "incq %0\n"
12368+
12369+#ifdef CONFIG_PAX_REFCOUNT
12370+ "jno 0f\n"
12371+ LOCK_PREFIX "decq %0\n"
12372+ "int $4\n0:\n"
12373+ _ASM_EXTABLE(0b, 0b)
12374+#endif
12375+
12376+ : "=m" (v->counter)
12377+ : "m" (v->counter));
12378+}
12379+
12380+/**
12381+ * atomic64_inc_unchecked - increment atomic64 variable
12382+ * @v: pointer to type atomic64_unchecked_t
12383+ *
12384+ * Atomically increments @v by 1.
12385+ */
12386+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12387+{
12388 asm volatile(LOCK_PREFIX "incq %0"
12389 : "=m" (v->counter)
12390 : "m" (v->counter));
12391@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
12392 */
12393 static inline void atomic64_dec(atomic64_t *v)
12394 {
12395- asm volatile(LOCK_PREFIX "decq %0"
12396+ asm volatile(LOCK_PREFIX "decq %0\n"
12397+
12398+#ifdef CONFIG_PAX_REFCOUNT
12399+ "jno 0f\n"
12400+ LOCK_PREFIX "incq %0\n"
12401+ "int $4\n0:\n"
12402+ _ASM_EXTABLE(0b, 0b)
12403+#endif
12404+
12405+ : "=m" (v->counter)
12406+ : "m" (v->counter));
12407+}
12408+
12409+/**
12410+ * atomic64_dec_unchecked - decrement atomic64 variable
12411+ * @v: pointer to type atomic64_t
12412+ *
12413+ * Atomically decrements @v by 1.
12414+ */
12415+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
12416+{
12417+ asm volatile(LOCK_PREFIX "decq %0\n"
12418 : "=m" (v->counter)
12419 : "m" (v->counter));
12420 }
12421@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
12422 {
12423 unsigned char c;
12424
12425- asm volatile(LOCK_PREFIX "decq %0; sete %1"
12426+ asm volatile(LOCK_PREFIX "decq %0\n"
12427+
12428+#ifdef CONFIG_PAX_REFCOUNT
12429+ "jno 0f\n"
12430+ LOCK_PREFIX "incq %0\n"
12431+ "int $4\n0:\n"
12432+ _ASM_EXTABLE(0b, 0b)
12433+#endif
12434+
12435+ "sete %1\n"
12436 : "=m" (v->counter), "=qm" (c)
12437 : "m" (v->counter) : "memory");
12438 return c != 0;
12439@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
12440 {
12441 unsigned char c;
12442
12443- asm volatile(LOCK_PREFIX "incq %0; sete %1"
12444+ asm volatile(LOCK_PREFIX "incq %0\n"
12445+
12446+#ifdef CONFIG_PAX_REFCOUNT
12447+ "jno 0f\n"
12448+ LOCK_PREFIX "decq %0\n"
12449+ "int $4\n0:\n"
12450+ _ASM_EXTABLE(0b, 0b)
12451+#endif
12452+
12453+ "sete %1\n"
12454 : "=m" (v->counter), "=qm" (c)
12455 : "m" (v->counter) : "memory");
12456 return c != 0;
12457@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12458 {
12459 unsigned char c;
12460
12461- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
12462+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
12463+
12464+#ifdef CONFIG_PAX_REFCOUNT
12465+ "jno 0f\n"
12466+ LOCK_PREFIX "subq %2,%0\n"
12467+ "int $4\n0:\n"
12468+ _ASM_EXTABLE(0b, 0b)
12469+#endif
12470+
12471+ "sets %1\n"
12472 : "=m" (v->counter), "=qm" (c)
12473 : "er" (i), "m" (v->counter) : "memory");
12474 return c;
12475@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12476 */
12477 static inline long atomic64_add_return(long i, atomic64_t *v)
12478 {
12479+ return i + xadd_check_overflow(&v->counter, i);
12480+}
12481+
12482+/**
12483+ * atomic64_add_return_unchecked - add and return
12484+ * @i: integer value to add
12485+ * @v: pointer to type atomic64_unchecked_t
12486+ *
12487+ * Atomically adds @i to @v and returns @i + @v
12488+ */
12489+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
12490+{
12491 return i + xadd(&v->counter, i);
12492 }
12493
12494@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
12495 }
12496
12497 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
12498+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12499+{
12500+ return atomic64_add_return_unchecked(1, v);
12501+}
12502 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
12503
12504 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12505@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12506 return cmpxchg(&v->counter, old, new);
12507 }
12508
12509+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
12510+{
12511+ return cmpxchg(&v->counter, old, new);
12512+}
12513+
12514 static inline long atomic64_xchg(atomic64_t *v, long new)
12515 {
12516 return xchg(&v->counter, new);
12517@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
12518 */
12519 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
12520 {
12521- long c, old;
12522+ long c, old, new;
12523 c = atomic64_read(v);
12524 for (;;) {
12525- if (unlikely(c == (u)))
12526+ if (unlikely(c == u))
12527 break;
12528- old = atomic64_cmpxchg((v), c, c + (a));
12529+
12530+ asm volatile("add %2,%0\n"
12531+
12532+#ifdef CONFIG_PAX_REFCOUNT
12533+ "jno 0f\n"
12534+ "sub %2,%0\n"
12535+ "int $4\n0:\n"
12536+ _ASM_EXTABLE(0b, 0b)
12537+#endif
12538+
12539+ : "=r" (new)
12540+ : "0" (c), "ir" (a));
12541+
12542+ old = atomic64_cmpxchg(v, c, new);
12543 if (likely(old == c))
12544 break;
12545 c = old;
12546 }
12547- return c != (u);
12548+ return c != u;
12549 }
12550
12551 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12552diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
12553index 6dfd019..0c6699f 100644
12554--- a/arch/x86/include/asm/bitops.h
12555+++ b/arch/x86/include/asm/bitops.h
12556@@ -40,7 +40,7 @@
12557 * a mask operation on a byte.
12558 */
12559 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
12560-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
12561+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
12562 #define CONST_MASK(nr) (1 << ((nr) & 7))
12563
12564 /**
12565diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
12566index 4fa687a..60f2d39 100644
12567--- a/arch/x86/include/asm/boot.h
12568+++ b/arch/x86/include/asm/boot.h
12569@@ -6,10 +6,15 @@
12570 #include <uapi/asm/boot.h>
12571
12572 /* Physical address where kernel should be loaded. */
12573-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12574+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12575 + (CONFIG_PHYSICAL_ALIGN - 1)) \
12576 & ~(CONFIG_PHYSICAL_ALIGN - 1))
12577
12578+#ifndef __ASSEMBLY__
12579+extern unsigned char __LOAD_PHYSICAL_ADDR[];
12580+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
12581+#endif
12582+
12583 /* Minimum kernel alignment, as a power of two */
12584 #ifdef CONFIG_X86_64
12585 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
12586diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
12587index 48f99f1..d78ebf9 100644
12588--- a/arch/x86/include/asm/cache.h
12589+++ b/arch/x86/include/asm/cache.h
12590@@ -5,12 +5,13 @@
12591
12592 /* L1 cache line size */
12593 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12594-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12595+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12596
12597 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
12598+#define __read_only __attribute__((__section__(".data..read_only")))
12599
12600 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
12601-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
12602+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
12603
12604 #ifdef CONFIG_X86_VSMP
12605 #ifdef CONFIG_SMP
12606diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
12607index 9863ee3..4a1f8e1 100644
12608--- a/arch/x86/include/asm/cacheflush.h
12609+++ b/arch/x86/include/asm/cacheflush.h
12610@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
12611 unsigned long pg_flags = pg->flags & _PGMT_MASK;
12612
12613 if (pg_flags == _PGMT_DEFAULT)
12614- return -1;
12615+ return ~0UL;
12616 else if (pg_flags == _PGMT_WC)
12617 return _PAGE_CACHE_WC;
12618 else if (pg_flags == _PGMT_UC_MINUS)
12619diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
12620index 46fc474..b02b0f9 100644
12621--- a/arch/x86/include/asm/checksum_32.h
12622+++ b/arch/x86/include/asm/checksum_32.h
12623@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
12624 int len, __wsum sum,
12625 int *src_err_ptr, int *dst_err_ptr);
12626
12627+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
12628+ int len, __wsum sum,
12629+ int *src_err_ptr, int *dst_err_ptr);
12630+
12631+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
12632+ int len, __wsum sum,
12633+ int *src_err_ptr, int *dst_err_ptr);
12634+
12635 /*
12636 * Note: when you get a NULL pointer exception here this means someone
12637 * passed in an incorrect kernel address to one of these functions.
12638@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
12639 int *err_ptr)
12640 {
12641 might_sleep();
12642- return csum_partial_copy_generic((__force void *)src, dst,
12643+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
12644 len, sum, err_ptr, NULL);
12645 }
12646
12647@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
12648 {
12649 might_sleep();
12650 if (access_ok(VERIFY_WRITE, dst, len))
12651- return csum_partial_copy_generic(src, (__force void *)dst,
12652+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
12653 len, sum, NULL, err_ptr);
12654
12655 if (len)
12656diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
12657index 8d871ea..c1a0dc9 100644
12658--- a/arch/x86/include/asm/cmpxchg.h
12659+++ b/arch/x86/include/asm/cmpxchg.h
12660@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
12661 __compiletime_error("Bad argument size for cmpxchg");
12662 extern void __xadd_wrong_size(void)
12663 __compiletime_error("Bad argument size for xadd");
12664+extern void __xadd_check_overflow_wrong_size(void)
12665+ __compiletime_error("Bad argument size for xadd_check_overflow");
12666 extern void __add_wrong_size(void)
12667 __compiletime_error("Bad argument size for add");
12668+extern void __add_check_overflow_wrong_size(void)
12669+ __compiletime_error("Bad argument size for add_check_overflow");
12670
12671 /*
12672 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
12673@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
12674 __ret; \
12675 })
12676
12677+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
12678+ ({ \
12679+ __typeof__ (*(ptr)) __ret = (arg); \
12680+ switch (sizeof(*(ptr))) { \
12681+ case __X86_CASE_L: \
12682+ asm volatile (lock #op "l %0, %1\n" \
12683+ "jno 0f\n" \
12684+ "mov %0,%1\n" \
12685+ "int $4\n0:\n" \
12686+ _ASM_EXTABLE(0b, 0b) \
12687+ : "+r" (__ret), "+m" (*(ptr)) \
12688+ : : "memory", "cc"); \
12689+ break; \
12690+ case __X86_CASE_Q: \
12691+ asm volatile (lock #op "q %q0, %1\n" \
12692+ "jno 0f\n" \
12693+ "mov %0,%1\n" \
12694+ "int $4\n0:\n" \
12695+ _ASM_EXTABLE(0b, 0b) \
12696+ : "+r" (__ret), "+m" (*(ptr)) \
12697+ : : "memory", "cc"); \
12698+ break; \
12699+ default: \
12700+ __ ## op ## _check_overflow_wrong_size(); \
12701+ } \
12702+ __ret; \
12703+ })
12704+
12705 /*
12706 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
12707 * Since this is generally used to protect other memory information, we
12708@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
12709 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
12710 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
12711
12712+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
12713+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
12714+
12715 #define __add(ptr, inc, lock) \
12716 ({ \
12717 __typeof__ (*(ptr)) __ret = (inc); \
12718diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
12719index 2d9075e..b75a844 100644
12720--- a/arch/x86/include/asm/cpufeature.h
12721+++ b/arch/x86/include/asm/cpufeature.h
12722@@ -206,7 +206,7 @@
12723 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
12724 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
12725 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
12726-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
12727+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
12728 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
12729 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
12730 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
12731@@ -375,7 +375,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
12732 ".section .discard,\"aw\",@progbits\n"
12733 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
12734 ".previous\n"
12735- ".section .altinstr_replacement,\"ax\"\n"
12736+ ".section .altinstr_replacement,\"a\"\n"
12737 "3: movb $1,%0\n"
12738 "4:\n"
12739 ".previous\n"
12740diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
12741index 8bf1c06..f723dfd 100644
12742--- a/arch/x86/include/asm/desc.h
12743+++ b/arch/x86/include/asm/desc.h
12744@@ -4,6 +4,7 @@
12745 #include <asm/desc_defs.h>
12746 #include <asm/ldt.h>
12747 #include <asm/mmu.h>
12748+#include <asm/pgtable.h>
12749
12750 #include <linux/smp.h>
12751 #include <linux/percpu.h>
12752@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
12753
12754 desc->type = (info->read_exec_only ^ 1) << 1;
12755 desc->type |= info->contents << 2;
12756+ desc->type |= info->seg_not_present ^ 1;
12757
12758 desc->s = 1;
12759 desc->dpl = 0x3;
12760@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
12761 }
12762
12763 extern struct desc_ptr idt_descr;
12764-extern gate_desc idt_table[];
12765 extern struct desc_ptr nmi_idt_descr;
12766-extern gate_desc nmi_idt_table[];
12767-
12768-struct gdt_page {
12769- struct desc_struct gdt[GDT_ENTRIES];
12770-} __attribute__((aligned(PAGE_SIZE)));
12771-
12772-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
12773+extern gate_desc idt_table[256];
12774+extern gate_desc nmi_idt_table[256];
12775
12776+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
12777 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
12778 {
12779- return per_cpu(gdt_page, cpu).gdt;
12780+ return cpu_gdt_table[cpu];
12781 }
12782
12783 #ifdef CONFIG_X86_64
12784@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
12785 unsigned long base, unsigned dpl, unsigned flags,
12786 unsigned short seg)
12787 {
12788- gate->a = (seg << 16) | (base & 0xffff);
12789- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
12790+ gate->gate.offset_low = base;
12791+ gate->gate.seg = seg;
12792+ gate->gate.reserved = 0;
12793+ gate->gate.type = type;
12794+ gate->gate.s = 0;
12795+ gate->gate.dpl = dpl;
12796+ gate->gate.p = 1;
12797+ gate->gate.offset_high = base >> 16;
12798 }
12799
12800 #endif
12801@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
12802
12803 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
12804 {
12805+ pax_open_kernel();
12806 memcpy(&idt[entry], gate, sizeof(*gate));
12807+ pax_close_kernel();
12808 }
12809
12810 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
12811 {
12812+ pax_open_kernel();
12813 memcpy(&ldt[entry], desc, 8);
12814+ pax_close_kernel();
12815 }
12816
12817 static inline void
12818@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
12819 default: size = sizeof(*gdt); break;
12820 }
12821
12822+ pax_open_kernel();
12823 memcpy(&gdt[entry], desc, size);
12824+ pax_close_kernel();
12825 }
12826
12827 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
12828@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
12829
12830 static inline void native_load_tr_desc(void)
12831 {
12832+ pax_open_kernel();
12833 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
12834+ pax_close_kernel();
12835 }
12836
12837 static inline void native_load_gdt(const struct desc_ptr *dtr)
12838@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
12839 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
12840 unsigned int i;
12841
12842+ pax_open_kernel();
12843 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
12844 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
12845+ pax_close_kernel();
12846 }
12847
12848 #define _LDT_empty(info) \
12849@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
12850 }
12851
12852 #ifdef CONFIG_X86_64
12853-static inline void set_nmi_gate(int gate, void *addr)
12854+static inline void set_nmi_gate(int gate, const void *addr)
12855 {
12856 gate_desc s;
12857
12858@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
12859 }
12860 #endif
12861
12862-static inline void _set_gate(int gate, unsigned type, void *addr,
12863+static inline void _set_gate(int gate, unsigned type, const void *addr,
12864 unsigned dpl, unsigned ist, unsigned seg)
12865 {
12866 gate_desc s;
12867@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
12868 * Pentium F0 0F bugfix can have resulted in the mapped
12869 * IDT being write-protected.
12870 */
12871-static inline void set_intr_gate(unsigned int n, void *addr)
12872+static inline void set_intr_gate(unsigned int n, const void *addr)
12873 {
12874 BUG_ON((unsigned)n > 0xFF);
12875 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
12876@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
12877 /*
12878 * This routine sets up an interrupt gate at directory privilege level 3.
12879 */
12880-static inline void set_system_intr_gate(unsigned int n, void *addr)
12881+static inline void set_system_intr_gate(unsigned int n, const void *addr)
12882 {
12883 BUG_ON((unsigned)n > 0xFF);
12884 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
12885 }
12886
12887-static inline void set_system_trap_gate(unsigned int n, void *addr)
12888+static inline void set_system_trap_gate(unsigned int n, const void *addr)
12889 {
12890 BUG_ON((unsigned)n > 0xFF);
12891 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
12892 }
12893
12894-static inline void set_trap_gate(unsigned int n, void *addr)
12895+static inline void set_trap_gate(unsigned int n, const void *addr)
12896 {
12897 BUG_ON((unsigned)n > 0xFF);
12898 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
12899@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
12900 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
12901 {
12902 BUG_ON((unsigned)n > 0xFF);
12903- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
12904+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
12905 }
12906
12907-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
12908+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
12909 {
12910 BUG_ON((unsigned)n > 0xFF);
12911 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
12912 }
12913
12914-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
12915+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
12916 {
12917 BUG_ON((unsigned)n > 0xFF);
12918 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
12919 }
12920
12921+#ifdef CONFIG_X86_32
12922+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
12923+{
12924+ struct desc_struct d;
12925+
12926+ if (likely(limit))
12927+ limit = (limit - 1UL) >> PAGE_SHIFT;
12928+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
12929+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
12930+}
12931+#endif
12932+
12933 #endif /* _ASM_X86_DESC_H */
12934diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
12935index 278441f..b95a174 100644
12936--- a/arch/x86/include/asm/desc_defs.h
12937+++ b/arch/x86/include/asm/desc_defs.h
12938@@ -31,6 +31,12 @@ struct desc_struct {
12939 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
12940 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
12941 };
12942+ struct {
12943+ u16 offset_low;
12944+ u16 seg;
12945+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
12946+ unsigned offset_high: 16;
12947+ } gate;
12948 };
12949 } __attribute__((packed));
12950
12951diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
12952index 9c999c1..3860cb8 100644
12953--- a/arch/x86/include/asm/elf.h
12954+++ b/arch/x86/include/asm/elf.h
12955@@ -243,7 +243,25 @@ extern int force_personality32;
12956 the loader. We need to make sure that it is out of the way of the program
12957 that it will "exec", and that there is sufficient room for the brk. */
12958
12959+#ifdef CONFIG_PAX_SEGMEXEC
12960+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
12961+#else
12962 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
12963+#endif
12964+
12965+#ifdef CONFIG_PAX_ASLR
12966+#ifdef CONFIG_X86_32
12967+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
12968+
12969+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
12970+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
12971+#else
12972+#define PAX_ELF_ET_DYN_BASE 0x400000UL
12973+
12974+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
12975+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
12976+#endif
12977+#endif
12978
12979 /* This yields a mask that user programs can use to figure out what
12980 instruction set this CPU supports. This could be done in user space,
12981@@ -296,16 +314,12 @@ do { \
12982
12983 #define ARCH_DLINFO \
12984 do { \
12985- if (vdso_enabled) \
12986- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
12987- (unsigned long)current->mm->context.vdso); \
12988+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
12989 } while (0)
12990
12991 #define ARCH_DLINFO_X32 \
12992 do { \
12993- if (vdso_enabled) \
12994- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
12995- (unsigned long)current->mm->context.vdso); \
12996+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
12997 } while (0)
12998
12999 #define AT_SYSINFO 32
13000@@ -320,7 +334,7 @@ else \
13001
13002 #endif /* !CONFIG_X86_32 */
13003
13004-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
13005+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
13006
13007 #define VDSO_ENTRY \
13008 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
13009@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
13010 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
13011 #define compat_arch_setup_additional_pages syscall32_setup_pages
13012
13013-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
13014-#define arch_randomize_brk arch_randomize_brk
13015-
13016 /*
13017 * True on X86_32 or when emulating IA32 on X86_64
13018 */
13019diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
13020index 75ce3f4..882e801 100644
13021--- a/arch/x86/include/asm/emergency-restart.h
13022+++ b/arch/x86/include/asm/emergency-restart.h
13023@@ -13,6 +13,6 @@ enum reboot_type {
13024
13025 extern enum reboot_type reboot_type;
13026
13027-extern void machine_emergency_restart(void);
13028+extern void machine_emergency_restart(void) __noreturn;
13029
13030 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
13031diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
13032index 41ab26e..a88c9e6 100644
13033--- a/arch/x86/include/asm/fpu-internal.h
13034+++ b/arch/x86/include/asm/fpu-internal.h
13035@@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
13036 ({ \
13037 int err; \
13038 asm volatile(ASM_STAC "\n" \
13039- "1:" #insn "\n\t" \
13040+ "1:" \
13041+ __copyuser_seg \
13042+ #insn "\n\t" \
13043 "2: " ASM_CLAC "\n" \
13044 ".section .fixup,\"ax\"\n" \
13045 "3: movl $-1,%[err]\n" \
13046@@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13047 "emms\n\t" /* clear stack tags */
13048 "fildl %P[addr]", /* set F?P to defined value */
13049 X86_FEATURE_FXSAVE_LEAK,
13050- [addr] "m" (tsk->thread.fpu.has_fpu));
13051+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13052
13053 return fpu_restore_checking(&tsk->thread.fpu);
13054 }
13055diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13056index be27ba1..8f13ff9 100644
13057--- a/arch/x86/include/asm/futex.h
13058+++ b/arch/x86/include/asm/futex.h
13059@@ -12,6 +12,7 @@
13060 #include <asm/smap.h>
13061
13062 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13063+ typecheck(u32 __user *, uaddr); \
13064 asm volatile("\t" ASM_STAC "\n" \
13065 "1:\t" insn "\n" \
13066 "2:\t" ASM_CLAC "\n" \
13067@@ -20,15 +21,16 @@
13068 "\tjmp\t2b\n" \
13069 "\t.previous\n" \
13070 _ASM_EXTABLE(1b, 3b) \
13071- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13072+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13073 : "i" (-EFAULT), "0" (oparg), "1" (0))
13074
13075 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13076+ typecheck(u32 __user *, uaddr); \
13077 asm volatile("\t" ASM_STAC "\n" \
13078 "1:\tmovl %2, %0\n" \
13079 "\tmovl\t%0, %3\n" \
13080 "\t" insn "\n" \
13081- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13082+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13083 "\tjnz\t1b\n" \
13084 "3:\t" ASM_CLAC "\n" \
13085 "\t.section .fixup,\"ax\"\n" \
13086@@ -38,7 +40,7 @@
13087 _ASM_EXTABLE(1b, 4b) \
13088 _ASM_EXTABLE(2b, 4b) \
13089 : "=&a" (oldval), "=&r" (ret), \
13090- "+m" (*uaddr), "=&r" (tem) \
13091+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13092 : "r" (oparg), "i" (-EFAULT), "1" (0))
13093
13094 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13095@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13096
13097 switch (op) {
13098 case FUTEX_OP_SET:
13099- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13100+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13101 break;
13102 case FUTEX_OP_ADD:
13103- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13104+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13105 uaddr, oparg);
13106 break;
13107 case FUTEX_OP_OR:
13108@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13109 return -EFAULT;
13110
13111 asm volatile("\t" ASM_STAC "\n"
13112- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13113+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13114 "2:\t" ASM_CLAC "\n"
13115 "\t.section .fixup, \"ax\"\n"
13116 "3:\tmov %3, %0\n"
13117 "\tjmp 2b\n"
13118 "\t.previous\n"
13119 _ASM_EXTABLE(1b, 3b)
13120- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13121+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13122 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13123 : "memory"
13124 );
13125diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13126index eb92a6e..b98b2f4 100644
13127--- a/arch/x86/include/asm/hw_irq.h
13128+++ b/arch/x86/include/asm/hw_irq.h
13129@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
13130 extern void enable_IO_APIC(void);
13131
13132 /* Statistics */
13133-extern atomic_t irq_err_count;
13134-extern atomic_t irq_mis_count;
13135+extern atomic_unchecked_t irq_err_count;
13136+extern atomic_unchecked_t irq_mis_count;
13137
13138 /* EISA */
13139 extern void eisa_set_level_irq(unsigned int irq);
13140diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13141index a203659..9889f1c 100644
13142--- a/arch/x86/include/asm/i8259.h
13143+++ b/arch/x86/include/asm/i8259.h
13144@@ -62,7 +62,7 @@ struct legacy_pic {
13145 void (*init)(int auto_eoi);
13146 int (*irq_pending)(unsigned int irq);
13147 void (*make_irq)(unsigned int irq);
13148-};
13149+} __do_const;
13150
13151 extern struct legacy_pic *legacy_pic;
13152 extern struct legacy_pic null_legacy_pic;
13153diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13154index d8e8eef..15b1179 100644
13155--- a/arch/x86/include/asm/io.h
13156+++ b/arch/x86/include/asm/io.h
13157@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13158 return ioremap_nocache(offset, size);
13159 }
13160
13161-extern void iounmap(volatile void __iomem *addr);
13162+extern void iounmap(const volatile void __iomem *addr);
13163
13164 extern void set_iounmap_nonlazy(void);
13165
13166@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13167
13168 #include <linux/vmalloc.h>
13169
13170+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13171+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13172+{
13173+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13174+}
13175+
13176+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13177+{
13178+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13179+}
13180+
13181 /*
13182 * Convert a virtual cached pointer to an uncached pointer
13183 */
13184diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13185index bba3cf8..06bc8da 100644
13186--- a/arch/x86/include/asm/irqflags.h
13187+++ b/arch/x86/include/asm/irqflags.h
13188@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13189 sti; \
13190 sysexit
13191
13192+#define GET_CR0_INTO_RDI mov %cr0, %rdi
13193+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13194+#define GET_CR3_INTO_RDI mov %cr3, %rdi
13195+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13196+
13197 #else
13198 #define INTERRUPT_RETURN iret
13199 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13200diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13201index d3ddd17..c9fb0cc 100644
13202--- a/arch/x86/include/asm/kprobes.h
13203+++ b/arch/x86/include/asm/kprobes.h
13204@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13205 #define RELATIVEJUMP_SIZE 5
13206 #define RELATIVECALL_OPCODE 0xe8
13207 #define RELATIVE_ADDR_SIZE 4
13208-#define MAX_STACK_SIZE 64
13209-#define MIN_STACK_SIZE(ADDR) \
13210- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13211- THREAD_SIZE - (unsigned long)(ADDR))) \
13212- ? (MAX_STACK_SIZE) \
13213- : (((unsigned long)current_thread_info()) + \
13214- THREAD_SIZE - (unsigned long)(ADDR)))
13215+#define MAX_STACK_SIZE 64UL
13216+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13217
13218 #define flush_insn_slot(p) do { } while (0)
13219
13220diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13221index 2d89e39..baee879 100644
13222--- a/arch/x86/include/asm/local.h
13223+++ b/arch/x86/include/asm/local.h
13224@@ -10,33 +10,97 @@ typedef struct {
13225 atomic_long_t a;
13226 } local_t;
13227
13228+typedef struct {
13229+ atomic_long_unchecked_t a;
13230+} local_unchecked_t;
13231+
13232 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13233
13234 #define local_read(l) atomic_long_read(&(l)->a)
13235+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13236 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13237+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13238
13239 static inline void local_inc(local_t *l)
13240 {
13241- asm volatile(_ASM_INC "%0"
13242+ asm volatile(_ASM_INC "%0\n"
13243+
13244+#ifdef CONFIG_PAX_REFCOUNT
13245+ "jno 0f\n"
13246+ _ASM_DEC "%0\n"
13247+ "int $4\n0:\n"
13248+ _ASM_EXTABLE(0b, 0b)
13249+#endif
13250+
13251+ : "+m" (l->a.counter));
13252+}
13253+
13254+static inline void local_inc_unchecked(local_unchecked_t *l)
13255+{
13256+ asm volatile(_ASM_INC "%0\n"
13257 : "+m" (l->a.counter));
13258 }
13259
13260 static inline void local_dec(local_t *l)
13261 {
13262- asm volatile(_ASM_DEC "%0"
13263+ asm volatile(_ASM_DEC "%0\n"
13264+
13265+#ifdef CONFIG_PAX_REFCOUNT
13266+ "jno 0f\n"
13267+ _ASM_INC "%0\n"
13268+ "int $4\n0:\n"
13269+ _ASM_EXTABLE(0b, 0b)
13270+#endif
13271+
13272+ : "+m" (l->a.counter));
13273+}
13274+
13275+static inline void local_dec_unchecked(local_unchecked_t *l)
13276+{
13277+ asm volatile(_ASM_DEC "%0\n"
13278 : "+m" (l->a.counter));
13279 }
13280
13281 static inline void local_add(long i, local_t *l)
13282 {
13283- asm volatile(_ASM_ADD "%1,%0"
13284+ asm volatile(_ASM_ADD "%1,%0\n"
13285+
13286+#ifdef CONFIG_PAX_REFCOUNT
13287+ "jno 0f\n"
13288+ _ASM_SUB "%1,%0\n"
13289+ "int $4\n0:\n"
13290+ _ASM_EXTABLE(0b, 0b)
13291+#endif
13292+
13293+ : "+m" (l->a.counter)
13294+ : "ir" (i));
13295+}
13296+
13297+static inline void local_add_unchecked(long i, local_unchecked_t *l)
13298+{
13299+ asm volatile(_ASM_ADD "%1,%0\n"
13300 : "+m" (l->a.counter)
13301 : "ir" (i));
13302 }
13303
13304 static inline void local_sub(long i, local_t *l)
13305 {
13306- asm volatile(_ASM_SUB "%1,%0"
13307+ asm volatile(_ASM_SUB "%1,%0\n"
13308+
13309+#ifdef CONFIG_PAX_REFCOUNT
13310+ "jno 0f\n"
13311+ _ASM_ADD "%1,%0\n"
13312+ "int $4\n0:\n"
13313+ _ASM_EXTABLE(0b, 0b)
13314+#endif
13315+
13316+ : "+m" (l->a.counter)
13317+ : "ir" (i));
13318+}
13319+
13320+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
13321+{
13322+ asm volatile(_ASM_SUB "%1,%0\n"
13323 : "+m" (l->a.counter)
13324 : "ir" (i));
13325 }
13326@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
13327 {
13328 unsigned char c;
13329
13330- asm volatile(_ASM_SUB "%2,%0; sete %1"
13331+ asm volatile(_ASM_SUB "%2,%0\n"
13332+
13333+#ifdef CONFIG_PAX_REFCOUNT
13334+ "jno 0f\n"
13335+ _ASM_ADD "%2,%0\n"
13336+ "int $4\n0:\n"
13337+ _ASM_EXTABLE(0b, 0b)
13338+#endif
13339+
13340+ "sete %1\n"
13341 : "+m" (l->a.counter), "=qm" (c)
13342 : "ir" (i) : "memory");
13343 return c;
13344@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
13345 {
13346 unsigned char c;
13347
13348- asm volatile(_ASM_DEC "%0; sete %1"
13349+ asm volatile(_ASM_DEC "%0\n"
13350+
13351+#ifdef CONFIG_PAX_REFCOUNT
13352+ "jno 0f\n"
13353+ _ASM_INC "%0\n"
13354+ "int $4\n0:\n"
13355+ _ASM_EXTABLE(0b, 0b)
13356+#endif
13357+
13358+ "sete %1\n"
13359 : "+m" (l->a.counter), "=qm" (c)
13360 : : "memory");
13361 return c != 0;
13362@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
13363 {
13364 unsigned char c;
13365
13366- asm volatile(_ASM_INC "%0; sete %1"
13367+ asm volatile(_ASM_INC "%0\n"
13368+
13369+#ifdef CONFIG_PAX_REFCOUNT
13370+ "jno 0f\n"
13371+ _ASM_DEC "%0\n"
13372+ "int $4\n0:\n"
13373+ _ASM_EXTABLE(0b, 0b)
13374+#endif
13375+
13376+ "sete %1\n"
13377 : "+m" (l->a.counter), "=qm" (c)
13378 : : "memory");
13379 return c != 0;
13380@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
13381 {
13382 unsigned char c;
13383
13384- asm volatile(_ASM_ADD "%2,%0; sets %1"
13385+ asm volatile(_ASM_ADD "%2,%0\n"
13386+
13387+#ifdef CONFIG_PAX_REFCOUNT
13388+ "jno 0f\n"
13389+ _ASM_SUB "%2,%0\n"
13390+ "int $4\n0:\n"
13391+ _ASM_EXTABLE(0b, 0b)
13392+#endif
13393+
13394+ "sets %1\n"
13395 : "+m" (l->a.counter), "=qm" (c)
13396 : "ir" (i) : "memory");
13397 return c;
13398@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
13399 static inline long local_add_return(long i, local_t *l)
13400 {
13401 long __i = i;
13402+ asm volatile(_ASM_XADD "%0, %1\n"
13403+
13404+#ifdef CONFIG_PAX_REFCOUNT
13405+ "jno 0f\n"
13406+ _ASM_MOV "%0,%1\n"
13407+ "int $4\n0:\n"
13408+ _ASM_EXTABLE(0b, 0b)
13409+#endif
13410+
13411+ : "+r" (i), "+m" (l->a.counter)
13412+ : : "memory");
13413+ return i + __i;
13414+}
13415+
13416+/**
13417+ * local_add_return_unchecked - add and return
13418+ * @i: integer value to add
13419+ * @l: pointer to type local_unchecked_t
13420+ *
13421+ * Atomically adds @i to @l and returns @i + @l
13422+ */
13423+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
13424+{
13425+ long __i = i;
13426 asm volatile(_ASM_XADD "%0, %1;"
13427 : "+r" (i), "+m" (l->a.counter)
13428 : : "memory");
13429@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
13430
13431 #define local_cmpxchg(l, o, n) \
13432 (cmpxchg_local(&((l)->a.counter), (o), (n)))
13433+#define local_cmpxchg_unchecked(l, o, n) \
13434+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
13435 /* Always has a lock prefix */
13436 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
13437
13438diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
13439new file mode 100644
13440index 0000000..2bfd3ba
13441--- /dev/null
13442+++ b/arch/x86/include/asm/mman.h
13443@@ -0,0 +1,15 @@
13444+#ifndef _X86_MMAN_H
13445+#define _X86_MMAN_H
13446+
13447+#include <uapi/asm/mman.h>
13448+
13449+#ifdef __KERNEL__
13450+#ifndef __ASSEMBLY__
13451+#ifdef CONFIG_X86_32
13452+#define arch_mmap_check i386_mmap_check
13453+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13454+#endif
13455+#endif
13456+#endif
13457+
13458+#endif /* X86_MMAN_H */
13459diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
13460index 5f55e69..e20bfb1 100644
13461--- a/arch/x86/include/asm/mmu.h
13462+++ b/arch/x86/include/asm/mmu.h
13463@@ -9,7 +9,7 @@
13464 * we put the segment information here.
13465 */
13466 typedef struct {
13467- void *ldt;
13468+ struct desc_struct *ldt;
13469 int size;
13470
13471 #ifdef CONFIG_X86_64
13472@@ -18,7 +18,19 @@ typedef struct {
13473 #endif
13474
13475 struct mutex lock;
13476- void *vdso;
13477+ unsigned long vdso;
13478+
13479+#ifdef CONFIG_X86_32
13480+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13481+ unsigned long user_cs_base;
13482+ unsigned long user_cs_limit;
13483+
13484+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13485+ cpumask_t cpu_user_cs_mask;
13486+#endif
13487+
13488+#endif
13489+#endif
13490 } mm_context_t;
13491
13492 #ifdef CONFIG_SMP
13493diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
13494index cdbf367..adb37ac 100644
13495--- a/arch/x86/include/asm/mmu_context.h
13496+++ b/arch/x86/include/asm/mmu_context.h
13497@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
13498
13499 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
13500 {
13501+
13502+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13503+ unsigned int i;
13504+ pgd_t *pgd;
13505+
13506+ pax_open_kernel();
13507+ pgd = get_cpu_pgd(smp_processor_id());
13508+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
13509+ set_pgd_batched(pgd+i, native_make_pgd(0));
13510+ pax_close_kernel();
13511+#endif
13512+
13513 #ifdef CONFIG_SMP
13514 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
13515 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
13516@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13517 struct task_struct *tsk)
13518 {
13519 unsigned cpu = smp_processor_id();
13520+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13521+ int tlbstate = TLBSTATE_OK;
13522+#endif
13523
13524 if (likely(prev != next)) {
13525 #ifdef CONFIG_SMP
13526+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13527+ tlbstate = this_cpu_read(cpu_tlbstate.state);
13528+#endif
13529 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13530 this_cpu_write(cpu_tlbstate.active_mm, next);
13531 #endif
13532 cpumask_set_cpu(cpu, mm_cpumask(next));
13533
13534 /* Re-load page tables */
13535+#ifdef CONFIG_PAX_PER_CPU_PGD
13536+ pax_open_kernel();
13537+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13538+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13539+ pax_close_kernel();
13540+ load_cr3(get_cpu_pgd(cpu));
13541+#else
13542 load_cr3(next->pgd);
13543+#endif
13544
13545 /* stop flush ipis for the previous mm */
13546 cpumask_clear_cpu(cpu, mm_cpumask(prev));
13547@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13548 */
13549 if (unlikely(prev->context.ldt != next->context.ldt))
13550 load_LDT_nolock(&next->context);
13551- }
13552+
13553+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13554+ if (!(__supported_pte_mask & _PAGE_NX)) {
13555+ smp_mb__before_clear_bit();
13556+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
13557+ smp_mb__after_clear_bit();
13558+ cpu_set(cpu, next->context.cpu_user_cs_mask);
13559+ }
13560+#endif
13561+
13562+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13563+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
13564+ prev->context.user_cs_limit != next->context.user_cs_limit))
13565+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13566 #ifdef CONFIG_SMP
13567+ else if (unlikely(tlbstate != TLBSTATE_OK))
13568+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13569+#endif
13570+#endif
13571+
13572+ }
13573 else {
13574+
13575+#ifdef CONFIG_PAX_PER_CPU_PGD
13576+ pax_open_kernel();
13577+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13578+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13579+ pax_close_kernel();
13580+ load_cr3(get_cpu_pgd(cpu));
13581+#endif
13582+
13583+#ifdef CONFIG_SMP
13584 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13585 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
13586
13587@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13588 * tlb flush IPI delivery. We must reload CR3
13589 * to make sure to use no freed page tables.
13590 */
13591+
13592+#ifndef CONFIG_PAX_PER_CPU_PGD
13593 load_cr3(next->pgd);
13594+#endif
13595+
13596 load_LDT_nolock(&next->context);
13597+
13598+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
13599+ if (!(__supported_pte_mask & _PAGE_NX))
13600+ cpu_set(cpu, next->context.cpu_user_cs_mask);
13601+#endif
13602+
13603+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13604+#ifdef CONFIG_PAX_PAGEEXEC
13605+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
13606+#endif
13607+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13608+#endif
13609+
13610 }
13611+#endif
13612 }
13613-#endif
13614 }
13615
13616 #define activate_mm(prev, next) \
13617diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
13618index e3b7819..b257c64 100644
13619--- a/arch/x86/include/asm/module.h
13620+++ b/arch/x86/include/asm/module.h
13621@@ -5,6 +5,7 @@
13622
13623 #ifdef CONFIG_X86_64
13624 /* X86_64 does not define MODULE_PROC_FAMILY */
13625+#define MODULE_PROC_FAMILY ""
13626 #elif defined CONFIG_M486
13627 #define MODULE_PROC_FAMILY "486 "
13628 #elif defined CONFIG_M586
13629@@ -57,8 +58,20 @@
13630 #error unknown processor family
13631 #endif
13632
13633-#ifdef CONFIG_X86_32
13634-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
13635+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
13636+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
13637+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
13638+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
13639+#else
13640+#define MODULE_PAX_KERNEXEC ""
13641 #endif
13642
13643+#ifdef CONFIG_PAX_MEMORY_UDEREF
13644+#define MODULE_PAX_UDEREF "UDEREF "
13645+#else
13646+#define MODULE_PAX_UDEREF ""
13647+#endif
13648+
13649+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
13650+
13651 #endif /* _ASM_X86_MODULE_H */
13652diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
13653index c0fa356..07a498a 100644
13654--- a/arch/x86/include/asm/nmi.h
13655+++ b/arch/x86/include/asm/nmi.h
13656@@ -42,11 +42,11 @@ struct nmiaction {
13657 nmi_handler_t handler;
13658 unsigned long flags;
13659 const char *name;
13660-};
13661+} __do_const;
13662
13663 #define register_nmi_handler(t, fn, fg, n, init...) \
13664 ({ \
13665- static struct nmiaction init fn##_na = { \
13666+ static const struct nmiaction init fn##_na = { \
13667 .handler = (fn), \
13668 .name = (n), \
13669 .flags = (fg), \
13670@@ -54,7 +54,7 @@ struct nmiaction {
13671 __register_nmi_handler((t), &fn##_na); \
13672 })
13673
13674-int __register_nmi_handler(unsigned int, struct nmiaction *);
13675+int __register_nmi_handler(unsigned int, const struct nmiaction *);
13676
13677 void unregister_nmi_handler(unsigned int, const char *);
13678
13679diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
13680index 320f7bb..e89f8f8 100644
13681--- a/arch/x86/include/asm/page_64_types.h
13682+++ b/arch/x86/include/asm/page_64_types.h
13683@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
13684
13685 /* duplicated to the one in bootmem.h */
13686 extern unsigned long max_pfn;
13687-extern unsigned long phys_base;
13688+extern const unsigned long phys_base;
13689
13690 extern unsigned long __phys_addr(unsigned long);
13691 #define __phys_reloc_hide(x) (x)
13692diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
13693index 5edd174..9cf5821 100644
13694--- a/arch/x86/include/asm/paravirt.h
13695+++ b/arch/x86/include/asm/paravirt.h
13696@@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
13697 val);
13698 }
13699
13700+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
13701+{
13702+ pgdval_t val = native_pgd_val(pgd);
13703+
13704+ if (sizeof(pgdval_t) > sizeof(long))
13705+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
13706+ val, (u64)val >> 32);
13707+ else
13708+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
13709+ val);
13710+}
13711+
13712 static inline void pgd_clear(pgd_t *pgdp)
13713 {
13714 set_pgd(pgdp, __pgd(0));
13715@@ -711,6 +723,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
13716 pv_mmu_ops.set_fixmap(idx, phys, flags);
13717 }
13718
13719+#ifdef CONFIG_PAX_KERNEXEC
13720+static inline unsigned long pax_open_kernel(void)
13721+{
13722+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
13723+}
13724+
13725+static inline unsigned long pax_close_kernel(void)
13726+{
13727+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
13728+}
13729+#else
13730+static inline unsigned long pax_open_kernel(void) { return 0; }
13731+static inline unsigned long pax_close_kernel(void) { return 0; }
13732+#endif
13733+
13734 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
13735
13736 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
13737@@ -927,7 +954,7 @@ extern void default_banner(void);
13738
13739 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
13740 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
13741-#define PARA_INDIRECT(addr) *%cs:addr
13742+#define PARA_INDIRECT(addr) *%ss:addr
13743 #endif
13744
13745 #define INTERRUPT_RETURN \
13746@@ -1002,6 +1029,21 @@ extern void default_banner(void);
13747 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
13748 CLBR_NONE, \
13749 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
13750+
13751+#define GET_CR0_INTO_RDI \
13752+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
13753+ mov %rax,%rdi
13754+
13755+#define SET_RDI_INTO_CR0 \
13756+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13757+
13758+#define GET_CR3_INTO_RDI \
13759+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
13760+ mov %rax,%rdi
13761+
13762+#define SET_RDI_INTO_CR3 \
13763+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
13764+
13765 #endif /* CONFIG_X86_32 */
13766
13767 #endif /* __ASSEMBLY__ */
13768diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
13769index 142236e..5446ffbc 100644
13770--- a/arch/x86/include/asm/paravirt_types.h
13771+++ b/arch/x86/include/asm/paravirt_types.h
13772@@ -84,7 +84,7 @@ struct pv_init_ops {
13773 */
13774 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
13775 unsigned long addr, unsigned len);
13776-};
13777+} __no_const;
13778
13779
13780 struct pv_lazy_ops {
13781@@ -97,7 +97,7 @@ struct pv_time_ops {
13782 unsigned long long (*sched_clock)(void);
13783 unsigned long long (*steal_clock)(int cpu);
13784 unsigned long (*get_tsc_khz)(void);
13785-};
13786+} __no_const;
13787
13788 struct pv_cpu_ops {
13789 /* hooks for various privileged instructions */
13790@@ -191,7 +191,7 @@ struct pv_cpu_ops {
13791
13792 void (*start_context_switch)(struct task_struct *prev);
13793 void (*end_context_switch)(struct task_struct *next);
13794-};
13795+} __no_const;
13796
13797 struct pv_irq_ops {
13798 /*
13799@@ -222,7 +222,7 @@ struct pv_apic_ops {
13800 unsigned long start_eip,
13801 unsigned long start_esp);
13802 #endif
13803-};
13804+} __no_const;
13805
13806 struct pv_mmu_ops {
13807 unsigned long (*read_cr2)(void);
13808@@ -312,6 +312,7 @@ struct pv_mmu_ops {
13809 struct paravirt_callee_save make_pud;
13810
13811 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
13812+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
13813 #endif /* PAGETABLE_LEVELS == 4 */
13814 #endif /* PAGETABLE_LEVELS >= 3 */
13815
13816@@ -323,6 +324,12 @@ struct pv_mmu_ops {
13817 an mfn. We can tell which is which from the index. */
13818 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
13819 phys_addr_t phys, pgprot_t flags);
13820+
13821+#ifdef CONFIG_PAX_KERNEXEC
13822+ unsigned long (*pax_open_kernel)(void);
13823+ unsigned long (*pax_close_kernel)(void);
13824+#endif
13825+
13826 };
13827
13828 struct arch_spinlock;
13829@@ -333,7 +340,7 @@ struct pv_lock_ops {
13830 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
13831 int (*spin_trylock)(struct arch_spinlock *lock);
13832 void (*spin_unlock)(struct arch_spinlock *lock);
13833-};
13834+} __no_const;
13835
13836 /* This contains all the paravirt structures: we get a convenient
13837 * number for each function using the offset which we use to indicate
13838diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
13839index b4389a4..7024269 100644
13840--- a/arch/x86/include/asm/pgalloc.h
13841+++ b/arch/x86/include/asm/pgalloc.h
13842@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
13843 pmd_t *pmd, pte_t *pte)
13844 {
13845 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
13846+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
13847+}
13848+
13849+static inline void pmd_populate_user(struct mm_struct *mm,
13850+ pmd_t *pmd, pte_t *pte)
13851+{
13852+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
13853 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
13854 }
13855
13856@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
13857
13858 #ifdef CONFIG_X86_PAE
13859 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
13860+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
13861+{
13862+ pud_populate(mm, pudp, pmd);
13863+}
13864 #else /* !CONFIG_X86_PAE */
13865 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
13866 {
13867 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
13868 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
13869 }
13870+
13871+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
13872+{
13873+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
13874+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
13875+}
13876 #endif /* CONFIG_X86_PAE */
13877
13878 #if PAGETABLE_LEVELS > 3
13879@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
13880 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
13881 }
13882
13883+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
13884+{
13885+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
13886+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
13887+}
13888+
13889 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
13890 {
13891 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
13892diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
13893index f2b489c..4f7e2e5 100644
13894--- a/arch/x86/include/asm/pgtable-2level.h
13895+++ b/arch/x86/include/asm/pgtable-2level.h
13896@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
13897
13898 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
13899 {
13900+ pax_open_kernel();
13901 *pmdp = pmd;
13902+ pax_close_kernel();
13903 }
13904
13905 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
13906diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
13907index 4cc9f2b..5fd9226 100644
13908--- a/arch/x86/include/asm/pgtable-3level.h
13909+++ b/arch/x86/include/asm/pgtable-3level.h
13910@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
13911
13912 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
13913 {
13914+ pax_open_kernel();
13915 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
13916+ pax_close_kernel();
13917 }
13918
13919 static inline void native_set_pud(pud_t *pudp, pud_t pud)
13920 {
13921+ pax_open_kernel();
13922 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
13923+ pax_close_kernel();
13924 }
13925
13926 /*
13927diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
13928index 1c1a955..50f828c 100644
13929--- a/arch/x86/include/asm/pgtable.h
13930+++ b/arch/x86/include/asm/pgtable.h
13931@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
13932
13933 #ifndef __PAGETABLE_PUD_FOLDED
13934 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
13935+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
13936 #define pgd_clear(pgd) native_pgd_clear(pgd)
13937 #endif
13938
13939@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
13940
13941 #define arch_end_context_switch(prev) do {} while(0)
13942
13943+#define pax_open_kernel() native_pax_open_kernel()
13944+#define pax_close_kernel() native_pax_close_kernel()
13945 #endif /* CONFIG_PARAVIRT */
13946
13947+#define __HAVE_ARCH_PAX_OPEN_KERNEL
13948+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
13949+
13950+#ifdef CONFIG_PAX_KERNEXEC
13951+static inline unsigned long native_pax_open_kernel(void)
13952+{
13953+ unsigned long cr0;
13954+
13955+ preempt_disable();
13956+ barrier();
13957+ cr0 = read_cr0() ^ X86_CR0_WP;
13958+ BUG_ON(cr0 & X86_CR0_WP);
13959+ write_cr0(cr0);
13960+ return cr0 ^ X86_CR0_WP;
13961+}
13962+
13963+static inline unsigned long native_pax_close_kernel(void)
13964+{
13965+ unsigned long cr0;
13966+
13967+ cr0 = read_cr0() ^ X86_CR0_WP;
13968+ BUG_ON(!(cr0 & X86_CR0_WP));
13969+ write_cr0(cr0);
13970+ barrier();
13971+ preempt_enable_no_resched();
13972+ return cr0 ^ X86_CR0_WP;
13973+}
13974+#else
13975+static inline unsigned long native_pax_open_kernel(void) { return 0; }
13976+static inline unsigned long native_pax_close_kernel(void) { return 0; }
13977+#endif
13978+
13979 /*
13980 * The following only work if pte_present() is true.
13981 * Undefined behaviour if not..
13982 */
13983+static inline int pte_user(pte_t pte)
13984+{
13985+ return pte_val(pte) & _PAGE_USER;
13986+}
13987+
13988 static inline int pte_dirty(pte_t pte)
13989 {
13990 return pte_flags(pte) & _PAGE_DIRTY;
13991@@ -200,9 +240,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
13992 return pte_clear_flags(pte, _PAGE_RW);
13993 }
13994
13995+static inline pte_t pte_mkread(pte_t pte)
13996+{
13997+ return __pte(pte_val(pte) | _PAGE_USER);
13998+}
13999+
14000 static inline pte_t pte_mkexec(pte_t pte)
14001 {
14002- return pte_clear_flags(pte, _PAGE_NX);
14003+#ifdef CONFIG_X86_PAE
14004+ if (__supported_pte_mask & _PAGE_NX)
14005+ return pte_clear_flags(pte, _PAGE_NX);
14006+ else
14007+#endif
14008+ return pte_set_flags(pte, _PAGE_USER);
14009+}
14010+
14011+static inline pte_t pte_exprotect(pte_t pte)
14012+{
14013+#ifdef CONFIG_X86_PAE
14014+ if (__supported_pte_mask & _PAGE_NX)
14015+ return pte_set_flags(pte, _PAGE_NX);
14016+ else
14017+#endif
14018+ return pte_clear_flags(pte, _PAGE_USER);
14019 }
14020
14021 static inline pte_t pte_mkdirty(pte_t pte)
14022@@ -394,6 +454,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
14023 #endif
14024
14025 #ifndef __ASSEMBLY__
14026+
14027+#ifdef CONFIG_PAX_PER_CPU_PGD
14028+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
14029+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
14030+{
14031+ return cpu_pgd[cpu];
14032+}
14033+#endif
14034+
14035 #include <linux/mm_types.h>
14036
14037 static inline int pte_none(pte_t pte)
14038@@ -583,7 +652,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
14039
14040 static inline int pgd_bad(pgd_t pgd)
14041 {
14042- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
14043+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
14044 }
14045
14046 static inline int pgd_none(pgd_t pgd)
14047@@ -606,7 +675,12 @@ static inline int pgd_none(pgd_t pgd)
14048 * pgd_offset() returns a (pgd_t *)
14049 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
14050 */
14051-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
14052+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
14053+
14054+#ifdef CONFIG_PAX_PER_CPU_PGD
14055+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
14056+#endif
14057+
14058 /*
14059 * a shortcut which implies the use of the kernel's pgd, instead
14060 * of a process's
14061@@ -617,6 +691,20 @@ static inline int pgd_none(pgd_t pgd)
14062 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
14063 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
14064
14065+#ifdef CONFIG_X86_32
14066+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
14067+#else
14068+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
14069+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
14070+
14071+#ifdef CONFIG_PAX_MEMORY_UDEREF
14072+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
14073+#else
14074+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
14075+#endif
14076+
14077+#endif
14078+
14079 #ifndef __ASSEMBLY__
14080
14081 extern int direct_gbpages;
14082@@ -781,11 +869,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14083 * dst and src can be on the same page, but the range must not overlap,
14084 * and must not cross a page boundary.
14085 */
14086-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14087+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14088 {
14089- memcpy(dst, src, count * sizeof(pgd_t));
14090+ pax_open_kernel();
14091+ while (count--)
14092+ *dst++ = *src++;
14093+ pax_close_kernel();
14094 }
14095
14096+#ifdef CONFIG_PAX_PER_CPU_PGD
14097+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14098+#endif
14099+
14100+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14101+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14102+#else
14103+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14104+#endif
14105
14106 #include <asm-generic/pgtable.h>
14107 #endif /* __ASSEMBLY__ */
14108diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14109index 8faa215..a8a17ea 100644
14110--- a/arch/x86/include/asm/pgtable_32.h
14111+++ b/arch/x86/include/asm/pgtable_32.h
14112@@ -25,9 +25,6 @@
14113 struct mm_struct;
14114 struct vm_area_struct;
14115
14116-extern pgd_t swapper_pg_dir[1024];
14117-extern pgd_t initial_page_table[1024];
14118-
14119 static inline void pgtable_cache_init(void) { }
14120 static inline void check_pgt_cache(void) { }
14121 void paging_init(void);
14122@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14123 # include <asm/pgtable-2level.h>
14124 #endif
14125
14126+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14127+extern pgd_t initial_page_table[PTRS_PER_PGD];
14128+#ifdef CONFIG_X86_PAE
14129+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14130+#endif
14131+
14132 #if defined(CONFIG_HIGHPTE)
14133 #define pte_offset_map(dir, address) \
14134 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14135@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14136 /* Clear a kernel PTE and flush it from the TLB */
14137 #define kpte_clear_flush(ptep, vaddr) \
14138 do { \
14139+ pax_open_kernel(); \
14140 pte_clear(&init_mm, (vaddr), (ptep)); \
14141+ pax_close_kernel(); \
14142 __flush_tlb_one((vaddr)); \
14143 } while (0)
14144
14145@@ -75,6 +80,9 @@ do { \
14146
14147 #endif /* !__ASSEMBLY__ */
14148
14149+#define HAVE_ARCH_UNMAPPED_AREA
14150+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14151+
14152 /*
14153 * kern_addr_valid() is (1) for FLATMEM and (0) for
14154 * SPARSEMEM and DISCONTIGMEM
14155diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14156index ed5903b..c7fe163 100644
14157--- a/arch/x86/include/asm/pgtable_32_types.h
14158+++ b/arch/x86/include/asm/pgtable_32_types.h
14159@@ -8,7 +8,7 @@
14160 */
14161 #ifdef CONFIG_X86_PAE
14162 # include <asm/pgtable-3level_types.h>
14163-# define PMD_SIZE (1UL << PMD_SHIFT)
14164+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14165 # define PMD_MASK (~(PMD_SIZE - 1))
14166 #else
14167 # include <asm/pgtable-2level_types.h>
14168@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14169 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14170 #endif
14171
14172+#ifdef CONFIG_PAX_KERNEXEC
14173+#ifndef __ASSEMBLY__
14174+extern unsigned char MODULES_EXEC_VADDR[];
14175+extern unsigned char MODULES_EXEC_END[];
14176+#endif
14177+#include <asm/boot.h>
14178+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14179+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14180+#else
14181+#define ktla_ktva(addr) (addr)
14182+#define ktva_ktla(addr) (addr)
14183+#endif
14184+
14185 #define MODULES_VADDR VMALLOC_START
14186 #define MODULES_END VMALLOC_END
14187 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
14188diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14189index 47356f9..deb94a2 100644
14190--- a/arch/x86/include/asm/pgtable_64.h
14191+++ b/arch/x86/include/asm/pgtable_64.h
14192@@ -16,10 +16,14 @@
14193
14194 extern pud_t level3_kernel_pgt[512];
14195 extern pud_t level3_ident_pgt[512];
14196+extern pud_t level3_vmalloc_start_pgt[512];
14197+extern pud_t level3_vmalloc_end_pgt[512];
14198+extern pud_t level3_vmemmap_pgt[512];
14199+extern pud_t level2_vmemmap_pgt[512];
14200 extern pmd_t level2_kernel_pgt[512];
14201 extern pmd_t level2_fixmap_pgt[512];
14202-extern pmd_t level2_ident_pgt[512];
14203-extern pgd_t init_level4_pgt[];
14204+extern pmd_t level2_ident_pgt[512*2];
14205+extern pgd_t init_level4_pgt[512];
14206
14207 #define swapper_pg_dir init_level4_pgt
14208
14209@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14210
14211 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14212 {
14213+ pax_open_kernel();
14214 *pmdp = pmd;
14215+ pax_close_kernel();
14216 }
14217
14218 static inline void native_pmd_clear(pmd_t *pmd)
14219@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14220
14221 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14222 {
14223+ pax_open_kernel();
14224 *pudp = pud;
14225+ pax_close_kernel();
14226 }
14227
14228 static inline void native_pud_clear(pud_t *pud)
14229@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14230
14231 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14232 {
14233+ pax_open_kernel();
14234+ *pgdp = pgd;
14235+ pax_close_kernel();
14236+}
14237+
14238+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14239+{
14240 *pgdp = pgd;
14241 }
14242
14243diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14244index 766ea16..5b96cb3 100644
14245--- a/arch/x86/include/asm/pgtable_64_types.h
14246+++ b/arch/x86/include/asm/pgtable_64_types.h
14247@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
14248 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14249 #define MODULES_END _AC(0xffffffffff000000, UL)
14250 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14251+#define MODULES_EXEC_VADDR MODULES_VADDR
14252+#define MODULES_EXEC_END MODULES_END
14253+
14254+#define ktla_ktva(addr) (addr)
14255+#define ktva_ktla(addr) (addr)
14256
14257 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
14258diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14259index 3c32db8..1ddccf5 100644
14260--- a/arch/x86/include/asm/pgtable_types.h
14261+++ b/arch/x86/include/asm/pgtable_types.h
14262@@ -16,13 +16,12 @@
14263 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14264 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14265 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14266-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14267+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14268 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14269 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14270 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14271-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14272-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14273-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14274+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14275+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14276 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14277
14278 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14279@@ -40,7 +39,6 @@
14280 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14281 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14282 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14283-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14284 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14285 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14286 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14287@@ -57,8 +55,10 @@
14288
14289 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14290 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14291-#else
14292+#elif defined(CONFIG_KMEMCHECK)
14293 #define _PAGE_NX (_AT(pteval_t, 0))
14294+#else
14295+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14296 #endif
14297
14298 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14299@@ -116,6 +116,9 @@
14300 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14301 _PAGE_ACCESSED)
14302
14303+#define PAGE_READONLY_NOEXEC PAGE_READONLY
14304+#define PAGE_SHARED_NOEXEC PAGE_SHARED
14305+
14306 #define __PAGE_KERNEL_EXEC \
14307 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
14308 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
14309@@ -126,7 +129,7 @@
14310 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
14311 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
14312 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
14313-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
14314+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
14315 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
14316 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
14317 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
14318@@ -188,8 +191,8 @@
14319 * bits are combined, this will alow user to access the high address mapped
14320 * VDSO in the presence of CONFIG_COMPAT_VDSO
14321 */
14322-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
14323-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
14324+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14325+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14326 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
14327 #endif
14328
14329@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
14330 {
14331 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
14332 }
14333+#endif
14334
14335+#if PAGETABLE_LEVELS == 3
14336+#include <asm-generic/pgtable-nopud.h>
14337+#endif
14338+
14339+#if PAGETABLE_LEVELS == 2
14340+#include <asm-generic/pgtable-nopmd.h>
14341+#endif
14342+
14343+#ifndef __ASSEMBLY__
14344 #if PAGETABLE_LEVELS > 3
14345 typedef struct { pudval_t pud; } pud_t;
14346
14347@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
14348 return pud.pud;
14349 }
14350 #else
14351-#include <asm-generic/pgtable-nopud.h>
14352-
14353 static inline pudval_t native_pud_val(pud_t pud)
14354 {
14355 return native_pgd_val(pud.pgd);
14356@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
14357 return pmd.pmd;
14358 }
14359 #else
14360-#include <asm-generic/pgtable-nopmd.h>
14361-
14362 static inline pmdval_t native_pmd_val(pmd_t pmd)
14363 {
14364 return native_pgd_val(pmd.pud.pgd);
14365@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
14366
14367 extern pteval_t __supported_pte_mask;
14368 extern void set_nx(void);
14369-extern int nx_enabled;
14370
14371 #define pgprot_writecombine pgprot_writecombine
14372 extern pgprot_t pgprot_writecombine(pgprot_t prot);
14373diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
14374index 888184b..a07ac89 100644
14375--- a/arch/x86/include/asm/processor.h
14376+++ b/arch/x86/include/asm/processor.h
14377@@ -287,7 +287,7 @@ struct tss_struct {
14378
14379 } ____cacheline_aligned;
14380
14381-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
14382+extern struct tss_struct init_tss[NR_CPUS];
14383
14384 /*
14385 * Save the original ist values for checking stack pointers during debugging
14386@@ -827,11 +827,18 @@ static inline void spin_lock_prefetch(const void *x)
14387 */
14388 #define TASK_SIZE PAGE_OFFSET
14389 #define TASK_SIZE_MAX TASK_SIZE
14390+
14391+#ifdef CONFIG_PAX_SEGMEXEC
14392+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
14393+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
14394+#else
14395 #define STACK_TOP TASK_SIZE
14396-#define STACK_TOP_MAX STACK_TOP
14397+#endif
14398+
14399+#define STACK_TOP_MAX TASK_SIZE
14400
14401 #define INIT_THREAD { \
14402- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14403+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14404 .vm86_info = NULL, \
14405 .sysenter_cs = __KERNEL_CS, \
14406 .io_bitmap_ptr = NULL, \
14407@@ -845,7 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
14408 */
14409 #define INIT_TSS { \
14410 .x86_tss = { \
14411- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14412+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14413 .ss0 = __KERNEL_DS, \
14414 .ss1 = __KERNEL_CS, \
14415 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
14416@@ -856,11 +863,7 @@ static inline void spin_lock_prefetch(const void *x)
14417 extern unsigned long thread_saved_pc(struct task_struct *tsk);
14418
14419 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
14420-#define KSTK_TOP(info) \
14421-({ \
14422- unsigned long *__ptr = (unsigned long *)(info); \
14423- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
14424-})
14425+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
14426
14427 /*
14428 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
14429@@ -875,7 +878,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14430 #define task_pt_regs(task) \
14431 ({ \
14432 struct pt_regs *__regs__; \
14433- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
14434+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
14435 __regs__ - 1; \
14436 })
14437
14438@@ -885,13 +888,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14439 /*
14440 * User space process size. 47bits minus one guard page.
14441 */
14442-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
14443+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
14444
14445 /* This decides where the kernel will search for a free chunk of vm
14446 * space during mmap's.
14447 */
14448 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
14449- 0xc0000000 : 0xFFFFe000)
14450+ 0xc0000000 : 0xFFFFf000)
14451
14452 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
14453 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
14454@@ -902,11 +905,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14455 #define STACK_TOP_MAX TASK_SIZE_MAX
14456
14457 #define INIT_THREAD { \
14458- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14459+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14460 }
14461
14462 #define INIT_TSS { \
14463- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14464+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14465 }
14466
14467 /*
14468@@ -934,6 +937,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
14469 */
14470 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
14471
14472+#ifdef CONFIG_PAX_SEGMEXEC
14473+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
14474+#endif
14475+
14476 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
14477
14478 /* Get/set a process' ability to use the timestamp counter instruction */
14479@@ -994,12 +1001,12 @@ extern bool cpu_has_amd_erratum(const int *);
14480 #define cpu_has_amd_erratum(x) (false)
14481 #endif /* CONFIG_CPU_SUP_AMD */
14482
14483-extern unsigned long arch_align_stack(unsigned long sp);
14484+#define arch_align_stack(x) ((x) & ~0xfUL)
14485 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
14486
14487 void default_idle(void);
14488 bool set_pm_idle_to_default(void);
14489
14490-void stop_this_cpu(void *dummy);
14491+void stop_this_cpu(void *dummy) __noreturn;
14492
14493 #endif /* _ASM_X86_PROCESSOR_H */
14494diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
14495index 942a086..6c26446 100644
14496--- a/arch/x86/include/asm/ptrace.h
14497+++ b/arch/x86/include/asm/ptrace.h
14498@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
14499 }
14500
14501 /*
14502- * user_mode_vm(regs) determines whether a register set came from user mode.
14503+ * user_mode(regs) determines whether a register set came from user mode.
14504 * This is true if V8086 mode was enabled OR if the register set was from
14505 * protected mode with RPL-3 CS value. This tricky test checks that with
14506 * one comparison. Many places in the kernel can bypass this full check
14507- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
14508+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
14509+ * be used.
14510 */
14511-static inline int user_mode(struct pt_regs *regs)
14512+static inline int user_mode_novm(struct pt_regs *regs)
14513 {
14514 #ifdef CONFIG_X86_32
14515 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
14516 #else
14517- return !!(regs->cs & 3);
14518+ return !!(regs->cs & SEGMENT_RPL_MASK);
14519 #endif
14520 }
14521
14522-static inline int user_mode_vm(struct pt_regs *regs)
14523+static inline int user_mode(struct pt_regs *regs)
14524 {
14525 #ifdef CONFIG_X86_32
14526 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
14527 USER_RPL;
14528 #else
14529- return user_mode(regs);
14530+ return user_mode_novm(regs);
14531 #endif
14532 }
14533
14534@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
14535 #ifdef CONFIG_X86_64
14536 static inline bool user_64bit_mode(struct pt_regs *regs)
14537 {
14538+ unsigned long cs = regs->cs & 0xffff;
14539 #ifndef CONFIG_PARAVIRT
14540 /*
14541 * On non-paravirt systems, this is the only long mode CPL 3
14542 * selector. We do not allow long mode selectors in the LDT.
14543 */
14544- return regs->cs == __USER_CS;
14545+ return cs == __USER_CS;
14546 #else
14547 /* Headers are too twisted for this to go in paravirt.h. */
14548- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
14549+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
14550 #endif
14551 }
14552
14553@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
14554 * Traps from the kernel do not save sp and ss.
14555 * Use the helper function to retrieve sp.
14556 */
14557- if (offset == offsetof(struct pt_regs, sp) &&
14558- regs->cs == __KERNEL_CS)
14559- return kernel_stack_pointer(regs);
14560+ if (offset == offsetof(struct pt_regs, sp)) {
14561+ unsigned long cs = regs->cs & 0xffff;
14562+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
14563+ return kernel_stack_pointer(regs);
14564+ }
14565 #endif
14566 return *(unsigned long *)((unsigned long)regs + offset);
14567 }
14568diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
14569index fe1ec5b..dc5c3fe 100644
14570--- a/arch/x86/include/asm/realmode.h
14571+++ b/arch/x86/include/asm/realmode.h
14572@@ -22,16 +22,14 @@ struct real_mode_header {
14573 #endif
14574 /* APM/BIOS reboot */
14575 u32 machine_real_restart_asm;
14576-#ifdef CONFIG_X86_64
14577 u32 machine_real_restart_seg;
14578-#endif
14579 };
14580
14581 /* This must match data at trampoline_32/64.S */
14582 struct trampoline_header {
14583 #ifdef CONFIG_X86_32
14584 u32 start;
14585- u16 gdt_pad;
14586+ u16 boot_cs;
14587 u16 gdt_limit;
14588 u32 gdt_base;
14589 #else
14590diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
14591index a82c4f1..ac45053 100644
14592--- a/arch/x86/include/asm/reboot.h
14593+++ b/arch/x86/include/asm/reboot.h
14594@@ -6,13 +6,13 @@
14595 struct pt_regs;
14596
14597 struct machine_ops {
14598- void (*restart)(char *cmd);
14599- void (*halt)(void);
14600- void (*power_off)(void);
14601+ void (* __noreturn restart)(char *cmd);
14602+ void (* __noreturn halt)(void);
14603+ void (* __noreturn power_off)(void);
14604 void (*shutdown)(void);
14605 void (*crash_shutdown)(struct pt_regs *);
14606- void (*emergency_restart)(void);
14607-};
14608+ void (* __noreturn emergency_restart)(void);
14609+} __no_const;
14610
14611 extern struct machine_ops machine_ops;
14612
14613diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
14614index 2dbe4a7..ce1db00 100644
14615--- a/arch/x86/include/asm/rwsem.h
14616+++ b/arch/x86/include/asm/rwsem.h
14617@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
14618 {
14619 asm volatile("# beginning down_read\n\t"
14620 LOCK_PREFIX _ASM_INC "(%1)\n\t"
14621+
14622+#ifdef CONFIG_PAX_REFCOUNT
14623+ "jno 0f\n"
14624+ LOCK_PREFIX _ASM_DEC "(%1)\n"
14625+ "int $4\n0:\n"
14626+ _ASM_EXTABLE(0b, 0b)
14627+#endif
14628+
14629 /* adds 0x00000001 */
14630 " jns 1f\n"
14631 " call call_rwsem_down_read_failed\n"
14632@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
14633 "1:\n\t"
14634 " mov %1,%2\n\t"
14635 " add %3,%2\n\t"
14636+
14637+#ifdef CONFIG_PAX_REFCOUNT
14638+ "jno 0f\n"
14639+ "sub %3,%2\n"
14640+ "int $4\n0:\n"
14641+ _ASM_EXTABLE(0b, 0b)
14642+#endif
14643+
14644 " jle 2f\n\t"
14645 LOCK_PREFIX " cmpxchg %2,%0\n\t"
14646 " jnz 1b\n\t"
14647@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
14648 long tmp;
14649 asm volatile("# beginning down_write\n\t"
14650 LOCK_PREFIX " xadd %1,(%2)\n\t"
14651+
14652+#ifdef CONFIG_PAX_REFCOUNT
14653+ "jno 0f\n"
14654+ "mov %1,(%2)\n"
14655+ "int $4\n0:\n"
14656+ _ASM_EXTABLE(0b, 0b)
14657+#endif
14658+
14659 /* adds 0xffff0001, returns the old value */
14660 " test %1,%1\n\t"
14661 /* was the count 0 before? */
14662@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
14663 long tmp;
14664 asm volatile("# beginning __up_read\n\t"
14665 LOCK_PREFIX " xadd %1,(%2)\n\t"
14666+
14667+#ifdef CONFIG_PAX_REFCOUNT
14668+ "jno 0f\n"
14669+ "mov %1,(%2)\n"
14670+ "int $4\n0:\n"
14671+ _ASM_EXTABLE(0b, 0b)
14672+#endif
14673+
14674 /* subtracts 1, returns the old value */
14675 " jns 1f\n\t"
14676 " call call_rwsem_wake\n" /* expects old value in %edx */
14677@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
14678 long tmp;
14679 asm volatile("# beginning __up_write\n\t"
14680 LOCK_PREFIX " xadd %1,(%2)\n\t"
14681+
14682+#ifdef CONFIG_PAX_REFCOUNT
14683+ "jno 0f\n"
14684+ "mov %1,(%2)\n"
14685+ "int $4\n0:\n"
14686+ _ASM_EXTABLE(0b, 0b)
14687+#endif
14688+
14689 /* subtracts 0xffff0001, returns the old value */
14690 " jns 1f\n\t"
14691 " call call_rwsem_wake\n" /* expects old value in %edx */
14692@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
14693 {
14694 asm volatile("# beginning __downgrade_write\n\t"
14695 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
14696+
14697+#ifdef CONFIG_PAX_REFCOUNT
14698+ "jno 0f\n"
14699+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
14700+ "int $4\n0:\n"
14701+ _ASM_EXTABLE(0b, 0b)
14702+#endif
14703+
14704 /*
14705 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
14706 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
14707@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
14708 */
14709 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
14710 {
14711- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
14712+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
14713+
14714+#ifdef CONFIG_PAX_REFCOUNT
14715+ "jno 0f\n"
14716+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
14717+ "int $4\n0:\n"
14718+ _ASM_EXTABLE(0b, 0b)
14719+#endif
14720+
14721 : "+m" (sem->count)
14722 : "er" (delta));
14723 }
14724@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
14725 */
14726 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
14727 {
14728- return delta + xadd(&sem->count, delta);
14729+ return delta + xadd_check_overflow(&sem->count, delta);
14730 }
14731
14732 #endif /* __KERNEL__ */
14733diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
14734index c48a950..c6d7468 100644
14735--- a/arch/x86/include/asm/segment.h
14736+++ b/arch/x86/include/asm/segment.h
14737@@ -64,10 +64,15 @@
14738 * 26 - ESPFIX small SS
14739 * 27 - per-cpu [ offset to per-cpu data area ]
14740 * 28 - stack_canary-20 [ for stack protector ]
14741- * 29 - unused
14742- * 30 - unused
14743+ * 29 - PCI BIOS CS
14744+ * 30 - PCI BIOS DS
14745 * 31 - TSS for double fault handler
14746 */
14747+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
14748+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
14749+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
14750+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
14751+
14752 #define GDT_ENTRY_TLS_MIN 6
14753 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
14754
14755@@ -79,6 +84,8 @@
14756
14757 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
14758
14759+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
14760+
14761 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
14762
14763 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
14764@@ -104,6 +111,12 @@
14765 #define __KERNEL_STACK_CANARY 0
14766 #endif
14767
14768+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
14769+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
14770+
14771+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
14772+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
14773+
14774 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
14775
14776 /*
14777@@ -141,7 +154,7 @@
14778 */
14779
14780 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
14781-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
14782+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
14783
14784
14785 #else
14786@@ -165,6 +178,8 @@
14787 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
14788 #define __USER32_DS __USER_DS
14789
14790+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
14791+
14792 #define GDT_ENTRY_TSS 8 /* needs two entries */
14793 #define GDT_ENTRY_LDT 10 /* needs two entries */
14794 #define GDT_ENTRY_TLS_MIN 12
14795@@ -185,6 +200,7 @@
14796 #endif
14797
14798 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
14799+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
14800 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
14801 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
14802 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
14803@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
14804 {
14805 unsigned long __limit;
14806 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
14807- return __limit + 1;
14808+ return __limit;
14809 }
14810
14811 #endif /* !__ASSEMBLY__ */
14812diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
14813index b073aae..39f9bdd 100644
14814--- a/arch/x86/include/asm/smp.h
14815+++ b/arch/x86/include/asm/smp.h
14816@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
14817 /* cpus sharing the last level cache: */
14818 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
14819 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
14820-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
14821+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
14822
14823 static inline struct cpumask *cpu_sibling_mask(int cpu)
14824 {
14825@@ -79,7 +79,7 @@ struct smp_ops {
14826
14827 void (*send_call_func_ipi)(const struct cpumask *mask);
14828 void (*send_call_func_single_ipi)(int cpu);
14829-};
14830+} __no_const;
14831
14832 /* Globals due to paravirt */
14833 extern void set_cpu_sibling_map(int cpu);
14834@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
14835 extern int safe_smp_processor_id(void);
14836
14837 #elif defined(CONFIG_X86_64_SMP)
14838-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
14839-
14840-#define stack_smp_processor_id() \
14841-({ \
14842- struct thread_info *ti; \
14843- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
14844- ti->cpu; \
14845-})
14846+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
14847+#define stack_smp_processor_id() raw_smp_processor_id()
14848 #define safe_smp_processor_id() smp_processor_id()
14849
14850 #endif
14851diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
14852index 33692ea..350a534 100644
14853--- a/arch/x86/include/asm/spinlock.h
14854+++ b/arch/x86/include/asm/spinlock.h
14855@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
14856 static inline void arch_read_lock(arch_rwlock_t *rw)
14857 {
14858 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
14859+
14860+#ifdef CONFIG_PAX_REFCOUNT
14861+ "jno 0f\n"
14862+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
14863+ "int $4\n0:\n"
14864+ _ASM_EXTABLE(0b, 0b)
14865+#endif
14866+
14867 "jns 1f\n"
14868 "call __read_lock_failed\n\t"
14869 "1:\n"
14870@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
14871 static inline void arch_write_lock(arch_rwlock_t *rw)
14872 {
14873 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
14874+
14875+#ifdef CONFIG_PAX_REFCOUNT
14876+ "jno 0f\n"
14877+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
14878+ "int $4\n0:\n"
14879+ _ASM_EXTABLE(0b, 0b)
14880+#endif
14881+
14882 "jz 1f\n"
14883 "call __write_lock_failed\n\t"
14884 "1:\n"
14885@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
14886
14887 static inline void arch_read_unlock(arch_rwlock_t *rw)
14888 {
14889- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
14890+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
14891+
14892+#ifdef CONFIG_PAX_REFCOUNT
14893+ "jno 0f\n"
14894+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
14895+ "int $4\n0:\n"
14896+ _ASM_EXTABLE(0b, 0b)
14897+#endif
14898+
14899 :"+m" (rw->lock) : : "memory");
14900 }
14901
14902 static inline void arch_write_unlock(arch_rwlock_t *rw)
14903 {
14904- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
14905+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
14906+
14907+#ifdef CONFIG_PAX_REFCOUNT
14908+ "jno 0f\n"
14909+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
14910+ "int $4\n0:\n"
14911+ _ASM_EXTABLE(0b, 0b)
14912+#endif
14913+
14914 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
14915 }
14916
14917diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
14918index 6a99859..03cb807 100644
14919--- a/arch/x86/include/asm/stackprotector.h
14920+++ b/arch/x86/include/asm/stackprotector.h
14921@@ -47,7 +47,7 @@
14922 * head_32 for boot CPU and setup_per_cpu_areas() for others.
14923 */
14924 #define GDT_STACK_CANARY_INIT \
14925- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
14926+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
14927
14928 /*
14929 * Initialize the stackprotector canary value.
14930@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
14931
14932 static inline void load_stack_canary_segment(void)
14933 {
14934-#ifdef CONFIG_X86_32
14935+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14936 asm volatile ("mov %0, %%gs" : : "r" (0));
14937 #endif
14938 }
14939diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
14940index 70bbe39..4ae2bd4 100644
14941--- a/arch/x86/include/asm/stacktrace.h
14942+++ b/arch/x86/include/asm/stacktrace.h
14943@@ -11,28 +11,20 @@
14944
14945 extern int kstack_depth_to_print;
14946
14947-struct thread_info;
14948+struct task_struct;
14949 struct stacktrace_ops;
14950
14951-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
14952- unsigned long *stack,
14953- unsigned long bp,
14954- const struct stacktrace_ops *ops,
14955- void *data,
14956- unsigned long *end,
14957- int *graph);
14958+typedef unsigned long walk_stack_t(struct task_struct *task,
14959+ void *stack_start,
14960+ unsigned long *stack,
14961+ unsigned long bp,
14962+ const struct stacktrace_ops *ops,
14963+ void *data,
14964+ unsigned long *end,
14965+ int *graph);
14966
14967-extern unsigned long
14968-print_context_stack(struct thread_info *tinfo,
14969- unsigned long *stack, unsigned long bp,
14970- const struct stacktrace_ops *ops, void *data,
14971- unsigned long *end, int *graph);
14972-
14973-extern unsigned long
14974-print_context_stack_bp(struct thread_info *tinfo,
14975- unsigned long *stack, unsigned long bp,
14976- const struct stacktrace_ops *ops, void *data,
14977- unsigned long *end, int *graph);
14978+extern walk_stack_t print_context_stack;
14979+extern walk_stack_t print_context_stack_bp;
14980
14981 /* Generic stack tracer with callbacks */
14982
14983@@ -40,7 +32,7 @@ struct stacktrace_ops {
14984 void (*address)(void *data, unsigned long address, int reliable);
14985 /* On negative return stop dumping */
14986 int (*stack)(void *data, char *name);
14987- walk_stack_t walk_stack;
14988+ walk_stack_t *walk_stack;
14989 };
14990
14991 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
14992diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
14993index 4ec45b3..a4f0a8a 100644
14994--- a/arch/x86/include/asm/switch_to.h
14995+++ b/arch/x86/include/asm/switch_to.h
14996@@ -108,7 +108,7 @@ do { \
14997 "call __switch_to\n\t" \
14998 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
14999 __switch_canary \
15000- "movq %P[thread_info](%%rsi),%%r8\n\t" \
15001+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
15002 "movq %%rax,%%rdi\n\t" \
15003 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
15004 "jnz ret_from_fork\n\t" \
15005@@ -119,7 +119,7 @@ do { \
15006 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
15007 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
15008 [_tif_fork] "i" (_TIF_FORK), \
15009- [thread_info] "i" (offsetof(struct task_struct, stack)), \
15010+ [thread_info] "m" (current_tinfo), \
15011 [current_task] "m" (current_task) \
15012 __switch_canary_iparam \
15013 : "memory", "cc" __EXTRA_CLOBBER)
15014diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15015index 2d946e6..e453ec4 100644
15016--- a/arch/x86/include/asm/thread_info.h
15017+++ b/arch/x86/include/asm/thread_info.h
15018@@ -10,6 +10,7 @@
15019 #include <linux/compiler.h>
15020 #include <asm/page.h>
15021 #include <asm/types.h>
15022+#include <asm/percpu.h>
15023
15024 /*
15025 * low level task data that entry.S needs immediate access to
15026@@ -24,7 +25,6 @@ struct exec_domain;
15027 #include <linux/atomic.h>
15028
15029 struct thread_info {
15030- struct task_struct *task; /* main task structure */
15031 struct exec_domain *exec_domain; /* execution domain */
15032 __u32 flags; /* low level flags */
15033 __u32 status; /* thread synchronous flags */
15034@@ -34,19 +34,13 @@ struct thread_info {
15035 mm_segment_t addr_limit;
15036 struct restart_block restart_block;
15037 void __user *sysenter_return;
15038-#ifdef CONFIG_X86_32
15039- unsigned long previous_esp; /* ESP of the previous stack in
15040- case of nested (IRQ) stacks
15041- */
15042- __u8 supervisor_stack[0];
15043-#endif
15044+ unsigned long lowest_stack;
15045 unsigned int sig_on_uaccess_error:1;
15046 unsigned int uaccess_err:1; /* uaccess failed */
15047 };
15048
15049-#define INIT_THREAD_INFO(tsk) \
15050+#define INIT_THREAD_INFO \
15051 { \
15052- .task = &tsk, \
15053 .exec_domain = &default_exec_domain, \
15054 .flags = 0, \
15055 .cpu = 0, \
15056@@ -57,7 +51,7 @@ struct thread_info {
15057 }, \
15058 }
15059
15060-#define init_thread_info (init_thread_union.thread_info)
15061+#define init_thread_info (init_thread_union.stack)
15062 #define init_stack (init_thread_union.stack)
15063
15064 #else /* !__ASSEMBLY__ */
15065@@ -98,6 +92,7 @@ struct thread_info {
15066 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15067 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
15068 #define TIF_X32 30 /* 32-bit native x86-64 binary */
15069+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
15070
15071 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15072 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15073@@ -122,17 +117,18 @@ struct thread_info {
15074 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15075 #define _TIF_ADDR32 (1 << TIF_ADDR32)
15076 #define _TIF_X32 (1 << TIF_X32)
15077+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15078
15079 /* work to do in syscall_trace_enter() */
15080 #define _TIF_WORK_SYSCALL_ENTRY \
15081 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15082 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15083- _TIF_NOHZ)
15084+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15085
15086 /* work to do in syscall_trace_leave() */
15087 #define _TIF_WORK_SYSCALL_EXIT \
15088 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15089- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15090+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15091
15092 /* work to do on interrupt/exception return */
15093 #define _TIF_WORK_MASK \
15094@@ -143,7 +139,7 @@ struct thread_info {
15095 /* work to do on any return to user space */
15096 #define _TIF_ALLWORK_MASK \
15097 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15098- _TIF_NOHZ)
15099+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15100
15101 /* Only used for 64 bit */
15102 #define _TIF_DO_NOTIFY_MASK \
15103@@ -159,45 +155,40 @@ struct thread_info {
15104
15105 #define PREEMPT_ACTIVE 0x10000000
15106
15107-#ifdef CONFIG_X86_32
15108-
15109-#define STACK_WARN (THREAD_SIZE/8)
15110-/*
15111- * macros/functions for gaining access to the thread information structure
15112- *
15113- * preempt_count needs to be 1 initially, until the scheduler is functional.
15114- */
15115-#ifndef __ASSEMBLY__
15116-
15117-
15118-/* how to get the current stack pointer from C */
15119-register unsigned long current_stack_pointer asm("esp") __used;
15120-
15121-/* how to get the thread information struct from C */
15122-static inline struct thread_info *current_thread_info(void)
15123-{
15124- return (struct thread_info *)
15125- (current_stack_pointer & ~(THREAD_SIZE - 1));
15126-}
15127-
15128-#else /* !__ASSEMBLY__ */
15129-
15130+#ifdef __ASSEMBLY__
15131 /* how to get the thread information struct from ASM */
15132 #define GET_THREAD_INFO(reg) \
15133- movl $-THREAD_SIZE, reg; \
15134- andl %esp, reg
15135+ mov PER_CPU_VAR(current_tinfo), reg
15136
15137 /* use this one if reg already contains %esp */
15138-#define GET_THREAD_INFO_WITH_ESP(reg) \
15139- andl $-THREAD_SIZE, reg
15140+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15141+#else
15142+/* how to get the thread information struct from C */
15143+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15144+
15145+static __always_inline struct thread_info *current_thread_info(void)
15146+{
15147+ return this_cpu_read_stable(current_tinfo);
15148+}
15149+#endif
15150+
15151+#ifdef CONFIG_X86_32
15152+
15153+#define STACK_WARN (THREAD_SIZE/8)
15154+/*
15155+ * macros/functions for gaining access to the thread information structure
15156+ *
15157+ * preempt_count needs to be 1 initially, until the scheduler is functional.
15158+ */
15159+#ifndef __ASSEMBLY__
15160+
15161+/* how to get the current stack pointer from C */
15162+register unsigned long current_stack_pointer asm("esp") __used;
15163
15164 #endif
15165
15166 #else /* X86_32 */
15167
15168-#include <asm/percpu.h>
15169-#define KERNEL_STACK_OFFSET (5*8)
15170-
15171 /*
15172 * macros/functions for gaining access to the thread information structure
15173 * preempt_count needs to be 1 initially, until the scheduler is functional.
15174@@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
15175 #ifndef __ASSEMBLY__
15176 DECLARE_PER_CPU(unsigned long, kernel_stack);
15177
15178-static inline struct thread_info *current_thread_info(void)
15179-{
15180- struct thread_info *ti;
15181- ti = (void *)(this_cpu_read_stable(kernel_stack) +
15182- KERNEL_STACK_OFFSET - THREAD_SIZE);
15183- return ti;
15184-}
15185-
15186-#else /* !__ASSEMBLY__ */
15187-
15188-/* how to get the thread information struct from ASM */
15189-#define GET_THREAD_INFO(reg) \
15190- movq PER_CPU_VAR(kernel_stack),reg ; \
15191- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15192-
15193-/*
15194- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15195- * a certain register (to be used in assembler memory operands).
15196- */
15197-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15198-
15199+/* how to get the current stack pointer from C */
15200+register unsigned long current_stack_pointer asm("rsp") __used;
15201 #endif
15202
15203 #endif /* !X86_32 */
15204@@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
15205 extern void arch_task_cache_init(void);
15206 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15207 extern void arch_release_task_struct(struct task_struct *tsk);
15208+
15209+#define __HAVE_THREAD_FUNCTIONS
15210+#define task_thread_info(task) (&(task)->tinfo)
15211+#define task_stack_page(task) ((task)->stack)
15212+#define setup_thread_stack(p, org) do {} while (0)
15213+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15214+
15215 #endif
15216 #endif /* _ASM_X86_THREAD_INFO_H */
15217diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15218index 1709801..0a60f2f 100644
15219--- a/arch/x86/include/asm/uaccess.h
15220+++ b/arch/x86/include/asm/uaccess.h
15221@@ -7,6 +7,7 @@
15222 #include <linux/compiler.h>
15223 #include <linux/thread_info.h>
15224 #include <linux/string.h>
15225+#include <linux/sched.h>
15226 #include <asm/asm.h>
15227 #include <asm/page.h>
15228 #include <asm/smap.h>
15229@@ -29,7 +30,12 @@
15230
15231 #define get_ds() (KERNEL_DS)
15232 #define get_fs() (current_thread_info()->addr_limit)
15233+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15234+void __set_fs(mm_segment_t x);
15235+void set_fs(mm_segment_t x);
15236+#else
15237 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15238+#endif
15239
15240 #define segment_eq(a, b) ((a).seg == (b).seg)
15241
15242@@ -77,8 +83,33 @@
15243 * checks that the pointer is in the user space range - after calling
15244 * this function, memory access functions may still return -EFAULT.
15245 */
15246-#define access_ok(type, addr, size) \
15247- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15248+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15249+#define access_ok(type, addr, size) \
15250+({ \
15251+ long __size = size; \
15252+ unsigned long __addr = (unsigned long)addr; \
15253+ unsigned long __addr_ao = __addr & PAGE_MASK; \
15254+ unsigned long __end_ao = __addr + __size - 1; \
15255+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15256+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15257+ while(__addr_ao <= __end_ao) { \
15258+ char __c_ao; \
15259+ __addr_ao += PAGE_SIZE; \
15260+ if (__size > PAGE_SIZE) \
15261+ cond_resched(); \
15262+ if (__get_user(__c_ao, (char __user *)__addr)) \
15263+ break; \
15264+ if (type != VERIFY_WRITE) { \
15265+ __addr = __addr_ao; \
15266+ continue; \
15267+ } \
15268+ if (__put_user(__c_ao, (char __user *)__addr)) \
15269+ break; \
15270+ __addr = __addr_ao; \
15271+ } \
15272+ } \
15273+ __ret_ao; \
15274+})
15275
15276 /*
15277 * The exception table consists of pairs of addresses relative to the
15278@@ -189,13 +220,21 @@ extern int __get_user_bad(void);
15279 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15280 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15281
15282-
15283+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15284+#define __copyuser_seg "gs;"
15285+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15286+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15287+#else
15288+#define __copyuser_seg
15289+#define __COPYUSER_SET_ES
15290+#define __COPYUSER_RESTORE_ES
15291+#endif
15292
15293 #ifdef CONFIG_X86_32
15294 #define __put_user_asm_u64(x, addr, err, errret) \
15295 asm volatile(ASM_STAC "\n" \
15296- "1: movl %%eax,0(%2)\n" \
15297- "2: movl %%edx,4(%2)\n" \
15298+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15299+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15300 "3: " ASM_CLAC "\n" \
15301 ".section .fixup,\"ax\"\n" \
15302 "4: movl %3,%0\n" \
15303@@ -208,8 +247,8 @@ extern int __get_user_bad(void);
15304
15305 #define __put_user_asm_ex_u64(x, addr) \
15306 asm volatile(ASM_STAC "\n" \
15307- "1: movl %%eax,0(%1)\n" \
15308- "2: movl %%edx,4(%1)\n" \
15309+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
15310+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
15311 "3: " ASM_CLAC "\n" \
15312 _ASM_EXTABLE_EX(1b, 2b) \
15313 _ASM_EXTABLE_EX(2b, 3b) \
15314@@ -259,7 +298,7 @@ extern void __put_user_8(void);
15315 __typeof__(*(ptr)) __pu_val; \
15316 __chk_user_ptr(ptr); \
15317 might_fault(); \
15318- __pu_val = x; \
15319+ __pu_val = (x); \
15320 switch (sizeof(*(ptr))) { \
15321 case 1: \
15322 __put_user_x(1, __pu_val, ptr, __ret_pu); \
15323@@ -358,7 +397,7 @@ do { \
15324
15325 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15326 asm volatile(ASM_STAC "\n" \
15327- "1: mov"itype" %2,%"rtype"1\n" \
15328+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
15329 "2: " ASM_CLAC "\n" \
15330 ".section .fixup,\"ax\"\n" \
15331 "3: mov %3,%0\n" \
15332@@ -366,7 +405,7 @@ do { \
15333 " jmp 2b\n" \
15334 ".previous\n" \
15335 _ASM_EXTABLE(1b, 3b) \
15336- : "=r" (err), ltype(x) \
15337+ : "=r" (err), ltype (x) \
15338 : "m" (__m(addr)), "i" (errret), "0" (err))
15339
15340 #define __get_user_size_ex(x, ptr, size) \
15341@@ -391,7 +430,7 @@ do { \
15342 } while (0)
15343
15344 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
15345- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
15346+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
15347 "2:\n" \
15348 _ASM_EXTABLE_EX(1b, 2b) \
15349 : ltype(x) : "m" (__m(addr)))
15350@@ -408,13 +447,24 @@ do { \
15351 int __gu_err; \
15352 unsigned long __gu_val; \
15353 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
15354- (x) = (__force __typeof__(*(ptr)))__gu_val; \
15355+ (x) = (__typeof__(*(ptr)))__gu_val; \
15356 __gu_err; \
15357 })
15358
15359 /* FIXME: this hack is definitely wrong -AK */
15360 struct __large_struct { unsigned long buf[100]; };
15361-#define __m(x) (*(struct __large_struct __user *)(x))
15362+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15363+#define ____m(x) \
15364+({ \
15365+ unsigned long ____x = (unsigned long)(x); \
15366+ if (____x < PAX_USER_SHADOW_BASE) \
15367+ ____x += PAX_USER_SHADOW_BASE; \
15368+ (void __user *)____x; \
15369+})
15370+#else
15371+#define ____m(x) (x)
15372+#endif
15373+#define __m(x) (*(struct __large_struct __user *)____m(x))
15374
15375 /*
15376 * Tell gcc we read from memory instead of writing: this is because
15377@@ -423,7 +473,7 @@ struct __large_struct { unsigned long buf[100]; };
15378 */
15379 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15380 asm volatile(ASM_STAC "\n" \
15381- "1: mov"itype" %"rtype"1,%2\n" \
15382+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
15383 "2: " ASM_CLAC "\n" \
15384 ".section .fixup,\"ax\"\n" \
15385 "3: mov %3,%0\n" \
15386@@ -431,10 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
15387 ".previous\n" \
15388 _ASM_EXTABLE(1b, 3b) \
15389 : "=r"(err) \
15390- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
15391+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
15392
15393 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
15394- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
15395+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
15396 "2:\n" \
15397 _ASM_EXTABLE_EX(1b, 2b) \
15398 : : ltype(x), "m" (__m(addr)))
15399@@ -473,8 +523,12 @@ struct __large_struct { unsigned long buf[100]; };
15400 * On error, the variable @x is set to zero.
15401 */
15402
15403+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15404+#define __get_user(x, ptr) get_user((x), (ptr))
15405+#else
15406 #define __get_user(x, ptr) \
15407 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
15408+#endif
15409
15410 /**
15411 * __put_user: - Write a simple value into user space, with less checking.
15412@@ -496,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
15413 * Returns zero on success, or -EFAULT on error.
15414 */
15415
15416+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15417+#define __put_user(x, ptr) put_user((x), (ptr))
15418+#else
15419 #define __put_user(x, ptr) \
15420 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
15421+#endif
15422
15423 #define __get_user_unaligned __get_user
15424 #define __put_user_unaligned __put_user
15425@@ -515,7 +573,7 @@ struct __large_struct { unsigned long buf[100]; };
15426 #define get_user_ex(x, ptr) do { \
15427 unsigned long __gue_val; \
15428 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
15429- (x) = (__force __typeof__(*(ptr)))__gue_val; \
15430+ (x) = (__typeof__(*(ptr)))__gue_val; \
15431 } while (0)
15432
15433 #define put_user_try uaccess_try
15434@@ -532,8 +590,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
15435 extern __must_check long strlen_user(const char __user *str);
15436 extern __must_check long strnlen_user(const char __user *str, long n);
15437
15438-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
15439-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
15440+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15441+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15442
15443 /*
15444 * movsl can be slow when source and dest are not both 8-byte aligned
15445diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
15446index 7f760a9..04b1c65 100644
15447--- a/arch/x86/include/asm/uaccess_32.h
15448+++ b/arch/x86/include/asm/uaccess_32.h
15449@@ -11,15 +11,15 @@
15450 #include <asm/page.h>
15451
15452 unsigned long __must_check __copy_to_user_ll
15453- (void __user *to, const void *from, unsigned long n);
15454+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
15455 unsigned long __must_check __copy_from_user_ll
15456- (void *to, const void __user *from, unsigned long n);
15457+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15458 unsigned long __must_check __copy_from_user_ll_nozero
15459- (void *to, const void __user *from, unsigned long n);
15460+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15461 unsigned long __must_check __copy_from_user_ll_nocache
15462- (void *to, const void __user *from, unsigned long n);
15463+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15464 unsigned long __must_check __copy_from_user_ll_nocache_nozero
15465- (void *to, const void __user *from, unsigned long n);
15466+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15467
15468 /**
15469 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
15470@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
15471 static __always_inline unsigned long __must_check
15472 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
15473 {
15474+ if ((long)n < 0)
15475+ return n;
15476+
15477+ check_object_size(from, n, true);
15478+
15479 if (__builtin_constant_p(n)) {
15480 unsigned long ret;
15481
15482@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
15483 __copy_to_user(void __user *to, const void *from, unsigned long n)
15484 {
15485 might_fault();
15486+
15487 return __copy_to_user_inatomic(to, from, n);
15488 }
15489
15490 static __always_inline unsigned long
15491 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
15492 {
15493+ if ((long)n < 0)
15494+ return n;
15495+
15496 /* Avoid zeroing the tail if the copy fails..
15497 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
15498 * but as the zeroing behaviour is only significant when n is not
15499@@ -137,6 +146,12 @@ static __always_inline unsigned long
15500 __copy_from_user(void *to, const void __user *from, unsigned long n)
15501 {
15502 might_fault();
15503+
15504+ if ((long)n < 0)
15505+ return n;
15506+
15507+ check_object_size(to, n, false);
15508+
15509 if (__builtin_constant_p(n)) {
15510 unsigned long ret;
15511
15512@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
15513 const void __user *from, unsigned long n)
15514 {
15515 might_fault();
15516+
15517+ if ((long)n < 0)
15518+ return n;
15519+
15520 if (__builtin_constant_p(n)) {
15521 unsigned long ret;
15522
15523@@ -181,15 +200,19 @@ static __always_inline unsigned long
15524 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
15525 unsigned long n)
15526 {
15527- return __copy_from_user_ll_nocache_nozero(to, from, n);
15528+ if ((long)n < 0)
15529+ return n;
15530+
15531+ return __copy_from_user_ll_nocache_nozero(to, from, n);
15532 }
15533
15534-unsigned long __must_check copy_to_user(void __user *to,
15535- const void *from, unsigned long n);
15536-unsigned long __must_check _copy_from_user(void *to,
15537- const void __user *from,
15538- unsigned long n);
15539-
15540+extern void copy_to_user_overflow(void)
15541+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15542+ __compiletime_error("copy_to_user() buffer size is not provably correct")
15543+#else
15544+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
15545+#endif
15546+;
15547
15548 extern void copy_from_user_overflow(void)
15549 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15550@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
15551 #endif
15552 ;
15553
15554-static inline unsigned long __must_check copy_from_user(void *to,
15555- const void __user *from,
15556- unsigned long n)
15557+/**
15558+ * copy_to_user: - Copy a block of data into user space.
15559+ * @to: Destination address, in user space.
15560+ * @from: Source address, in kernel space.
15561+ * @n: Number of bytes to copy.
15562+ *
15563+ * Context: User context only. This function may sleep.
15564+ *
15565+ * Copy data from kernel space to user space.
15566+ *
15567+ * Returns number of bytes that could not be copied.
15568+ * On success, this will be zero.
15569+ */
15570+static inline unsigned long __must_check
15571+copy_to_user(void __user *to, const void *from, unsigned long n)
15572 {
15573- int sz = __compiletime_object_size(to);
15574+ size_t sz = __compiletime_object_size(from);
15575
15576- if (likely(sz == -1 || sz >= n))
15577- n = _copy_from_user(to, from, n);
15578- else
15579+ if (unlikely(sz != (size_t)-1 && sz < n))
15580+ copy_to_user_overflow();
15581+ else if (access_ok(VERIFY_WRITE, to, n))
15582+ n = __copy_to_user(to, from, n);
15583+ return n;
15584+}
15585+
15586+/**
15587+ * copy_from_user: - Copy a block of data from user space.
15588+ * @to: Destination address, in kernel space.
15589+ * @from: Source address, in user space.
15590+ * @n: Number of bytes to copy.
15591+ *
15592+ * Context: User context only. This function may sleep.
15593+ *
15594+ * Copy data from user space to kernel space.
15595+ *
15596+ * Returns number of bytes that could not be copied.
15597+ * On success, this will be zero.
15598+ *
15599+ * If some data could not be copied, this function will pad the copied
15600+ * data to the requested size using zero bytes.
15601+ */
15602+static inline unsigned long __must_check
15603+copy_from_user(void *to, const void __user *from, unsigned long n)
15604+{
15605+ size_t sz = __compiletime_object_size(to);
15606+
15607+ check_object_size(to, n, false);
15608+
15609+ if (unlikely(sz != (size_t)-1 && sz < n))
15610 copy_from_user_overflow();
15611-
15612+ else if (access_ok(VERIFY_READ, from, n))
15613+ n = __copy_from_user(to, from, n);
15614+ else if ((long)n > 0)
15615+ memset(to, 0, n);
15616 return n;
15617 }
15618
15619diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
15620index 142810c..747941a 100644
15621--- a/arch/x86/include/asm/uaccess_64.h
15622+++ b/arch/x86/include/asm/uaccess_64.h
15623@@ -10,6 +10,9 @@
15624 #include <asm/alternative.h>
15625 #include <asm/cpufeature.h>
15626 #include <asm/page.h>
15627+#include <asm/pgtable.h>
15628+
15629+#define set_fs(x) (current_thread_info()->addr_limit = (x))
15630
15631 /*
15632 * Copy To/From Userspace
15633@@ -17,13 +20,13 @@
15634
15635 /* Handles exceptions in both to and from, but doesn't do access_ok */
15636 __must_check unsigned long
15637-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
15638+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
15639 __must_check unsigned long
15640-copy_user_generic_string(void *to, const void *from, unsigned len);
15641+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
15642 __must_check unsigned long
15643-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
15644+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
15645
15646-static __always_inline __must_check unsigned long
15647+static __always_inline __must_check __size_overflow(3) unsigned long
15648 copy_user_generic(void *to, const void *from, unsigned len)
15649 {
15650 unsigned ret;
15651@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
15652 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
15653 "=d" (len)),
15654 "1" (to), "2" (from), "3" (len)
15655- : "memory", "rcx", "r8", "r9", "r10", "r11");
15656+ : "memory", "rcx", "r8", "r9", "r11");
15657 return ret;
15658 }
15659
15660+static __always_inline __must_check unsigned long
15661+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
15662+static __always_inline __must_check unsigned long
15663+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
15664 __must_check unsigned long
15665-_copy_to_user(void __user *to, const void *from, unsigned len);
15666-__must_check unsigned long
15667-_copy_from_user(void *to, const void __user *from, unsigned len);
15668-__must_check unsigned long
15669-copy_in_user(void __user *to, const void __user *from, unsigned len);
15670+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
15671+
15672+extern void copy_to_user_overflow(void)
15673+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15674+ __compiletime_error("copy_to_user() buffer size is not provably correct")
15675+#else
15676+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
15677+#endif
15678+;
15679+
15680+extern void copy_from_user_overflow(void)
15681+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15682+ __compiletime_error("copy_from_user() buffer size is not provably correct")
15683+#else
15684+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
15685+#endif
15686+;
15687
15688 static inline unsigned long __must_check copy_from_user(void *to,
15689 const void __user *from,
15690 unsigned long n)
15691 {
15692- int sz = __compiletime_object_size(to);
15693-
15694 might_fault();
15695- if (likely(sz == -1 || sz >= n))
15696- n = _copy_from_user(to, from, n);
15697-#ifdef CONFIG_DEBUG_VM
15698- else
15699- WARN(1, "Buffer overflow detected!\n");
15700-#endif
15701+
15702+ check_object_size(to, n, false);
15703+
15704+ if (access_ok(VERIFY_READ, from, n))
15705+ n = __copy_from_user(to, from, n);
15706+ else if (n < INT_MAX)
15707+ memset(to, 0, n);
15708 return n;
15709 }
15710
15711 static __always_inline __must_check
15712-int copy_to_user(void __user *dst, const void *src, unsigned size)
15713+int copy_to_user(void __user *dst, const void *src, unsigned long size)
15714 {
15715 might_fault();
15716
15717- return _copy_to_user(dst, src, size);
15718+ if (access_ok(VERIFY_WRITE, dst, size))
15719+ size = __copy_to_user(dst, src, size);
15720+ return size;
15721 }
15722
15723 static __always_inline __must_check
15724-int __copy_from_user(void *dst, const void __user *src, unsigned size)
15725+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
15726 {
15727- int ret = 0;
15728+ size_t sz = __compiletime_object_size(dst);
15729+ unsigned ret = 0;
15730
15731 might_fault();
15732+
15733+ if (size > INT_MAX)
15734+ return size;
15735+
15736+ check_object_size(dst, size, false);
15737+
15738+#ifdef CONFIG_PAX_MEMORY_UDEREF
15739+ if (!__access_ok(VERIFY_READ, src, size))
15740+ return size;
15741+#endif
15742+
15743+ if (unlikely(sz != (size_t)-1 && sz < size)) {
15744+ copy_from_user_overflow();
15745+ return size;
15746+ }
15747+
15748 if (!__builtin_constant_p(size))
15749- return copy_user_generic(dst, (__force void *)src, size);
15750+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
15751 switch (size) {
15752- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
15753+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
15754 ret, "b", "b", "=q", 1);
15755 return ret;
15756- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
15757+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
15758 ret, "w", "w", "=r", 2);
15759 return ret;
15760- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
15761+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
15762 ret, "l", "k", "=r", 4);
15763 return ret;
15764- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
15765+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
15766 ret, "q", "", "=r", 8);
15767 return ret;
15768 case 10:
15769- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
15770+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
15771 ret, "q", "", "=r", 10);
15772 if (unlikely(ret))
15773 return ret;
15774 __get_user_asm(*(u16 *)(8 + (char *)dst),
15775- (u16 __user *)(8 + (char __user *)src),
15776+ (const u16 __user *)(8 + (const char __user *)src),
15777 ret, "w", "w", "=r", 2);
15778 return ret;
15779 case 16:
15780- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
15781+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
15782 ret, "q", "", "=r", 16);
15783 if (unlikely(ret))
15784 return ret;
15785 __get_user_asm(*(u64 *)(8 + (char *)dst),
15786- (u64 __user *)(8 + (char __user *)src),
15787+ (const u64 __user *)(8 + (const char __user *)src),
15788 ret, "q", "", "=r", 8);
15789 return ret;
15790 default:
15791- return copy_user_generic(dst, (__force void *)src, size);
15792+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
15793 }
15794 }
15795
15796 static __always_inline __must_check
15797-int __copy_to_user(void __user *dst, const void *src, unsigned size)
15798+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
15799 {
15800- int ret = 0;
15801+ size_t sz = __compiletime_object_size(src);
15802+ unsigned ret = 0;
15803
15804 might_fault();
15805+
15806+ if (size > INT_MAX)
15807+ return size;
15808+
15809+ check_object_size(src, size, true);
15810+
15811+#ifdef CONFIG_PAX_MEMORY_UDEREF
15812+ if (!__access_ok(VERIFY_WRITE, dst, size))
15813+ return size;
15814+#endif
15815+
15816+ if (unlikely(sz != (size_t)-1 && sz < size)) {
15817+ copy_to_user_overflow();
15818+ return size;
15819+ }
15820+
15821 if (!__builtin_constant_p(size))
15822- return copy_user_generic((__force void *)dst, src, size);
15823+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
15824 switch (size) {
15825- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
15826+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
15827 ret, "b", "b", "iq", 1);
15828 return ret;
15829- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
15830+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
15831 ret, "w", "w", "ir", 2);
15832 return ret;
15833- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
15834+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
15835 ret, "l", "k", "ir", 4);
15836 return ret;
15837- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
15838+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
15839 ret, "q", "", "er", 8);
15840 return ret;
15841 case 10:
15842- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
15843+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
15844 ret, "q", "", "er", 10);
15845 if (unlikely(ret))
15846 return ret;
15847 asm("":::"memory");
15848- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
15849+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
15850 ret, "w", "w", "ir", 2);
15851 return ret;
15852 case 16:
15853- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
15854+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
15855 ret, "q", "", "er", 16);
15856 if (unlikely(ret))
15857 return ret;
15858 asm("":::"memory");
15859- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
15860+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
15861 ret, "q", "", "er", 8);
15862 return ret;
15863 default:
15864- return copy_user_generic((__force void *)dst, src, size);
15865+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
15866 }
15867 }
15868
15869 static __always_inline __must_check
15870-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15871+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
15872 {
15873- int ret = 0;
15874+ unsigned ret = 0;
15875
15876 might_fault();
15877+
15878+ if (size > INT_MAX)
15879+ return size;
15880+
15881+#ifdef CONFIG_PAX_MEMORY_UDEREF
15882+ if (!__access_ok(VERIFY_READ, src, size))
15883+ return size;
15884+ if (!__access_ok(VERIFY_WRITE, dst, size))
15885+ return size;
15886+#endif
15887+
15888 if (!__builtin_constant_p(size))
15889- return copy_user_generic((__force void *)dst,
15890- (__force void *)src, size);
15891+ return copy_user_generic((__force_kernel void *)____m(dst),
15892+ (__force_kernel const void *)____m(src), size);
15893 switch (size) {
15894 case 1: {
15895 u8 tmp;
15896- __get_user_asm(tmp, (u8 __user *)src,
15897+ __get_user_asm(tmp, (const u8 __user *)src,
15898 ret, "b", "b", "=q", 1);
15899 if (likely(!ret))
15900 __put_user_asm(tmp, (u8 __user *)dst,
15901@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15902 }
15903 case 2: {
15904 u16 tmp;
15905- __get_user_asm(tmp, (u16 __user *)src,
15906+ __get_user_asm(tmp, (const u16 __user *)src,
15907 ret, "w", "w", "=r", 2);
15908 if (likely(!ret))
15909 __put_user_asm(tmp, (u16 __user *)dst,
15910@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15911
15912 case 4: {
15913 u32 tmp;
15914- __get_user_asm(tmp, (u32 __user *)src,
15915+ __get_user_asm(tmp, (const u32 __user *)src,
15916 ret, "l", "k", "=r", 4);
15917 if (likely(!ret))
15918 __put_user_asm(tmp, (u32 __user *)dst,
15919@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15920 }
15921 case 8: {
15922 u64 tmp;
15923- __get_user_asm(tmp, (u64 __user *)src,
15924+ __get_user_asm(tmp, (const u64 __user *)src,
15925 ret, "q", "", "=r", 8);
15926 if (likely(!ret))
15927 __put_user_asm(tmp, (u64 __user *)dst,
15928@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15929 return ret;
15930 }
15931 default:
15932- return copy_user_generic((__force void *)dst,
15933- (__force void *)src, size);
15934+ return copy_user_generic((__force_kernel void *)____m(dst),
15935+ (__force_kernel const void *)____m(src), size);
15936 }
15937 }
15938
15939 static __must_check __always_inline int
15940-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
15941+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
15942 {
15943- return copy_user_generic(dst, (__force const void *)src, size);
15944+ if (size > INT_MAX)
15945+ return size;
15946+
15947+#ifdef CONFIG_PAX_MEMORY_UDEREF
15948+ if (!__access_ok(VERIFY_READ, src, size))
15949+ return size;
15950+#endif
15951+
15952+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
15953 }
15954
15955-static __must_check __always_inline int
15956-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
15957+static __must_check __always_inline unsigned long
15958+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
15959 {
15960- return copy_user_generic((__force void *)dst, src, size);
15961+ if (size > INT_MAX)
15962+ return size;
15963+
15964+#ifdef CONFIG_PAX_MEMORY_UDEREF
15965+ if (!__access_ok(VERIFY_WRITE, dst, size))
15966+ return size;
15967+#endif
15968+
15969+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
15970 }
15971
15972-extern long __copy_user_nocache(void *dst, const void __user *src,
15973- unsigned size, int zerorest);
15974+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
15975+ unsigned long size, int zerorest) __size_overflow(3);
15976
15977-static inline int
15978-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
15979+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
15980 {
15981 might_sleep();
15982+
15983+ if (size > INT_MAX)
15984+ return size;
15985+
15986+#ifdef CONFIG_PAX_MEMORY_UDEREF
15987+ if (!__access_ok(VERIFY_READ, src, size))
15988+ return size;
15989+#endif
15990+
15991 return __copy_user_nocache(dst, src, size, 1);
15992 }
15993
15994-static inline int
15995-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
15996- unsigned size)
15997+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
15998+ unsigned long size)
15999 {
16000+ if (size > INT_MAX)
16001+ return size;
16002+
16003+#ifdef CONFIG_PAX_MEMORY_UDEREF
16004+ if (!__access_ok(VERIFY_READ, src, size))
16005+ return size;
16006+#endif
16007+
16008 return __copy_user_nocache(dst, src, size, 0);
16009 }
16010
16011-unsigned long
16012-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
16013+extern unsigned long
16014+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
16015
16016 #endif /* _ASM_X86_UACCESS_64_H */
16017diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
16018index 5b238981..77fdd78 100644
16019--- a/arch/x86/include/asm/word-at-a-time.h
16020+++ b/arch/x86/include/asm/word-at-a-time.h
16021@@ -11,7 +11,7 @@
16022 * and shift, for example.
16023 */
16024 struct word_at_a_time {
16025- const unsigned long one_bits, high_bits;
16026+ unsigned long one_bits, high_bits;
16027 };
16028
16029 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
16030diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
16031index 5769349..a3d3e2a 100644
16032--- a/arch/x86/include/asm/x86_init.h
16033+++ b/arch/x86/include/asm/x86_init.h
16034@@ -141,7 +141,7 @@ struct x86_init_ops {
16035 struct x86_init_timers timers;
16036 struct x86_init_iommu iommu;
16037 struct x86_init_pci pci;
16038-};
16039+} __no_const;
16040
16041 /**
16042 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
16043@@ -152,7 +152,7 @@ struct x86_cpuinit_ops {
16044 void (*setup_percpu_clockev)(void);
16045 void (*early_percpu_clock_init)(void);
16046 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
16047-};
16048+} __no_const;
16049
16050 /**
16051 * struct x86_platform_ops - platform specific runtime functions
16052@@ -178,7 +178,7 @@ struct x86_platform_ops {
16053 void (*save_sched_clock_state)(void);
16054 void (*restore_sched_clock_state)(void);
16055 void (*apic_post_init)(void);
16056-};
16057+} __no_const;
16058
16059 struct pci_dev;
16060
16061@@ -187,14 +187,14 @@ struct x86_msi_ops {
16062 void (*teardown_msi_irq)(unsigned int irq);
16063 void (*teardown_msi_irqs)(struct pci_dev *dev);
16064 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
16065-};
16066+} __no_const;
16067
16068 struct x86_io_apic_ops {
16069 void (*init) (void);
16070 unsigned int (*read) (unsigned int apic, unsigned int reg);
16071 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
16072 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
16073-};
16074+} __no_const;
16075
16076 extern struct x86_init_ops x86_init;
16077 extern struct x86_cpuinit_ops x86_cpuinit;
16078diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16079index 0415cda..b43d877 100644
16080--- a/arch/x86/include/asm/xsave.h
16081+++ b/arch/x86/include/asm/xsave.h
16082@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16083 return -EFAULT;
16084
16085 __asm__ __volatile__(ASM_STAC "\n"
16086- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16087+ "1:"
16088+ __copyuser_seg
16089+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16090 "2: " ASM_CLAC "\n"
16091 ".section .fixup,\"ax\"\n"
16092 "3: movl $-1,%[err]\n"
16093@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16094 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16095 {
16096 int err;
16097- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16098+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16099 u32 lmask = mask;
16100 u32 hmask = mask >> 32;
16101
16102 __asm__ __volatile__(ASM_STAC "\n"
16103- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16104+ "1:"
16105+ __copyuser_seg
16106+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16107 "2: " ASM_CLAC "\n"
16108 ".section .fixup,\"ax\"\n"
16109 "3: movl $-1,%[err]\n"
16110diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16111index bbae024..e1528f9 100644
16112--- a/arch/x86/include/uapi/asm/e820.h
16113+++ b/arch/x86/include/uapi/asm/e820.h
16114@@ -63,7 +63,7 @@ struct e820map {
16115 #define ISA_START_ADDRESS 0xa0000
16116 #define ISA_END_ADDRESS 0x100000
16117
16118-#define BIOS_BEGIN 0x000a0000
16119+#define BIOS_BEGIN 0x000c0000
16120 #define BIOS_END 0x00100000
16121
16122 #define BIOS_ROM_BASE 0xffe00000
16123diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16124index 34e923a..0c6bb6e 100644
16125--- a/arch/x86/kernel/Makefile
16126+++ b/arch/x86/kernel/Makefile
16127@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16128 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16129 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16130 obj-y += probe_roms.o
16131-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16132+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16133 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16134 obj-y += syscall_$(BITS).o
16135 obj-$(CONFIG_X86_64) += vsyscall_64.o
16136diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
16137index bacf4b0..4ede72e 100644
16138--- a/arch/x86/kernel/acpi/boot.c
16139+++ b/arch/x86/kernel/acpi/boot.c
16140@@ -1358,7 +1358,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
16141 * If your system is blacklisted here, but you find that acpi=force
16142 * works for you, please contact linux-acpi@vger.kernel.org
16143 */
16144-static struct dmi_system_id __initdata acpi_dmi_table[] = {
16145+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
16146 /*
16147 * Boxes that need ACPI disabled
16148 */
16149@@ -1433,7 +1433,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
16150 };
16151
16152 /* second table for DMI checks that should run after early-quirks */
16153-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
16154+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
16155 /*
16156 * HP laptops which use a DSDT reporting as HP/SB400/10000,
16157 * which includes some code which overrides all temperature
16158diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16159index d5e0d71..6533e08 100644
16160--- a/arch/x86/kernel/acpi/sleep.c
16161+++ b/arch/x86/kernel/acpi/sleep.c
16162@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16163 #else /* CONFIG_64BIT */
16164 #ifdef CONFIG_SMP
16165 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16166+
16167+ pax_open_kernel();
16168 early_gdt_descr.address =
16169 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16170+ pax_close_kernel();
16171+
16172 initial_gs = per_cpu_offset(smp_processor_id());
16173 #endif
16174 initial_code = (unsigned long)wakeup_long64;
16175diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16176index 13ab720..95d5442 100644
16177--- a/arch/x86/kernel/acpi/wakeup_32.S
16178+++ b/arch/x86/kernel/acpi/wakeup_32.S
16179@@ -30,13 +30,11 @@ wakeup_pmode_return:
16180 # and restore the stack ... but you need gdt for this to work
16181 movl saved_context_esp, %esp
16182
16183- movl %cs:saved_magic, %eax
16184- cmpl $0x12345678, %eax
16185+ cmpl $0x12345678, saved_magic
16186 jne bogus_magic
16187
16188 # jump to place where we left off
16189- movl saved_eip, %eax
16190- jmp *%eax
16191+ jmp *(saved_eip)
16192
16193 bogus_magic:
16194 jmp bogus_magic
16195diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16196index ef5ccca..bd83949 100644
16197--- a/arch/x86/kernel/alternative.c
16198+++ b/arch/x86/kernel/alternative.c
16199@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16200 */
16201 for (a = start; a < end; a++) {
16202 instr = (u8 *)&a->instr_offset + a->instr_offset;
16203+
16204+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16205+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16206+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16207+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16208+#endif
16209+
16210 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16211 BUG_ON(a->replacementlen > a->instrlen);
16212 BUG_ON(a->instrlen > sizeof(insnbuf));
16213@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16214 for (poff = start; poff < end; poff++) {
16215 u8 *ptr = (u8 *)poff + *poff;
16216
16217+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16218+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16219+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16220+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16221+#endif
16222+
16223 if (!*poff || ptr < text || ptr >= text_end)
16224 continue;
16225 /* turn DS segment override prefix into lock prefix */
16226- if (*ptr == 0x3e)
16227+ if (*ktla_ktva(ptr) == 0x3e)
16228 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16229 }
16230 mutex_unlock(&text_mutex);
16231@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16232 for (poff = start; poff < end; poff++) {
16233 u8 *ptr = (u8 *)poff + *poff;
16234
16235+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16236+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16237+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16238+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16239+#endif
16240+
16241 if (!*poff || ptr < text || ptr >= text_end)
16242 continue;
16243 /* turn lock prefix into DS segment override prefix */
16244- if (*ptr == 0xf0)
16245+ if (*ktla_ktva(ptr) == 0xf0)
16246 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16247 }
16248 mutex_unlock(&text_mutex);
16249@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16250
16251 BUG_ON(p->len > MAX_PATCH_LEN);
16252 /* prep the buffer with the original instructions */
16253- memcpy(insnbuf, p->instr, p->len);
16254+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16255 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16256 (unsigned long)p->instr, p->len);
16257
16258@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16259 if (!uniproc_patched || num_possible_cpus() == 1)
16260 free_init_pages("SMP alternatives",
16261 (unsigned long)__smp_locks,
16262- (unsigned long)__smp_locks_end);
16263+ PAGE_ALIGN((unsigned long)__smp_locks_end));
16264 #endif
16265
16266 apply_paravirt(__parainstructions, __parainstructions_end);
16267@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16268 * instructions. And on the local CPU you need to be protected again NMI or MCE
16269 * handlers seeing an inconsistent instruction while you patch.
16270 */
16271-void *__init_or_module text_poke_early(void *addr, const void *opcode,
16272+void *__kprobes text_poke_early(void *addr, const void *opcode,
16273 size_t len)
16274 {
16275 unsigned long flags;
16276 local_irq_save(flags);
16277- memcpy(addr, opcode, len);
16278+
16279+ pax_open_kernel();
16280+ memcpy(ktla_ktva(addr), opcode, len);
16281 sync_core();
16282+ pax_close_kernel();
16283+
16284 local_irq_restore(flags);
16285 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16286 that causes hangs on some VIA CPUs. */
16287@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16288 */
16289 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16290 {
16291- unsigned long flags;
16292- char *vaddr;
16293+ unsigned char *vaddr = ktla_ktva(addr);
16294 struct page *pages[2];
16295- int i;
16296+ size_t i;
16297
16298 if (!core_kernel_text((unsigned long)addr)) {
16299- pages[0] = vmalloc_to_page(addr);
16300- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16301+ pages[0] = vmalloc_to_page(vaddr);
16302+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16303 } else {
16304- pages[0] = virt_to_page(addr);
16305+ pages[0] = virt_to_page(vaddr);
16306 WARN_ON(!PageReserved(pages[0]));
16307- pages[1] = virt_to_page(addr + PAGE_SIZE);
16308+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
16309 }
16310 BUG_ON(!pages[0]);
16311- local_irq_save(flags);
16312- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
16313- if (pages[1])
16314- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
16315- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
16316- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
16317- clear_fixmap(FIX_TEXT_POKE0);
16318- if (pages[1])
16319- clear_fixmap(FIX_TEXT_POKE1);
16320- local_flush_tlb();
16321- sync_core();
16322- /* Could also do a CLFLUSH here to speed up CPU recovery; but
16323- that causes hangs on some VIA CPUs. */
16324+ text_poke_early(addr, opcode, len);
16325 for (i = 0; i < len; i++)
16326- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
16327- local_irq_restore(flags);
16328+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
16329 return addr;
16330 }
16331
16332diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
16333index cbf5121..812b537 100644
16334--- a/arch/x86/kernel/apic/apic.c
16335+++ b/arch/x86/kernel/apic/apic.c
16336@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
16337 /*
16338 * Debug level, exported for io_apic.c
16339 */
16340-unsigned int apic_verbosity;
16341+int apic_verbosity;
16342
16343 int pic_mode;
16344
16345@@ -1956,7 +1956,7 @@ void smp_error_interrupt(struct pt_regs *regs)
16346 apic_write(APIC_ESR, 0);
16347 v1 = apic_read(APIC_ESR);
16348 ack_APIC_irq();
16349- atomic_inc(&irq_err_count);
16350+ atomic_inc_unchecked(&irq_err_count);
16351
16352 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
16353 smp_processor_id(), v0 , v1);
16354diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
16355index 00c77cf..2dc6a2d 100644
16356--- a/arch/x86/kernel/apic/apic_flat_64.c
16357+++ b/arch/x86/kernel/apic/apic_flat_64.c
16358@@ -157,7 +157,7 @@ static int flat_probe(void)
16359 return 1;
16360 }
16361
16362-static struct apic apic_flat = {
16363+static struct apic apic_flat __read_only = {
16364 .name = "flat",
16365 .probe = flat_probe,
16366 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
16367@@ -271,7 +271,7 @@ static int physflat_probe(void)
16368 return 0;
16369 }
16370
16371-static struct apic apic_physflat = {
16372+static struct apic apic_physflat __read_only = {
16373
16374 .name = "physical flat",
16375 .probe = physflat_probe,
16376diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
16377index e145f28..2752888 100644
16378--- a/arch/x86/kernel/apic/apic_noop.c
16379+++ b/arch/x86/kernel/apic/apic_noop.c
16380@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
16381 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
16382 }
16383
16384-struct apic apic_noop = {
16385+struct apic apic_noop __read_only = {
16386 .name = "noop",
16387 .probe = noop_probe,
16388 .acpi_madt_oem_check = NULL,
16389diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
16390index d50e364..543bee3 100644
16391--- a/arch/x86/kernel/apic/bigsmp_32.c
16392+++ b/arch/x86/kernel/apic/bigsmp_32.c
16393@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
16394 return dmi_bigsmp;
16395 }
16396
16397-static struct apic apic_bigsmp = {
16398+static struct apic apic_bigsmp __read_only = {
16399
16400 .name = "bigsmp",
16401 .probe = probe_bigsmp,
16402diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
16403index 0874799..a7a7892 100644
16404--- a/arch/x86/kernel/apic/es7000_32.c
16405+++ b/arch/x86/kernel/apic/es7000_32.c
16406@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
16407 return ret && es7000_apic_is_cluster();
16408 }
16409
16410-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
16411-static struct apic __refdata apic_es7000_cluster = {
16412+static struct apic apic_es7000_cluster __read_only = {
16413
16414 .name = "es7000",
16415 .probe = probe_es7000,
16416@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
16417 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
16418 };
16419
16420-static struct apic __refdata apic_es7000 = {
16421+static struct apic apic_es7000 __read_only = {
16422
16423 .name = "es7000",
16424 .probe = probe_es7000,
16425diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
16426index b739d39..aebc14c 100644
16427--- a/arch/x86/kernel/apic/io_apic.c
16428+++ b/arch/x86/kernel/apic/io_apic.c
16429@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
16430 }
16431 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
16432
16433-void lock_vector_lock(void)
16434+void lock_vector_lock(void) __acquires(vector_lock)
16435 {
16436 /* Used to the online set of cpus does not change
16437 * during assign_irq_vector.
16438@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
16439 raw_spin_lock(&vector_lock);
16440 }
16441
16442-void unlock_vector_lock(void)
16443+void unlock_vector_lock(void) __releases(vector_lock)
16444 {
16445 raw_spin_unlock(&vector_lock);
16446 }
16447@@ -2399,7 +2399,7 @@ static void ack_apic_edge(struct irq_data *data)
16448 ack_APIC_irq();
16449 }
16450
16451-atomic_t irq_mis_count;
16452+atomic_unchecked_t irq_mis_count;
16453
16454 #ifdef CONFIG_GENERIC_PENDING_IRQ
16455 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
16456@@ -2540,7 +2540,7 @@ static void ack_apic_level(struct irq_data *data)
16457 * at the cpu.
16458 */
16459 if (!(v & (1 << (i & 0x1f)))) {
16460- atomic_inc(&irq_mis_count);
16461+ atomic_inc_unchecked(&irq_mis_count);
16462
16463 eoi_ioapic_irq(irq, cfg);
16464 }
16465@@ -2567,11 +2567,13 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
16466
16467 static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
16468 {
16469- chip->irq_print_chip = ir_print_prefix;
16470- chip->irq_ack = ir_ack_apic_edge;
16471- chip->irq_eoi = ir_ack_apic_level;
16472+ pax_open_kernel();
16473+ *(void **)&chip->irq_print_chip = ir_print_prefix;
16474+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
16475+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
16476
16477- chip->irq_set_affinity = set_remapped_irq_affinity;
16478+ *(void **)&chip->irq_set_affinity = set_remapped_irq_affinity;
16479+ pax_close_kernel();
16480 }
16481 #endif /* CONFIG_IRQ_REMAP */
16482
16483diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
16484index d661ee9..791fd33 100644
16485--- a/arch/x86/kernel/apic/numaq_32.c
16486+++ b/arch/x86/kernel/apic/numaq_32.c
16487@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
16488 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
16489 }
16490
16491-/* Use __refdata to keep false positive warning calm. */
16492-static struct apic __refdata apic_numaq = {
16493+static struct apic apic_numaq __read_only = {
16494
16495 .name = "NUMAQ",
16496 .probe = probe_numaq,
16497diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
16498index eb35ef9..f184a21 100644
16499--- a/arch/x86/kernel/apic/probe_32.c
16500+++ b/arch/x86/kernel/apic/probe_32.c
16501@@ -72,7 +72,7 @@ static int probe_default(void)
16502 return 1;
16503 }
16504
16505-static struct apic apic_default = {
16506+static struct apic apic_default __read_only = {
16507
16508 .name = "default",
16509 .probe = probe_default,
16510diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
16511index 77c95c0..434f8a4 100644
16512--- a/arch/x86/kernel/apic/summit_32.c
16513+++ b/arch/x86/kernel/apic/summit_32.c
16514@@ -486,7 +486,7 @@ void setup_summit(void)
16515 }
16516 #endif
16517
16518-static struct apic apic_summit = {
16519+static struct apic apic_summit __read_only = {
16520
16521 .name = "summit",
16522 .probe = probe_summit,
16523diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
16524index c88baa4..757aee1 100644
16525--- a/arch/x86/kernel/apic/x2apic_cluster.c
16526+++ b/arch/x86/kernel/apic/x2apic_cluster.c
16527@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
16528 return notifier_from_errno(err);
16529 }
16530
16531-static struct notifier_block __refdata x2apic_cpu_notifier = {
16532+static struct notifier_block x2apic_cpu_notifier = {
16533 .notifier_call = update_clusterinfo,
16534 };
16535
16536@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
16537 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
16538 }
16539
16540-static struct apic apic_x2apic_cluster = {
16541+static struct apic apic_x2apic_cluster __read_only = {
16542
16543 .name = "cluster x2apic",
16544 .probe = x2apic_cluster_probe,
16545diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
16546index 562a76d..a003c0f 100644
16547--- a/arch/x86/kernel/apic/x2apic_phys.c
16548+++ b/arch/x86/kernel/apic/x2apic_phys.c
16549@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
16550 return apic == &apic_x2apic_phys;
16551 }
16552
16553-static struct apic apic_x2apic_phys = {
16554+static struct apic apic_x2apic_phys __read_only = {
16555
16556 .name = "physical x2apic",
16557 .probe = x2apic_phys_probe,
16558diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
16559index 8cfade9..b9d04fc 100644
16560--- a/arch/x86/kernel/apic/x2apic_uv_x.c
16561+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
16562@@ -333,7 +333,7 @@ static int uv_probe(void)
16563 return apic == &apic_x2apic_uv_x;
16564 }
16565
16566-static struct apic __refdata apic_x2apic_uv_x = {
16567+static struct apic apic_x2apic_uv_x __read_only = {
16568
16569 .name = "UV large system",
16570 .probe = uv_probe,
16571diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
16572index d65464e..1035d31 100644
16573--- a/arch/x86/kernel/apm_32.c
16574+++ b/arch/x86/kernel/apm_32.c
16575@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
16576 * This is for buggy BIOS's that refer to (real mode) segment 0x40
16577 * even though they are called in protected mode.
16578 */
16579-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
16580+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
16581 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
16582
16583 static const char driver_version[] = "1.16ac"; /* no spaces */
16584@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
16585 BUG_ON(cpu != 0);
16586 gdt = get_cpu_gdt_table(cpu);
16587 save_desc_40 = gdt[0x40 / 8];
16588+
16589+ pax_open_kernel();
16590 gdt[0x40 / 8] = bad_bios_desc;
16591+ pax_close_kernel();
16592
16593 apm_irq_save(flags);
16594 APM_DO_SAVE_SEGS;
16595@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
16596 &call->esi);
16597 APM_DO_RESTORE_SEGS;
16598 apm_irq_restore(flags);
16599+
16600+ pax_open_kernel();
16601 gdt[0x40 / 8] = save_desc_40;
16602+ pax_close_kernel();
16603+
16604 put_cpu();
16605
16606 return call->eax & 0xff;
16607@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
16608 BUG_ON(cpu != 0);
16609 gdt = get_cpu_gdt_table(cpu);
16610 save_desc_40 = gdt[0x40 / 8];
16611+
16612+ pax_open_kernel();
16613 gdt[0x40 / 8] = bad_bios_desc;
16614+ pax_close_kernel();
16615
16616 apm_irq_save(flags);
16617 APM_DO_SAVE_SEGS;
16618@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
16619 &call->eax);
16620 APM_DO_RESTORE_SEGS;
16621 apm_irq_restore(flags);
16622+
16623+ pax_open_kernel();
16624 gdt[0x40 / 8] = save_desc_40;
16625+ pax_close_kernel();
16626+
16627 put_cpu();
16628 return error;
16629 }
16630@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
16631 * code to that CPU.
16632 */
16633 gdt = get_cpu_gdt_table(0);
16634+
16635+ pax_open_kernel();
16636 set_desc_base(&gdt[APM_CS >> 3],
16637 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
16638 set_desc_base(&gdt[APM_CS_16 >> 3],
16639 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
16640 set_desc_base(&gdt[APM_DS >> 3],
16641 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
16642+ pax_close_kernel();
16643
16644 proc_create("apm", 0, NULL, &apm_file_ops);
16645
16646diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
16647index 2861082..6d4718e 100644
16648--- a/arch/x86/kernel/asm-offsets.c
16649+++ b/arch/x86/kernel/asm-offsets.c
16650@@ -33,6 +33,8 @@ void common(void) {
16651 OFFSET(TI_status, thread_info, status);
16652 OFFSET(TI_addr_limit, thread_info, addr_limit);
16653 OFFSET(TI_preempt_count, thread_info, preempt_count);
16654+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
16655+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
16656
16657 BLANK();
16658 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
16659@@ -53,8 +55,26 @@ void common(void) {
16660 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
16661 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
16662 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
16663+
16664+#ifdef CONFIG_PAX_KERNEXEC
16665+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
16666 #endif
16667
16668+#ifdef CONFIG_PAX_MEMORY_UDEREF
16669+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
16670+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
16671+#ifdef CONFIG_X86_64
16672+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
16673+#endif
16674+#endif
16675+
16676+#endif
16677+
16678+ BLANK();
16679+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
16680+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
16681+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
16682+
16683 #ifdef CONFIG_XEN
16684 BLANK();
16685 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
16686diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
16687index 1b4754f..fbb4227 100644
16688--- a/arch/x86/kernel/asm-offsets_64.c
16689+++ b/arch/x86/kernel/asm-offsets_64.c
16690@@ -76,6 +76,7 @@ int main(void)
16691 BLANK();
16692 #undef ENTRY
16693
16694+ DEFINE(TSS_size, sizeof(struct tss_struct));
16695 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
16696 BLANK();
16697
16698diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
16699index a0e067d..9c7db16 100644
16700--- a/arch/x86/kernel/cpu/Makefile
16701+++ b/arch/x86/kernel/cpu/Makefile
16702@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
16703 CFLAGS_REMOVE_perf_event.o = -pg
16704 endif
16705
16706-# Make sure load_percpu_segment has no stackprotector
16707-nostackp := $(call cc-option, -fno-stack-protector)
16708-CFLAGS_common.o := $(nostackp)
16709-
16710 obj-y := intel_cacheinfo.o scattered.o topology.o
16711 obj-y += proc.o capflags.o powerflags.o common.o
16712 obj-y += vmware.o hypervisor.o mshyperv.o
16713diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
16714index 15239ff..e23e04e 100644
16715--- a/arch/x86/kernel/cpu/amd.c
16716+++ b/arch/x86/kernel/cpu/amd.c
16717@@ -733,7 +733,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
16718 unsigned int size)
16719 {
16720 /* AMD errata T13 (order #21922) */
16721- if ((c->x86 == 6)) {
16722+ if (c->x86 == 6) {
16723 /* Duron Rev A0 */
16724 if (c->x86_model == 3 && c->x86_mask == 0)
16725 size = 64;
16726diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
16727index 9c3ab43..51e6366 100644
16728--- a/arch/x86/kernel/cpu/common.c
16729+++ b/arch/x86/kernel/cpu/common.c
16730@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
16731
16732 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
16733
16734-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
16735-#ifdef CONFIG_X86_64
16736- /*
16737- * We need valid kernel segments for data and code in long mode too
16738- * IRET will check the segment types kkeil 2000/10/28
16739- * Also sysret mandates a special GDT layout
16740- *
16741- * TLS descriptors are currently at a different place compared to i386.
16742- * Hopefully nobody expects them at a fixed place (Wine?)
16743- */
16744- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
16745- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
16746- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
16747- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
16748- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
16749- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
16750-#else
16751- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
16752- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
16753- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
16754- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
16755- /*
16756- * Segments used for calling PnP BIOS have byte granularity.
16757- * They code segments and data segments have fixed 64k limits,
16758- * the transfer segment sizes are set at run time.
16759- */
16760- /* 32-bit code */
16761- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
16762- /* 16-bit code */
16763- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
16764- /* 16-bit data */
16765- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
16766- /* 16-bit data */
16767- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
16768- /* 16-bit data */
16769- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
16770- /*
16771- * The APM segments have byte granularity and their bases
16772- * are set at run time. All have 64k limits.
16773- */
16774- /* 32-bit code */
16775- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
16776- /* 16-bit code */
16777- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
16778- /* data */
16779- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
16780-
16781- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
16782- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
16783- GDT_STACK_CANARY_INIT
16784-#endif
16785-} };
16786-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
16787-
16788 static int __init x86_xsave_setup(char *s)
16789 {
16790 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
16791@@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
16792 {
16793 struct desc_ptr gdt_descr;
16794
16795- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
16796+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16797 gdt_descr.size = GDT_SIZE - 1;
16798 load_gdt(&gdt_descr);
16799 /* Reload the per-cpu base */
16800@@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
16801 /* Filter out anything that depends on CPUID levels we don't have */
16802 filter_cpuid_features(c, true);
16803
16804+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16805+ setup_clear_cpu_cap(X86_FEATURE_SEP);
16806+#endif
16807+
16808 /* If the model name is still unset, do table lookup. */
16809 if (!c->x86_model_id[0]) {
16810 const char *p;
16811@@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
16812 }
16813 __setup("clearcpuid=", setup_disablecpuid);
16814
16815+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
16816+EXPORT_PER_CPU_SYMBOL(current_tinfo);
16817+
16818 #ifdef CONFIG_X86_64
16819 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
16820-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
16821- (unsigned long) nmi_idt_table };
16822+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
16823
16824 DEFINE_PER_CPU_FIRST(union irq_stack_union,
16825 irq_stack_union) __aligned(PAGE_SIZE);
16826@@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
16827 EXPORT_PER_CPU_SYMBOL(current_task);
16828
16829 DEFINE_PER_CPU(unsigned long, kernel_stack) =
16830- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
16831+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
16832 EXPORT_PER_CPU_SYMBOL(kernel_stack);
16833
16834 DEFINE_PER_CPU(char *, irq_stack_ptr) =
16835@@ -1224,7 +1176,7 @@ void __cpuinit cpu_init(void)
16836 int i;
16837
16838 cpu = stack_smp_processor_id();
16839- t = &per_cpu(init_tss, cpu);
16840+ t = init_tss + cpu;
16841 oist = &per_cpu(orig_ist, cpu);
16842
16843 #ifdef CONFIG_NUMA
16844@@ -1250,7 +1202,7 @@ void __cpuinit cpu_init(void)
16845 switch_to_new_gdt(cpu);
16846 loadsegment(fs, 0);
16847
16848- load_idt((const struct desc_ptr *)&idt_descr);
16849+ load_idt(&idt_descr);
16850
16851 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
16852 syscall_init();
16853@@ -1259,7 +1211,6 @@ void __cpuinit cpu_init(void)
16854 wrmsrl(MSR_KERNEL_GS_BASE, 0);
16855 barrier();
16856
16857- x86_configure_nx();
16858 enable_x2apic();
16859
16860 /*
16861@@ -1311,7 +1262,7 @@ void __cpuinit cpu_init(void)
16862 {
16863 int cpu = smp_processor_id();
16864 struct task_struct *curr = current;
16865- struct tss_struct *t = &per_cpu(init_tss, cpu);
16866+ struct tss_struct *t = init_tss + cpu;
16867 struct thread_struct *thread = &curr->thread;
16868
16869 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
16870diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
16871index fcaabd0..7b55a26 100644
16872--- a/arch/x86/kernel/cpu/intel.c
16873+++ b/arch/x86/kernel/cpu/intel.c
16874@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
16875 * Update the IDT descriptor and reload the IDT so that
16876 * it uses the read-only mapped virtual address.
16877 */
16878- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
16879+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
16880 load_idt(&idt_descr);
16881 }
16882 #endif
16883diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
16884index 84c1309..39b7224 100644
16885--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
16886+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
16887@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
16888 };
16889
16890 #ifdef CONFIG_AMD_NB
16891+static struct attribute *default_attrs_amd_nb[] = {
16892+ &type.attr,
16893+ &level.attr,
16894+ &coherency_line_size.attr,
16895+ &physical_line_partition.attr,
16896+ &ways_of_associativity.attr,
16897+ &number_of_sets.attr,
16898+ &size.attr,
16899+ &shared_cpu_map.attr,
16900+ &shared_cpu_list.attr,
16901+ NULL,
16902+ NULL,
16903+ NULL,
16904+ NULL
16905+};
16906+
16907 static struct attribute ** __cpuinit amd_l3_attrs(void)
16908 {
16909 static struct attribute **attrs;
16910@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
16911
16912 n = ARRAY_SIZE(default_attrs);
16913
16914- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
16915- n += 2;
16916-
16917- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
16918- n += 1;
16919-
16920- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
16921- if (attrs == NULL)
16922- return attrs = default_attrs;
16923-
16924- for (n = 0; default_attrs[n]; n++)
16925- attrs[n] = default_attrs[n];
16926+ attrs = default_attrs_amd_nb;
16927
16928 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
16929 attrs[n++] = &cache_disable_0.attr;
16930@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
16931 .default_attrs = default_attrs,
16932 };
16933
16934+#ifdef CONFIG_AMD_NB
16935+static struct kobj_type ktype_cache_amd_nb = {
16936+ .sysfs_ops = &sysfs_ops,
16937+ .default_attrs = default_attrs_amd_nb,
16938+};
16939+#endif
16940+
16941 static struct kobj_type ktype_percpu_entry = {
16942 .sysfs_ops = &sysfs_ops,
16943 };
16944@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
16945 return retval;
16946 }
16947
16948+#ifdef CONFIG_AMD_NB
16949+ amd_l3_attrs();
16950+#endif
16951+
16952 for (i = 0; i < num_cache_leaves; i++) {
16953+ struct kobj_type *ktype;
16954+
16955 this_object = INDEX_KOBJECT_PTR(cpu, i);
16956 this_object->cpu = cpu;
16957 this_object->index = i;
16958
16959 this_leaf = CPUID4_INFO_IDX(cpu, i);
16960
16961- ktype_cache.default_attrs = default_attrs;
16962+ ktype = &ktype_cache;
16963 #ifdef CONFIG_AMD_NB
16964 if (this_leaf->base.nb)
16965- ktype_cache.default_attrs = amd_l3_attrs();
16966+ ktype = &ktype_cache_amd_nb;
16967 #endif
16968 retval = kobject_init_and_add(&(this_object->kobj),
16969- &ktype_cache,
16970+ ktype,
16971 per_cpu(ici_cache_kobject, cpu),
16972 "index%1lu", i);
16973 if (unlikely(retval)) {
16974@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
16975 return NOTIFY_OK;
16976 }
16977
16978-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
16979+static struct notifier_block cacheinfo_cpu_notifier = {
16980 .notifier_call = cacheinfo_cpu_callback,
16981 };
16982
16983diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
16984index 80dbda8..be16652 100644
16985--- a/arch/x86/kernel/cpu/mcheck/mce.c
16986+++ b/arch/x86/kernel/cpu/mcheck/mce.c
16987@@ -45,6 +45,7 @@
16988 #include <asm/processor.h>
16989 #include <asm/mce.h>
16990 #include <asm/msr.h>
16991+#include <asm/local.h>
16992
16993 #include "mce-internal.h"
16994
16995@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
16996 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
16997 m->cs, m->ip);
16998
16999- if (m->cs == __KERNEL_CS)
17000+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
17001 print_symbol("{%s}", m->ip);
17002 pr_cont("\n");
17003 }
17004@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
17005
17006 #define PANIC_TIMEOUT 5 /* 5 seconds */
17007
17008-static atomic_t mce_paniced;
17009+static atomic_unchecked_t mce_paniced;
17010
17011 static int fake_panic;
17012-static atomic_t mce_fake_paniced;
17013+static atomic_unchecked_t mce_fake_paniced;
17014
17015 /* Panic in progress. Enable interrupts and wait for final IPI */
17016 static void wait_for_panic(void)
17017@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17018 /*
17019 * Make sure only one CPU runs in machine check panic
17020 */
17021- if (atomic_inc_return(&mce_paniced) > 1)
17022+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
17023 wait_for_panic();
17024 barrier();
17025
17026@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17027 console_verbose();
17028 } else {
17029 /* Don't log too much for fake panic */
17030- if (atomic_inc_return(&mce_fake_paniced) > 1)
17031+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
17032 return;
17033 }
17034 /* First print corrected ones that are still unlogged */
17035@@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
17036 * might have been modified by someone else.
17037 */
17038 rmb();
17039- if (atomic_read(&mce_paniced))
17040+ if (atomic_read_unchecked(&mce_paniced))
17041 wait_for_panic();
17042 if (!mca_cfg.monarch_timeout)
17043 goto out;
17044@@ -1662,7 +1663,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
17045 }
17046
17047 /* Call the installed machine check handler for this CPU setup. */
17048-void (*machine_check_vector)(struct pt_regs *, long error_code) =
17049+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
17050 unexpected_machine_check;
17051
17052 /*
17053@@ -1685,7 +1686,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17054 return;
17055 }
17056
17057+ pax_open_kernel();
17058 machine_check_vector = do_machine_check;
17059+ pax_close_kernel();
17060
17061 __mcheck_cpu_init_generic();
17062 __mcheck_cpu_init_vendor(c);
17063@@ -1699,7 +1702,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17064 */
17065
17066 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
17067-static int mce_chrdev_open_count; /* #times opened */
17068+static local_t mce_chrdev_open_count; /* #times opened */
17069 static int mce_chrdev_open_exclu; /* already open exclusive? */
17070
17071 static int mce_chrdev_open(struct inode *inode, struct file *file)
17072@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17073 spin_lock(&mce_chrdev_state_lock);
17074
17075 if (mce_chrdev_open_exclu ||
17076- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
17077+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
17078 spin_unlock(&mce_chrdev_state_lock);
17079
17080 return -EBUSY;
17081@@ -1715,7 +1718,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17082
17083 if (file->f_flags & O_EXCL)
17084 mce_chrdev_open_exclu = 1;
17085- mce_chrdev_open_count++;
17086+ local_inc(&mce_chrdev_open_count);
17087
17088 spin_unlock(&mce_chrdev_state_lock);
17089
17090@@ -1726,7 +1729,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
17091 {
17092 spin_lock(&mce_chrdev_state_lock);
17093
17094- mce_chrdev_open_count--;
17095+ local_dec(&mce_chrdev_open_count);
17096 mce_chrdev_open_exclu = 0;
17097
17098 spin_unlock(&mce_chrdev_state_lock);
17099@@ -2372,7 +2375,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
17100 return NOTIFY_OK;
17101 }
17102
17103-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
17104+static struct notifier_block mce_cpu_notifier = {
17105 .notifier_call = mce_cpu_callback,
17106 };
17107
17108@@ -2382,7 +2385,7 @@ static __init void mce_init_banks(void)
17109
17110 for (i = 0; i < mca_cfg.banks; i++) {
17111 struct mce_bank *b = &mce_banks[i];
17112- struct device_attribute *a = &b->attr;
17113+ device_attribute_no_const *a = &b->attr;
17114
17115 sysfs_attr_init(&a->attr);
17116 a->attr.name = b->attrname;
17117@@ -2450,7 +2453,7 @@ struct dentry *mce_get_debugfs_dir(void)
17118 static void mce_reset(void)
17119 {
17120 cpu_missing = 0;
17121- atomic_set(&mce_fake_paniced, 0);
17122+ atomic_set_unchecked(&mce_fake_paniced, 0);
17123 atomic_set(&mce_executing, 0);
17124 atomic_set(&mce_callin, 0);
17125 atomic_set(&global_nwo, 0);
17126diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
17127index 2d5454c..51987eb 100644
17128--- a/arch/x86/kernel/cpu/mcheck/p5.c
17129+++ b/arch/x86/kernel/cpu/mcheck/p5.c
17130@@ -11,6 +11,7 @@
17131 #include <asm/processor.h>
17132 #include <asm/mce.h>
17133 #include <asm/msr.h>
17134+#include <asm/pgtable.h>
17135
17136 /* By default disabled */
17137 int mce_p5_enabled __read_mostly;
17138@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
17139 if (!cpu_has(c, X86_FEATURE_MCE))
17140 return;
17141
17142+ pax_open_kernel();
17143 machine_check_vector = pentium_machine_check;
17144+ pax_close_kernel();
17145 /* Make sure the vector pointer is visible before we enable MCEs: */
17146 wmb();
17147
17148diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17149index 47a1870..8c019a7 100644
17150--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17151+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17152@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17153 return notifier_from_errno(err);
17154 }
17155
17156-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17157+static struct notifier_block thermal_throttle_cpu_notifier =
17158 {
17159 .notifier_call = thermal_throttle_cpu_callback,
17160 };
17161diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17162index 2d7998f..17c9de1 100644
17163--- a/arch/x86/kernel/cpu/mcheck/winchip.c
17164+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17165@@ -10,6 +10,7 @@
17166 #include <asm/processor.h>
17167 #include <asm/mce.h>
17168 #include <asm/msr.h>
17169+#include <asm/pgtable.h>
17170
17171 /* Machine check handler for WinChip C6: */
17172 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17173@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17174 {
17175 u32 lo, hi;
17176
17177+ pax_open_kernel();
17178 machine_check_vector = winchip_machine_check;
17179+ pax_close_kernel();
17180 /* Make sure the vector pointer is visible before we enable MCEs: */
17181 wmb();
17182
17183diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17184index 726bf96..81f0526 100644
17185--- a/arch/x86/kernel/cpu/mtrr/main.c
17186+++ b/arch/x86/kernel/cpu/mtrr/main.c
17187@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17188 u64 size_or_mask, size_and_mask;
17189 static bool mtrr_aps_delayed_init;
17190
17191-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17192+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17193
17194 const struct mtrr_ops *mtrr_if;
17195
17196diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17197index df5e41f..816c719 100644
17198--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17199+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17200@@ -25,7 +25,7 @@ struct mtrr_ops {
17201 int (*validate_add_page)(unsigned long base, unsigned long size,
17202 unsigned int type);
17203 int (*have_wrcomb)(void);
17204-};
17205+} __do_const;
17206
17207 extern int generic_get_free_region(unsigned long base, unsigned long size,
17208 int replace_reg);
17209diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17210index 6774c17..72c1b22 100644
17211--- a/arch/x86/kernel/cpu/perf_event.c
17212+++ b/arch/x86/kernel/cpu/perf_event.c
17213@@ -1305,7 +1305,7 @@ static void __init pmu_check_apic(void)
17214 pr_info("no hardware sampling interrupt available.\n");
17215 }
17216
17217-static struct attribute_group x86_pmu_format_group = {
17218+static attribute_group_no_const x86_pmu_format_group = {
17219 .name = "format",
17220 .attrs = NULL,
17221 };
17222@@ -1313,7 +1313,7 @@ static struct attribute_group x86_pmu_format_group = {
17223 struct perf_pmu_events_attr {
17224 struct device_attribute attr;
17225 u64 id;
17226-};
17227+} __do_const;
17228
17229 /*
17230 * Remove all undefined events (x86_pmu.event_map(id) == 0)
17231@@ -1381,7 +1381,7 @@ static struct attribute *events_attr[] = {
17232 NULL,
17233 };
17234
17235-static struct attribute_group x86_pmu_events_group = {
17236+static attribute_group_no_const x86_pmu_events_group = {
17237 .name = "events",
17238 .attrs = events_attr,
17239 };
17240@@ -1880,7 +1880,7 @@ static unsigned long get_segment_base(unsigned int segment)
17241 if (idx > GDT_ENTRIES)
17242 return 0;
17243
17244- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17245+ desc = get_cpu_gdt_table(smp_processor_id());
17246 }
17247
17248 return get_desc_base(desc + idx);
17249@@ -1970,7 +1970,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17250 break;
17251
17252 perf_callchain_store(entry, frame.return_address);
17253- fp = frame.next_frame;
17254+ fp = (const void __force_user *)frame.next_frame;
17255 }
17256 }
17257
17258diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17259index 4914e94..60b06e3 100644
17260--- a/arch/x86/kernel/cpu/perf_event_intel.c
17261+++ b/arch/x86/kernel/cpu/perf_event_intel.c
17262@@ -1958,10 +1958,10 @@ __init int intel_pmu_init(void)
17263 * v2 and above have a perf capabilities MSR
17264 */
17265 if (version > 1) {
17266- u64 capabilities;
17267+ u64 capabilities = x86_pmu.intel_cap.capabilities;
17268
17269- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17270- x86_pmu.intel_cap.capabilities = capabilities;
17271+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17272+ x86_pmu.intel_cap.capabilities = capabilities;
17273 }
17274
17275 intel_ds_init();
17276diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17277index b43200d..7fdcdbb 100644
17278--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17279+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17280@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
17281 static int __init uncore_type_init(struct intel_uncore_type *type)
17282 {
17283 struct intel_uncore_pmu *pmus;
17284- struct attribute_group *events_group;
17285+ attribute_group_no_const *events_group;
17286 struct attribute **attrs;
17287 int i, j;
17288
17289@@ -2826,7 +2826,7 @@ static int
17290 return NOTIFY_OK;
17291 }
17292
17293-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
17294+static struct notifier_block uncore_cpu_nb = {
17295 .notifier_call = uncore_cpu_notifier,
17296 /*
17297 * to migrate uncore events, our notifier should be executed
17298diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17299index e68a455..975a932 100644
17300--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17301+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17302@@ -428,7 +428,7 @@ struct intel_uncore_box {
17303 struct uncore_event_desc {
17304 struct kobj_attribute attr;
17305 const char *config;
17306-};
17307+} __do_const;
17308
17309 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
17310 { \
17311diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
17312index 60c7891..9e911d3 100644
17313--- a/arch/x86/kernel/cpuid.c
17314+++ b/arch/x86/kernel/cpuid.c
17315@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
17316 return notifier_from_errno(err);
17317 }
17318
17319-static struct notifier_block __refdata cpuid_class_cpu_notifier =
17320+static struct notifier_block cpuid_class_cpu_notifier =
17321 {
17322 .notifier_call = cpuid_class_cpu_callback,
17323 };
17324diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
17325index 74467fe..18793d5 100644
17326--- a/arch/x86/kernel/crash.c
17327+++ b/arch/x86/kernel/crash.c
17328@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
17329 {
17330 #ifdef CONFIG_X86_32
17331 struct pt_regs fixed_regs;
17332-#endif
17333
17334-#ifdef CONFIG_X86_32
17335- if (!user_mode_vm(regs)) {
17336+ if (!user_mode(regs)) {
17337 crash_fixup_ss_esp(&fixed_regs, regs);
17338 regs = &fixed_regs;
17339 }
17340diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
17341index 37250fe..bf2ec74 100644
17342--- a/arch/x86/kernel/doublefault_32.c
17343+++ b/arch/x86/kernel/doublefault_32.c
17344@@ -11,7 +11,7 @@
17345
17346 #define DOUBLEFAULT_STACKSIZE (1024)
17347 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
17348-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
17349+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
17350
17351 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
17352
17353@@ -21,7 +21,7 @@ static void doublefault_fn(void)
17354 unsigned long gdt, tss;
17355
17356 store_gdt(&gdt_desc);
17357- gdt = gdt_desc.address;
17358+ gdt = (unsigned long)gdt_desc.address;
17359
17360 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
17361
17362@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
17363 /* 0x2 bit is always set */
17364 .flags = X86_EFLAGS_SF | 0x2,
17365 .sp = STACK_START,
17366- .es = __USER_DS,
17367+ .es = __KERNEL_DS,
17368 .cs = __KERNEL_CS,
17369 .ss = __KERNEL_DS,
17370- .ds = __USER_DS,
17371+ .ds = __KERNEL_DS,
17372 .fs = __KERNEL_PERCPU,
17373
17374 .__cr3 = __pa_nodebug(swapper_pg_dir),
17375diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
17376index ae42418b..787c16b 100644
17377--- a/arch/x86/kernel/dumpstack.c
17378+++ b/arch/x86/kernel/dumpstack.c
17379@@ -2,6 +2,9 @@
17380 * Copyright (C) 1991, 1992 Linus Torvalds
17381 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
17382 */
17383+#ifdef CONFIG_GRKERNSEC_HIDESYM
17384+#define __INCLUDED_BY_HIDESYM 1
17385+#endif
17386 #include <linux/kallsyms.h>
17387 #include <linux/kprobes.h>
17388 #include <linux/uaccess.h>
17389@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
17390 static void
17391 print_ftrace_graph_addr(unsigned long addr, void *data,
17392 const struct stacktrace_ops *ops,
17393- struct thread_info *tinfo, int *graph)
17394+ struct task_struct *task, int *graph)
17395 {
17396- struct task_struct *task;
17397 unsigned long ret_addr;
17398 int index;
17399
17400 if (addr != (unsigned long)return_to_handler)
17401 return;
17402
17403- task = tinfo->task;
17404 index = task->curr_ret_stack;
17405
17406 if (!task->ret_stack || index < *graph)
17407@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17408 static inline void
17409 print_ftrace_graph_addr(unsigned long addr, void *data,
17410 const struct stacktrace_ops *ops,
17411- struct thread_info *tinfo, int *graph)
17412+ struct task_struct *task, int *graph)
17413 { }
17414 #endif
17415
17416@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17417 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
17418 */
17419
17420-static inline int valid_stack_ptr(struct thread_info *tinfo,
17421- void *p, unsigned int size, void *end)
17422+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
17423 {
17424- void *t = tinfo;
17425 if (end) {
17426 if (p < end && p >= (end-THREAD_SIZE))
17427 return 1;
17428@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
17429 }
17430
17431 unsigned long
17432-print_context_stack(struct thread_info *tinfo,
17433+print_context_stack(struct task_struct *task, void *stack_start,
17434 unsigned long *stack, unsigned long bp,
17435 const struct stacktrace_ops *ops, void *data,
17436 unsigned long *end, int *graph)
17437 {
17438 struct stack_frame *frame = (struct stack_frame *)bp;
17439
17440- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
17441+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
17442 unsigned long addr;
17443
17444 addr = *stack;
17445@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
17446 } else {
17447 ops->address(data, addr, 0);
17448 }
17449- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17450+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17451 }
17452 stack++;
17453 }
17454@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
17455 EXPORT_SYMBOL_GPL(print_context_stack);
17456
17457 unsigned long
17458-print_context_stack_bp(struct thread_info *tinfo,
17459+print_context_stack_bp(struct task_struct *task, void *stack_start,
17460 unsigned long *stack, unsigned long bp,
17461 const struct stacktrace_ops *ops, void *data,
17462 unsigned long *end, int *graph)
17463@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17464 struct stack_frame *frame = (struct stack_frame *)bp;
17465 unsigned long *ret_addr = &frame->return_address;
17466
17467- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
17468+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
17469 unsigned long addr = *ret_addr;
17470
17471 if (!__kernel_text_address(addr))
17472@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17473 ops->address(data, addr, 1);
17474 frame = frame->next_frame;
17475 ret_addr = &frame->return_address;
17476- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17477+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17478 }
17479
17480 return (unsigned long)frame;
17481@@ -189,7 +188,7 @@ void dump_stack(void)
17482
17483 bp = stack_frame(current, NULL);
17484 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
17485- current->pid, current->comm, print_tainted(),
17486+ task_pid_nr(current), current->comm, print_tainted(),
17487 init_utsname()->release,
17488 (int)strcspn(init_utsname()->version, " "),
17489 init_utsname()->version);
17490@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
17491 }
17492 EXPORT_SYMBOL_GPL(oops_begin);
17493
17494+extern void gr_handle_kernel_exploit(void);
17495+
17496 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17497 {
17498 if (regs && kexec_should_crash(current))
17499@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17500 panic("Fatal exception in interrupt");
17501 if (panic_on_oops)
17502 panic("Fatal exception");
17503- do_exit(signr);
17504+
17505+ gr_handle_kernel_exploit();
17506+
17507+ do_group_exit(signr);
17508 }
17509
17510 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17511@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17512 print_modules();
17513 show_regs(regs);
17514 #ifdef CONFIG_X86_32
17515- if (user_mode_vm(regs)) {
17516+ if (user_mode(regs)) {
17517 sp = regs->sp;
17518 ss = regs->ss & 0xffff;
17519 } else {
17520@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
17521 unsigned long flags = oops_begin();
17522 int sig = SIGSEGV;
17523
17524- if (!user_mode_vm(regs))
17525+ if (!user_mode(regs))
17526 report_bug(regs->ip, regs);
17527
17528 if (__die(str, regs, err))
17529diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
17530index 1038a41..db2c12b 100644
17531--- a/arch/x86/kernel/dumpstack_32.c
17532+++ b/arch/x86/kernel/dumpstack_32.c
17533@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17534 bp = stack_frame(task, regs);
17535
17536 for (;;) {
17537- struct thread_info *context;
17538+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
17539
17540- context = (struct thread_info *)
17541- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
17542- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
17543+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
17544
17545- stack = (unsigned long *)context->previous_esp;
17546- if (!stack)
17547+ if (stack_start == task_stack_page(task))
17548 break;
17549+ stack = *(unsigned long **)stack_start;
17550 if (ops->stack(data, "IRQ") < 0)
17551 break;
17552 touch_nmi_watchdog();
17553@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
17554 {
17555 int i;
17556
17557- __show_regs(regs, !user_mode_vm(regs));
17558+ __show_regs(regs, !user_mode(regs));
17559
17560 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
17561 TASK_COMM_LEN, current->comm, task_pid_nr(current),
17562@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
17563 * When in-kernel, we also print out the stack and code at the
17564 * time of the fault..
17565 */
17566- if (!user_mode_vm(regs)) {
17567+ if (!user_mode(regs)) {
17568 unsigned int code_prologue = code_bytes * 43 / 64;
17569 unsigned int code_len = code_bytes;
17570 unsigned char c;
17571 u8 *ip;
17572+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
17573
17574 pr_emerg("Stack:\n");
17575 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
17576
17577 pr_emerg("Code:");
17578
17579- ip = (u8 *)regs->ip - code_prologue;
17580+ ip = (u8 *)regs->ip - code_prologue + cs_base;
17581 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
17582 /* try starting at IP */
17583- ip = (u8 *)regs->ip;
17584+ ip = (u8 *)regs->ip + cs_base;
17585 code_len = code_len - code_prologue + 1;
17586 }
17587 for (i = 0; i < code_len; i++, ip++) {
17588@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
17589 pr_cont(" Bad EIP value.");
17590 break;
17591 }
17592- if (ip == (u8 *)regs->ip)
17593+ if (ip == (u8 *)regs->ip + cs_base)
17594 pr_cont(" <%02x>", c);
17595 else
17596 pr_cont(" %02x", c);
17597@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
17598 {
17599 unsigned short ud2;
17600
17601+ ip = ktla_ktva(ip);
17602 if (ip < PAGE_OFFSET)
17603 return 0;
17604 if (probe_kernel_address((unsigned short *)ip, ud2))
17605@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
17606
17607 return ud2 == 0x0b0f;
17608 }
17609+
17610+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17611+void pax_check_alloca(unsigned long size)
17612+{
17613+ unsigned long sp = (unsigned long)&sp, stack_left;
17614+
17615+ /* all kernel stacks are of the same size */
17616+ stack_left = sp & (THREAD_SIZE - 1);
17617+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
17618+}
17619+EXPORT_SYMBOL(pax_check_alloca);
17620+#endif
17621diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
17622index b653675..51cc8c0 100644
17623--- a/arch/x86/kernel/dumpstack_64.c
17624+++ b/arch/x86/kernel/dumpstack_64.c
17625@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17626 unsigned long *irq_stack_end =
17627 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
17628 unsigned used = 0;
17629- struct thread_info *tinfo;
17630 int graph = 0;
17631 unsigned long dummy;
17632+ void *stack_start;
17633
17634 if (!task)
17635 task = current;
17636@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17637 * current stack address. If the stacks consist of nested
17638 * exceptions
17639 */
17640- tinfo = task_thread_info(task);
17641 for (;;) {
17642 char *id;
17643 unsigned long *estack_end;
17644+
17645 estack_end = in_exception_stack(cpu, (unsigned long)stack,
17646 &used, &id);
17647
17648@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17649 if (ops->stack(data, id) < 0)
17650 break;
17651
17652- bp = ops->walk_stack(tinfo, stack, bp, ops,
17653+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
17654 data, estack_end, &graph);
17655 ops->stack(data, "<EOE>");
17656 /*
17657@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17658 * second-to-last pointer (index -2 to end) in the
17659 * exception stack:
17660 */
17661+ if ((u16)estack_end[-1] != __KERNEL_DS)
17662+ goto out;
17663 stack = (unsigned long *) estack_end[-2];
17664 continue;
17665 }
17666@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17667 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
17668 if (ops->stack(data, "IRQ") < 0)
17669 break;
17670- bp = ops->walk_stack(tinfo, stack, bp,
17671+ bp = ops->walk_stack(task, irq_stack, stack, bp,
17672 ops, data, irq_stack_end, &graph);
17673 /*
17674 * We link to the next stack (which would be
17675@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17676 /*
17677 * This handles the process stack:
17678 */
17679- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
17680+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
17681+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
17682+out:
17683 put_cpu();
17684 }
17685 EXPORT_SYMBOL(dump_trace);
17686@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
17687 {
17688 int i;
17689 unsigned long sp;
17690- const int cpu = smp_processor_id();
17691+ const int cpu = raw_smp_processor_id();
17692 struct task_struct *cur = current;
17693
17694 sp = regs->sp;
17695@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
17696
17697 return ud2 == 0x0b0f;
17698 }
17699+
17700+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17701+void pax_check_alloca(unsigned long size)
17702+{
17703+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
17704+ unsigned cpu, used;
17705+ char *id;
17706+
17707+ /* check the process stack first */
17708+ stack_start = (unsigned long)task_stack_page(current);
17709+ stack_end = stack_start + THREAD_SIZE;
17710+ if (likely(stack_start <= sp && sp < stack_end)) {
17711+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
17712+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
17713+ return;
17714+ }
17715+
17716+ cpu = get_cpu();
17717+
17718+ /* check the irq stacks */
17719+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
17720+ stack_start = stack_end - IRQ_STACK_SIZE;
17721+ if (stack_start <= sp && sp < stack_end) {
17722+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
17723+ put_cpu();
17724+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
17725+ return;
17726+ }
17727+
17728+ /* check the exception stacks */
17729+ used = 0;
17730+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
17731+ stack_start = stack_end - EXCEPTION_STKSZ;
17732+ if (stack_end && stack_start <= sp && sp < stack_end) {
17733+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
17734+ put_cpu();
17735+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
17736+ return;
17737+ }
17738+
17739+ put_cpu();
17740+
17741+ /* unknown stack */
17742+ BUG();
17743+}
17744+EXPORT_SYMBOL(pax_check_alloca);
17745+#endif
17746diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
17747index 9b9f18b..9fcaa04 100644
17748--- a/arch/x86/kernel/early_printk.c
17749+++ b/arch/x86/kernel/early_printk.c
17750@@ -7,6 +7,7 @@
17751 #include <linux/pci_regs.h>
17752 #include <linux/pci_ids.h>
17753 #include <linux/errno.h>
17754+#include <linux/sched.h>
17755 #include <asm/io.h>
17756 #include <asm/processor.h>
17757 #include <asm/fcntl.h>
17758diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
17759index 6ed91d9..6cc365b 100644
17760--- a/arch/x86/kernel/entry_32.S
17761+++ b/arch/x86/kernel/entry_32.S
17762@@ -177,13 +177,153 @@
17763 /*CFI_REL_OFFSET gs, PT_GS*/
17764 .endm
17765 .macro SET_KERNEL_GS reg
17766+
17767+#ifdef CONFIG_CC_STACKPROTECTOR
17768 movl $(__KERNEL_STACK_CANARY), \reg
17769+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17770+ movl $(__USER_DS), \reg
17771+#else
17772+ xorl \reg, \reg
17773+#endif
17774+
17775 movl \reg, %gs
17776 .endm
17777
17778 #endif /* CONFIG_X86_32_LAZY_GS */
17779
17780-.macro SAVE_ALL
17781+.macro pax_enter_kernel
17782+#ifdef CONFIG_PAX_KERNEXEC
17783+ call pax_enter_kernel
17784+#endif
17785+.endm
17786+
17787+.macro pax_exit_kernel
17788+#ifdef CONFIG_PAX_KERNEXEC
17789+ call pax_exit_kernel
17790+#endif
17791+.endm
17792+
17793+#ifdef CONFIG_PAX_KERNEXEC
17794+ENTRY(pax_enter_kernel)
17795+#ifdef CONFIG_PARAVIRT
17796+ pushl %eax
17797+ pushl %ecx
17798+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
17799+ mov %eax, %esi
17800+#else
17801+ mov %cr0, %esi
17802+#endif
17803+ bts $16, %esi
17804+ jnc 1f
17805+ mov %cs, %esi
17806+ cmp $__KERNEL_CS, %esi
17807+ jz 3f
17808+ ljmp $__KERNEL_CS, $3f
17809+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
17810+2:
17811+#ifdef CONFIG_PARAVIRT
17812+ mov %esi, %eax
17813+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17814+#else
17815+ mov %esi, %cr0
17816+#endif
17817+3:
17818+#ifdef CONFIG_PARAVIRT
17819+ popl %ecx
17820+ popl %eax
17821+#endif
17822+ ret
17823+ENDPROC(pax_enter_kernel)
17824+
17825+ENTRY(pax_exit_kernel)
17826+#ifdef CONFIG_PARAVIRT
17827+ pushl %eax
17828+ pushl %ecx
17829+#endif
17830+ mov %cs, %esi
17831+ cmp $__KERNEXEC_KERNEL_CS, %esi
17832+ jnz 2f
17833+#ifdef CONFIG_PARAVIRT
17834+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
17835+ mov %eax, %esi
17836+#else
17837+ mov %cr0, %esi
17838+#endif
17839+ btr $16, %esi
17840+ ljmp $__KERNEL_CS, $1f
17841+1:
17842+#ifdef CONFIG_PARAVIRT
17843+ mov %esi, %eax
17844+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
17845+#else
17846+ mov %esi, %cr0
17847+#endif
17848+2:
17849+#ifdef CONFIG_PARAVIRT
17850+ popl %ecx
17851+ popl %eax
17852+#endif
17853+ ret
17854+ENDPROC(pax_exit_kernel)
17855+#endif
17856+
17857+.macro pax_erase_kstack
17858+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17859+ call pax_erase_kstack
17860+#endif
17861+.endm
17862+
17863+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17864+/*
17865+ * ebp: thread_info
17866+ */
17867+ENTRY(pax_erase_kstack)
17868+ pushl %edi
17869+ pushl %ecx
17870+ pushl %eax
17871+
17872+ mov TI_lowest_stack(%ebp), %edi
17873+ mov $-0xBEEF, %eax
17874+ std
17875+
17876+1: mov %edi, %ecx
17877+ and $THREAD_SIZE_asm - 1, %ecx
17878+ shr $2, %ecx
17879+ repne scasl
17880+ jecxz 2f
17881+
17882+ cmp $2*16, %ecx
17883+ jc 2f
17884+
17885+ mov $2*16, %ecx
17886+ repe scasl
17887+ jecxz 2f
17888+ jne 1b
17889+
17890+2: cld
17891+ mov %esp, %ecx
17892+ sub %edi, %ecx
17893+
17894+ cmp $THREAD_SIZE_asm, %ecx
17895+ jb 3f
17896+ ud2
17897+3:
17898+
17899+ shr $2, %ecx
17900+ rep stosl
17901+
17902+ mov TI_task_thread_sp0(%ebp), %edi
17903+ sub $128, %edi
17904+ mov %edi, TI_lowest_stack(%ebp)
17905+
17906+ popl %eax
17907+ popl %ecx
17908+ popl %edi
17909+ ret
17910+ENDPROC(pax_erase_kstack)
17911+#endif
17912+
17913+.macro __SAVE_ALL _DS
17914 cld
17915 PUSH_GS
17916 pushl_cfi %fs
17917@@ -206,7 +346,7 @@
17918 CFI_REL_OFFSET ecx, 0
17919 pushl_cfi %ebx
17920 CFI_REL_OFFSET ebx, 0
17921- movl $(__USER_DS), %edx
17922+ movl $\_DS, %edx
17923 movl %edx, %ds
17924 movl %edx, %es
17925 movl $(__KERNEL_PERCPU), %edx
17926@@ -214,6 +354,15 @@
17927 SET_KERNEL_GS %edx
17928 .endm
17929
17930+.macro SAVE_ALL
17931+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
17932+ __SAVE_ALL __KERNEL_DS
17933+ pax_enter_kernel
17934+#else
17935+ __SAVE_ALL __USER_DS
17936+#endif
17937+.endm
17938+
17939 .macro RESTORE_INT_REGS
17940 popl_cfi %ebx
17941 CFI_RESTORE ebx
17942@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
17943 popfl_cfi
17944 jmp syscall_exit
17945 CFI_ENDPROC
17946-END(ret_from_fork)
17947+ENDPROC(ret_from_fork)
17948
17949 ENTRY(ret_from_kernel_thread)
17950 CFI_STARTPROC
17951@@ -344,7 +493,15 @@ ret_from_intr:
17952 andl $SEGMENT_RPL_MASK, %eax
17953 #endif
17954 cmpl $USER_RPL, %eax
17955+
17956+#ifdef CONFIG_PAX_KERNEXEC
17957+ jae resume_userspace
17958+
17959+ pax_exit_kernel
17960+ jmp resume_kernel
17961+#else
17962 jb resume_kernel # not returning to v8086 or userspace
17963+#endif
17964
17965 ENTRY(resume_userspace)
17966 LOCKDEP_SYS_EXIT
17967@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
17968 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
17969 # int/exception return?
17970 jne work_pending
17971- jmp restore_all
17972-END(ret_from_exception)
17973+ jmp restore_all_pax
17974+ENDPROC(ret_from_exception)
17975
17976 #ifdef CONFIG_PREEMPT
17977 ENTRY(resume_kernel)
17978@@ -372,7 +529,7 @@ need_resched:
17979 jz restore_all
17980 call preempt_schedule_irq
17981 jmp need_resched
17982-END(resume_kernel)
17983+ENDPROC(resume_kernel)
17984 #endif
17985 CFI_ENDPROC
17986 /*
17987@@ -406,30 +563,45 @@ sysenter_past_esp:
17988 /*CFI_REL_OFFSET cs, 0*/
17989 /*
17990 * Push current_thread_info()->sysenter_return to the stack.
17991- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
17992- * pushed above; +8 corresponds to copy_thread's esp0 setting.
17993 */
17994- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
17995+ pushl_cfi $0
17996 CFI_REL_OFFSET eip, 0
17997
17998 pushl_cfi %eax
17999 SAVE_ALL
18000+ GET_THREAD_INFO(%ebp)
18001+ movl TI_sysenter_return(%ebp),%ebp
18002+ movl %ebp,PT_EIP(%esp)
18003 ENABLE_INTERRUPTS(CLBR_NONE)
18004
18005 /*
18006 * Load the potential sixth argument from user stack.
18007 * Careful about security.
18008 */
18009+ movl PT_OLDESP(%esp),%ebp
18010+
18011+#ifdef CONFIG_PAX_MEMORY_UDEREF
18012+ mov PT_OLDSS(%esp),%ds
18013+1: movl %ds:(%ebp),%ebp
18014+ push %ss
18015+ pop %ds
18016+#else
18017 cmpl $__PAGE_OFFSET-3,%ebp
18018 jae syscall_fault
18019 ASM_STAC
18020 1: movl (%ebp),%ebp
18021 ASM_CLAC
18022+#endif
18023+
18024 movl %ebp,PT_EBP(%esp)
18025 _ASM_EXTABLE(1b,syscall_fault)
18026
18027 GET_THREAD_INFO(%ebp)
18028
18029+#ifdef CONFIG_PAX_RANDKSTACK
18030+ pax_erase_kstack
18031+#endif
18032+
18033 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18034 jnz sysenter_audit
18035 sysenter_do_call:
18036@@ -444,12 +616,24 @@ sysenter_do_call:
18037 testl $_TIF_ALLWORK_MASK, %ecx
18038 jne sysexit_audit
18039 sysenter_exit:
18040+
18041+#ifdef CONFIG_PAX_RANDKSTACK
18042+ pushl_cfi %eax
18043+ movl %esp, %eax
18044+ call pax_randomize_kstack
18045+ popl_cfi %eax
18046+#endif
18047+
18048+ pax_erase_kstack
18049+
18050 /* if something modifies registers it must also disable sysexit */
18051 movl PT_EIP(%esp), %edx
18052 movl PT_OLDESP(%esp), %ecx
18053 xorl %ebp,%ebp
18054 TRACE_IRQS_ON
18055 1: mov PT_FS(%esp), %fs
18056+2: mov PT_DS(%esp), %ds
18057+3: mov PT_ES(%esp), %es
18058 PTGS_TO_GS
18059 ENABLE_INTERRUPTS_SYSEXIT
18060
18061@@ -466,6 +650,9 @@ sysenter_audit:
18062 movl %eax,%edx /* 2nd arg: syscall number */
18063 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
18064 call __audit_syscall_entry
18065+
18066+ pax_erase_kstack
18067+
18068 pushl_cfi %ebx
18069 movl PT_EAX(%esp),%eax /* reload syscall number */
18070 jmp sysenter_do_call
18071@@ -491,10 +678,16 @@ sysexit_audit:
18072
18073 CFI_ENDPROC
18074 .pushsection .fixup,"ax"
18075-2: movl $0,PT_FS(%esp)
18076+4: movl $0,PT_FS(%esp)
18077+ jmp 1b
18078+5: movl $0,PT_DS(%esp)
18079+ jmp 1b
18080+6: movl $0,PT_ES(%esp)
18081 jmp 1b
18082 .popsection
18083- _ASM_EXTABLE(1b,2b)
18084+ _ASM_EXTABLE(1b,4b)
18085+ _ASM_EXTABLE(2b,5b)
18086+ _ASM_EXTABLE(3b,6b)
18087 PTGS_TO_GS_EX
18088 ENDPROC(ia32_sysenter_target)
18089
18090@@ -509,6 +702,11 @@ ENTRY(system_call)
18091 pushl_cfi %eax # save orig_eax
18092 SAVE_ALL
18093 GET_THREAD_INFO(%ebp)
18094+
18095+#ifdef CONFIG_PAX_RANDKSTACK
18096+ pax_erase_kstack
18097+#endif
18098+
18099 # system call tracing in operation / emulation
18100 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18101 jnz syscall_trace_entry
18102@@ -527,6 +725,15 @@ syscall_exit:
18103 testl $_TIF_ALLWORK_MASK, %ecx # current->work
18104 jne syscall_exit_work
18105
18106+restore_all_pax:
18107+
18108+#ifdef CONFIG_PAX_RANDKSTACK
18109+ movl %esp, %eax
18110+ call pax_randomize_kstack
18111+#endif
18112+
18113+ pax_erase_kstack
18114+
18115 restore_all:
18116 TRACE_IRQS_IRET
18117 restore_all_notrace:
18118@@ -583,14 +790,34 @@ ldt_ss:
18119 * compensating for the offset by changing to the ESPFIX segment with
18120 * a base address that matches for the difference.
18121 */
18122-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
18123+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
18124 mov %esp, %edx /* load kernel esp */
18125 mov PT_OLDESP(%esp), %eax /* load userspace esp */
18126 mov %dx, %ax /* eax: new kernel esp */
18127 sub %eax, %edx /* offset (low word is 0) */
18128+#ifdef CONFIG_SMP
18129+ movl PER_CPU_VAR(cpu_number), %ebx
18130+ shll $PAGE_SHIFT_asm, %ebx
18131+ addl $cpu_gdt_table, %ebx
18132+#else
18133+ movl $cpu_gdt_table, %ebx
18134+#endif
18135 shr $16, %edx
18136- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
18137- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
18138+
18139+#ifdef CONFIG_PAX_KERNEXEC
18140+ mov %cr0, %esi
18141+ btr $16, %esi
18142+ mov %esi, %cr0
18143+#endif
18144+
18145+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
18146+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
18147+
18148+#ifdef CONFIG_PAX_KERNEXEC
18149+ bts $16, %esi
18150+ mov %esi, %cr0
18151+#endif
18152+
18153 pushl_cfi $__ESPFIX_SS
18154 pushl_cfi %eax /* new kernel esp */
18155 /* Disable interrupts, but do not irqtrace this section: we
18156@@ -619,20 +846,18 @@ work_resched:
18157 movl TI_flags(%ebp), %ecx
18158 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
18159 # than syscall tracing?
18160- jz restore_all
18161+ jz restore_all_pax
18162 testb $_TIF_NEED_RESCHED, %cl
18163 jnz work_resched
18164
18165 work_notifysig: # deal with pending signals and
18166 # notify-resume requests
18167+ movl %esp, %eax
18168 #ifdef CONFIG_VM86
18169 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
18170- movl %esp, %eax
18171 jne work_notifysig_v86 # returning to kernel-space or
18172 # vm86-space
18173 1:
18174-#else
18175- movl %esp, %eax
18176 #endif
18177 TRACE_IRQS_ON
18178 ENABLE_INTERRUPTS(CLBR_NONE)
18179@@ -653,7 +878,7 @@ work_notifysig_v86:
18180 movl %eax, %esp
18181 jmp 1b
18182 #endif
18183-END(work_pending)
18184+ENDPROC(work_pending)
18185
18186 # perform syscall exit tracing
18187 ALIGN
18188@@ -661,11 +886,14 @@ syscall_trace_entry:
18189 movl $-ENOSYS,PT_EAX(%esp)
18190 movl %esp, %eax
18191 call syscall_trace_enter
18192+
18193+ pax_erase_kstack
18194+
18195 /* What it returned is what we'll actually use. */
18196 cmpl $(NR_syscalls), %eax
18197 jnae syscall_call
18198 jmp syscall_exit
18199-END(syscall_trace_entry)
18200+ENDPROC(syscall_trace_entry)
18201
18202 # perform syscall exit tracing
18203 ALIGN
18204@@ -678,21 +906,25 @@ syscall_exit_work:
18205 movl %esp, %eax
18206 call syscall_trace_leave
18207 jmp resume_userspace
18208-END(syscall_exit_work)
18209+ENDPROC(syscall_exit_work)
18210 CFI_ENDPROC
18211
18212 RING0_INT_FRAME # can't unwind into user space anyway
18213 syscall_fault:
18214+#ifdef CONFIG_PAX_MEMORY_UDEREF
18215+ push %ss
18216+ pop %ds
18217+#endif
18218 ASM_CLAC
18219 GET_THREAD_INFO(%ebp)
18220 movl $-EFAULT,PT_EAX(%esp)
18221 jmp resume_userspace
18222-END(syscall_fault)
18223+ENDPROC(syscall_fault)
18224
18225 syscall_badsys:
18226 movl $-ENOSYS,PT_EAX(%esp)
18227 jmp resume_userspace
18228-END(syscall_badsys)
18229+ENDPROC(syscall_badsys)
18230 CFI_ENDPROC
18231 /*
18232 * End of kprobes section
18233@@ -753,8 +985,15 @@ PTREGSCALL1(vm86old)
18234 * normal stack and adjusts ESP with the matching offset.
18235 */
18236 /* fixup the stack */
18237- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18238- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18239+#ifdef CONFIG_SMP
18240+ movl PER_CPU_VAR(cpu_number), %ebx
18241+ shll $PAGE_SHIFT_asm, %ebx
18242+ addl $cpu_gdt_table, %ebx
18243+#else
18244+ movl $cpu_gdt_table, %ebx
18245+#endif
18246+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18247+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18248 shl $16, %eax
18249 addl %esp, %eax /* the adjusted stack pointer */
18250 pushl_cfi $__KERNEL_DS
18251@@ -807,7 +1046,7 @@ vector=vector+1
18252 .endr
18253 2: jmp common_interrupt
18254 .endr
18255-END(irq_entries_start)
18256+ENDPROC(irq_entries_start)
18257
18258 .previous
18259 END(interrupt)
18260@@ -858,7 +1097,7 @@ ENTRY(coprocessor_error)
18261 pushl_cfi $do_coprocessor_error
18262 jmp error_code
18263 CFI_ENDPROC
18264-END(coprocessor_error)
18265+ENDPROC(coprocessor_error)
18266
18267 ENTRY(simd_coprocessor_error)
18268 RING0_INT_FRAME
18269@@ -880,7 +1119,7 @@ ENTRY(simd_coprocessor_error)
18270 #endif
18271 jmp error_code
18272 CFI_ENDPROC
18273-END(simd_coprocessor_error)
18274+ENDPROC(simd_coprocessor_error)
18275
18276 ENTRY(device_not_available)
18277 RING0_INT_FRAME
18278@@ -889,18 +1128,18 @@ ENTRY(device_not_available)
18279 pushl_cfi $do_device_not_available
18280 jmp error_code
18281 CFI_ENDPROC
18282-END(device_not_available)
18283+ENDPROC(device_not_available)
18284
18285 #ifdef CONFIG_PARAVIRT
18286 ENTRY(native_iret)
18287 iret
18288 _ASM_EXTABLE(native_iret, iret_exc)
18289-END(native_iret)
18290+ENDPROC(native_iret)
18291
18292 ENTRY(native_irq_enable_sysexit)
18293 sti
18294 sysexit
18295-END(native_irq_enable_sysexit)
18296+ENDPROC(native_irq_enable_sysexit)
18297 #endif
18298
18299 ENTRY(overflow)
18300@@ -910,7 +1149,7 @@ ENTRY(overflow)
18301 pushl_cfi $do_overflow
18302 jmp error_code
18303 CFI_ENDPROC
18304-END(overflow)
18305+ENDPROC(overflow)
18306
18307 ENTRY(bounds)
18308 RING0_INT_FRAME
18309@@ -919,7 +1158,7 @@ ENTRY(bounds)
18310 pushl_cfi $do_bounds
18311 jmp error_code
18312 CFI_ENDPROC
18313-END(bounds)
18314+ENDPROC(bounds)
18315
18316 ENTRY(invalid_op)
18317 RING0_INT_FRAME
18318@@ -928,7 +1167,7 @@ ENTRY(invalid_op)
18319 pushl_cfi $do_invalid_op
18320 jmp error_code
18321 CFI_ENDPROC
18322-END(invalid_op)
18323+ENDPROC(invalid_op)
18324
18325 ENTRY(coprocessor_segment_overrun)
18326 RING0_INT_FRAME
18327@@ -937,7 +1176,7 @@ ENTRY(coprocessor_segment_overrun)
18328 pushl_cfi $do_coprocessor_segment_overrun
18329 jmp error_code
18330 CFI_ENDPROC
18331-END(coprocessor_segment_overrun)
18332+ENDPROC(coprocessor_segment_overrun)
18333
18334 ENTRY(invalid_TSS)
18335 RING0_EC_FRAME
18336@@ -945,7 +1184,7 @@ ENTRY(invalid_TSS)
18337 pushl_cfi $do_invalid_TSS
18338 jmp error_code
18339 CFI_ENDPROC
18340-END(invalid_TSS)
18341+ENDPROC(invalid_TSS)
18342
18343 ENTRY(segment_not_present)
18344 RING0_EC_FRAME
18345@@ -953,7 +1192,7 @@ ENTRY(segment_not_present)
18346 pushl_cfi $do_segment_not_present
18347 jmp error_code
18348 CFI_ENDPROC
18349-END(segment_not_present)
18350+ENDPROC(segment_not_present)
18351
18352 ENTRY(stack_segment)
18353 RING0_EC_FRAME
18354@@ -961,7 +1200,7 @@ ENTRY(stack_segment)
18355 pushl_cfi $do_stack_segment
18356 jmp error_code
18357 CFI_ENDPROC
18358-END(stack_segment)
18359+ENDPROC(stack_segment)
18360
18361 ENTRY(alignment_check)
18362 RING0_EC_FRAME
18363@@ -969,7 +1208,7 @@ ENTRY(alignment_check)
18364 pushl_cfi $do_alignment_check
18365 jmp error_code
18366 CFI_ENDPROC
18367-END(alignment_check)
18368+ENDPROC(alignment_check)
18369
18370 ENTRY(divide_error)
18371 RING0_INT_FRAME
18372@@ -978,7 +1217,7 @@ ENTRY(divide_error)
18373 pushl_cfi $do_divide_error
18374 jmp error_code
18375 CFI_ENDPROC
18376-END(divide_error)
18377+ENDPROC(divide_error)
18378
18379 #ifdef CONFIG_X86_MCE
18380 ENTRY(machine_check)
18381@@ -988,7 +1227,7 @@ ENTRY(machine_check)
18382 pushl_cfi machine_check_vector
18383 jmp error_code
18384 CFI_ENDPROC
18385-END(machine_check)
18386+ENDPROC(machine_check)
18387 #endif
18388
18389 ENTRY(spurious_interrupt_bug)
18390@@ -998,7 +1237,7 @@ ENTRY(spurious_interrupt_bug)
18391 pushl_cfi $do_spurious_interrupt_bug
18392 jmp error_code
18393 CFI_ENDPROC
18394-END(spurious_interrupt_bug)
18395+ENDPROC(spurious_interrupt_bug)
18396 /*
18397 * End of kprobes section
18398 */
18399@@ -1101,7 +1340,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
18400
18401 ENTRY(mcount)
18402 ret
18403-END(mcount)
18404+ENDPROC(mcount)
18405
18406 ENTRY(ftrace_caller)
18407 cmpl $0, function_trace_stop
18408@@ -1134,7 +1373,7 @@ ftrace_graph_call:
18409 .globl ftrace_stub
18410 ftrace_stub:
18411 ret
18412-END(ftrace_caller)
18413+ENDPROC(ftrace_caller)
18414
18415 ENTRY(ftrace_regs_caller)
18416 pushf /* push flags before compare (in cs location) */
18417@@ -1235,7 +1474,7 @@ trace:
18418 popl %ecx
18419 popl %eax
18420 jmp ftrace_stub
18421-END(mcount)
18422+ENDPROC(mcount)
18423 #endif /* CONFIG_DYNAMIC_FTRACE */
18424 #endif /* CONFIG_FUNCTION_TRACER */
18425
18426@@ -1253,7 +1492,7 @@ ENTRY(ftrace_graph_caller)
18427 popl %ecx
18428 popl %eax
18429 ret
18430-END(ftrace_graph_caller)
18431+ENDPROC(ftrace_graph_caller)
18432
18433 .globl return_to_handler
18434 return_to_handler:
18435@@ -1309,15 +1548,18 @@ error_code:
18436 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
18437 REG_TO_PTGS %ecx
18438 SET_KERNEL_GS %ecx
18439- movl $(__USER_DS), %ecx
18440+ movl $(__KERNEL_DS), %ecx
18441 movl %ecx, %ds
18442 movl %ecx, %es
18443+
18444+ pax_enter_kernel
18445+
18446 TRACE_IRQS_OFF
18447 movl %esp,%eax # pt_regs pointer
18448 call *%edi
18449 jmp ret_from_exception
18450 CFI_ENDPROC
18451-END(page_fault)
18452+ENDPROC(page_fault)
18453
18454 /*
18455 * Debug traps and NMI can happen at the one SYSENTER instruction
18456@@ -1360,7 +1602,7 @@ debug_stack_correct:
18457 call do_debug
18458 jmp ret_from_exception
18459 CFI_ENDPROC
18460-END(debug)
18461+ENDPROC(debug)
18462
18463 /*
18464 * NMI is doubly nasty. It can happen _while_ we're handling
18465@@ -1398,6 +1640,9 @@ nmi_stack_correct:
18466 xorl %edx,%edx # zero error code
18467 movl %esp,%eax # pt_regs pointer
18468 call do_nmi
18469+
18470+ pax_exit_kernel
18471+
18472 jmp restore_all_notrace
18473 CFI_ENDPROC
18474
18475@@ -1434,12 +1679,15 @@ nmi_espfix_stack:
18476 FIXUP_ESPFIX_STACK # %eax == %esp
18477 xorl %edx,%edx # zero error code
18478 call do_nmi
18479+
18480+ pax_exit_kernel
18481+
18482 RESTORE_REGS
18483 lss 12+4(%esp), %esp # back to espfix stack
18484 CFI_ADJUST_CFA_OFFSET -24
18485 jmp irq_return
18486 CFI_ENDPROC
18487-END(nmi)
18488+ENDPROC(nmi)
18489
18490 ENTRY(int3)
18491 RING0_INT_FRAME
18492@@ -1452,14 +1700,14 @@ ENTRY(int3)
18493 call do_int3
18494 jmp ret_from_exception
18495 CFI_ENDPROC
18496-END(int3)
18497+ENDPROC(int3)
18498
18499 ENTRY(general_protection)
18500 RING0_EC_FRAME
18501 pushl_cfi $do_general_protection
18502 jmp error_code
18503 CFI_ENDPROC
18504-END(general_protection)
18505+ENDPROC(general_protection)
18506
18507 #ifdef CONFIG_KVM_GUEST
18508 ENTRY(async_page_fault)
18509@@ -1468,7 +1716,7 @@ ENTRY(async_page_fault)
18510 pushl_cfi $do_async_page_fault
18511 jmp error_code
18512 CFI_ENDPROC
18513-END(async_page_fault)
18514+ENDPROC(async_page_fault)
18515 #endif
18516
18517 /*
18518diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
18519index cb3c591..bc63707 100644
18520--- a/arch/x86/kernel/entry_64.S
18521+++ b/arch/x86/kernel/entry_64.S
18522@@ -59,6 +59,8 @@
18523 #include <asm/context_tracking.h>
18524 #include <asm/smap.h>
18525 #include <linux/err.h>
18526+#include <asm/pgtable.h>
18527+#include <asm/alternative-asm.h>
18528
18529 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
18530 #include <linux/elf-em.h>
18531@@ -80,8 +82,9 @@
18532 #ifdef CONFIG_DYNAMIC_FTRACE
18533
18534 ENTRY(function_hook)
18535+ pax_force_retaddr
18536 retq
18537-END(function_hook)
18538+ENDPROC(function_hook)
18539
18540 /* skip is set if stack has been adjusted */
18541 .macro ftrace_caller_setup skip=0
18542@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
18543 #endif
18544
18545 GLOBAL(ftrace_stub)
18546+ pax_force_retaddr
18547 retq
18548-END(ftrace_caller)
18549+ENDPROC(ftrace_caller)
18550
18551 ENTRY(ftrace_regs_caller)
18552 /* Save the current flags before compare (in SS location)*/
18553@@ -191,7 +195,7 @@ ftrace_restore_flags:
18554 popfq
18555 jmp ftrace_stub
18556
18557-END(ftrace_regs_caller)
18558+ENDPROC(ftrace_regs_caller)
18559
18560
18561 #else /* ! CONFIG_DYNAMIC_FTRACE */
18562@@ -212,6 +216,7 @@ ENTRY(function_hook)
18563 #endif
18564
18565 GLOBAL(ftrace_stub)
18566+ pax_force_retaddr
18567 retq
18568
18569 trace:
18570@@ -225,12 +230,13 @@ trace:
18571 #endif
18572 subq $MCOUNT_INSN_SIZE, %rdi
18573
18574+ pax_force_fptr ftrace_trace_function
18575 call *ftrace_trace_function
18576
18577 MCOUNT_RESTORE_FRAME
18578
18579 jmp ftrace_stub
18580-END(function_hook)
18581+ENDPROC(function_hook)
18582 #endif /* CONFIG_DYNAMIC_FTRACE */
18583 #endif /* CONFIG_FUNCTION_TRACER */
18584
18585@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
18586
18587 MCOUNT_RESTORE_FRAME
18588
18589+ pax_force_retaddr
18590 retq
18591-END(ftrace_graph_caller)
18592+ENDPROC(ftrace_graph_caller)
18593
18594 GLOBAL(return_to_handler)
18595 subq $24, %rsp
18596@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
18597 movq 8(%rsp), %rdx
18598 movq (%rsp), %rax
18599 addq $24, %rsp
18600+ pax_force_fptr %rdi
18601 jmp *%rdi
18602+ENDPROC(return_to_handler)
18603 #endif
18604
18605
18606@@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
18607 ENDPROC(native_usergs_sysret64)
18608 #endif /* CONFIG_PARAVIRT */
18609
18610+ .macro ljmpq sel, off
18611+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
18612+ .byte 0x48; ljmp *1234f(%rip)
18613+ .pushsection .rodata
18614+ .align 16
18615+ 1234: .quad \off; .word \sel
18616+ .popsection
18617+#else
18618+ pushq $\sel
18619+ pushq $\off
18620+ lretq
18621+#endif
18622+ .endm
18623+
18624+ .macro pax_enter_kernel
18625+ pax_set_fptr_mask
18626+#ifdef CONFIG_PAX_KERNEXEC
18627+ call pax_enter_kernel
18628+#endif
18629+ .endm
18630+
18631+ .macro pax_exit_kernel
18632+#ifdef CONFIG_PAX_KERNEXEC
18633+ call pax_exit_kernel
18634+#endif
18635+ .endm
18636+
18637+#ifdef CONFIG_PAX_KERNEXEC
18638+ENTRY(pax_enter_kernel)
18639+ pushq %rdi
18640+
18641+#ifdef CONFIG_PARAVIRT
18642+ PV_SAVE_REGS(CLBR_RDI)
18643+#endif
18644+
18645+ GET_CR0_INTO_RDI
18646+ bts $16,%rdi
18647+ jnc 3f
18648+ mov %cs,%edi
18649+ cmp $__KERNEL_CS,%edi
18650+ jnz 2f
18651+1:
18652+
18653+#ifdef CONFIG_PARAVIRT
18654+ PV_RESTORE_REGS(CLBR_RDI)
18655+#endif
18656+
18657+ popq %rdi
18658+ pax_force_retaddr
18659+ retq
18660+
18661+2: ljmpq __KERNEL_CS,1f
18662+3: ljmpq __KERNEXEC_KERNEL_CS,4f
18663+4: SET_RDI_INTO_CR0
18664+ jmp 1b
18665+ENDPROC(pax_enter_kernel)
18666+
18667+ENTRY(pax_exit_kernel)
18668+ pushq %rdi
18669+
18670+#ifdef CONFIG_PARAVIRT
18671+ PV_SAVE_REGS(CLBR_RDI)
18672+#endif
18673+
18674+ mov %cs,%rdi
18675+ cmp $__KERNEXEC_KERNEL_CS,%edi
18676+ jz 2f
18677+1:
18678+
18679+#ifdef CONFIG_PARAVIRT
18680+ PV_RESTORE_REGS(CLBR_RDI);
18681+#endif
18682+
18683+ popq %rdi
18684+ pax_force_retaddr
18685+ retq
18686+
18687+2: GET_CR0_INTO_RDI
18688+ btr $16,%rdi
18689+ ljmpq __KERNEL_CS,3f
18690+3: SET_RDI_INTO_CR0
18691+ jmp 1b
18692+ENDPROC(pax_exit_kernel)
18693+#endif
18694+
18695+ .macro pax_enter_kernel_user
18696+ pax_set_fptr_mask
18697+#ifdef CONFIG_PAX_MEMORY_UDEREF
18698+ call pax_enter_kernel_user
18699+#endif
18700+ .endm
18701+
18702+ .macro pax_exit_kernel_user
18703+#ifdef CONFIG_PAX_MEMORY_UDEREF
18704+ call pax_exit_kernel_user
18705+#endif
18706+#ifdef CONFIG_PAX_RANDKSTACK
18707+ pushq %rax
18708+ call pax_randomize_kstack
18709+ popq %rax
18710+#endif
18711+ .endm
18712+
18713+#ifdef CONFIG_PAX_MEMORY_UDEREF
18714+ENTRY(pax_enter_kernel_user)
18715+ pushq %rdi
18716+ pushq %rbx
18717+
18718+#ifdef CONFIG_PARAVIRT
18719+ PV_SAVE_REGS(CLBR_RDI)
18720+#endif
18721+
18722+ GET_CR3_INTO_RDI
18723+ mov %rdi,%rbx
18724+ add $__START_KERNEL_map,%rbx
18725+ sub phys_base(%rip),%rbx
18726+
18727+#ifdef CONFIG_PARAVIRT
18728+ pushq %rdi
18729+ cmpl $0, pv_info+PARAVIRT_enabled
18730+ jz 1f
18731+ i = 0
18732+ .rept USER_PGD_PTRS
18733+ mov i*8(%rbx),%rsi
18734+ mov $0,%sil
18735+ lea i*8(%rbx),%rdi
18736+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
18737+ i = i + 1
18738+ .endr
18739+ jmp 2f
18740+1:
18741+#endif
18742+
18743+ i = 0
18744+ .rept USER_PGD_PTRS
18745+ movb $0,i*8(%rbx)
18746+ i = i + 1
18747+ .endr
18748+
18749+#ifdef CONFIG_PARAVIRT
18750+2: popq %rdi
18751+#endif
18752+ SET_RDI_INTO_CR3
18753+
18754+#ifdef CONFIG_PAX_KERNEXEC
18755+ GET_CR0_INTO_RDI
18756+ bts $16,%rdi
18757+ SET_RDI_INTO_CR0
18758+#endif
18759+
18760+#ifdef CONFIG_PARAVIRT
18761+ PV_RESTORE_REGS(CLBR_RDI)
18762+#endif
18763+
18764+ popq %rbx
18765+ popq %rdi
18766+ pax_force_retaddr
18767+ retq
18768+ENDPROC(pax_enter_kernel_user)
18769+
18770+ENTRY(pax_exit_kernel_user)
18771+ push %rdi
18772+
18773+#ifdef CONFIG_PARAVIRT
18774+ pushq %rbx
18775+ PV_SAVE_REGS(CLBR_RDI)
18776+#endif
18777+
18778+#ifdef CONFIG_PAX_KERNEXEC
18779+ GET_CR0_INTO_RDI
18780+ btr $16,%rdi
18781+ SET_RDI_INTO_CR0
18782+#endif
18783+
18784+ GET_CR3_INTO_RDI
18785+ add $__START_KERNEL_map,%rdi
18786+ sub phys_base(%rip),%rdi
18787+
18788+#ifdef CONFIG_PARAVIRT
18789+ cmpl $0, pv_info+PARAVIRT_enabled
18790+ jz 1f
18791+ mov %rdi,%rbx
18792+ i = 0
18793+ .rept USER_PGD_PTRS
18794+ mov i*8(%rbx),%rsi
18795+ mov $0x67,%sil
18796+ lea i*8(%rbx),%rdi
18797+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
18798+ i = i + 1
18799+ .endr
18800+ jmp 2f
18801+1:
18802+#endif
18803+
18804+ i = 0
18805+ .rept USER_PGD_PTRS
18806+ movb $0x67,i*8(%rdi)
18807+ i = i + 1
18808+ .endr
18809+
18810+#ifdef CONFIG_PARAVIRT
18811+2: PV_RESTORE_REGS(CLBR_RDI)
18812+ popq %rbx
18813+#endif
18814+
18815+ popq %rdi
18816+ pax_force_retaddr
18817+ retq
18818+ENDPROC(pax_exit_kernel_user)
18819+#endif
18820+
18821+.macro pax_erase_kstack
18822+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18823+ call pax_erase_kstack
18824+#endif
18825+.endm
18826+
18827+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18828+ENTRY(pax_erase_kstack)
18829+ pushq %rdi
18830+ pushq %rcx
18831+ pushq %rax
18832+ pushq %r11
18833+
18834+ GET_THREAD_INFO(%r11)
18835+ mov TI_lowest_stack(%r11), %rdi
18836+ mov $-0xBEEF, %rax
18837+ std
18838+
18839+1: mov %edi, %ecx
18840+ and $THREAD_SIZE_asm - 1, %ecx
18841+ shr $3, %ecx
18842+ repne scasq
18843+ jecxz 2f
18844+
18845+ cmp $2*8, %ecx
18846+ jc 2f
18847+
18848+ mov $2*8, %ecx
18849+ repe scasq
18850+ jecxz 2f
18851+ jne 1b
18852+
18853+2: cld
18854+ mov %esp, %ecx
18855+ sub %edi, %ecx
18856+
18857+ cmp $THREAD_SIZE_asm, %rcx
18858+ jb 3f
18859+ ud2
18860+3:
18861+
18862+ shr $3, %ecx
18863+ rep stosq
18864+
18865+ mov TI_task_thread_sp0(%r11), %rdi
18866+ sub $256, %rdi
18867+ mov %rdi, TI_lowest_stack(%r11)
18868+
18869+ popq %r11
18870+ popq %rax
18871+ popq %rcx
18872+ popq %rdi
18873+ pax_force_retaddr
18874+ ret
18875+ENDPROC(pax_erase_kstack)
18876+#endif
18877
18878 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
18879 #ifdef CONFIG_TRACE_IRQFLAGS
18880@@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
18881 .endm
18882
18883 .macro UNFAKE_STACK_FRAME
18884- addq $8*6, %rsp
18885- CFI_ADJUST_CFA_OFFSET -(6*8)
18886+ addq $8*6 + ARG_SKIP, %rsp
18887+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
18888 .endm
18889
18890 /*
18891@@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
18892 movq %rsp, %rsi
18893
18894 leaq -RBP(%rsp),%rdi /* arg1 for handler */
18895- testl $3, CS-RBP(%rsi)
18896+ testb $3, CS-RBP(%rsi)
18897 je 1f
18898 SWAPGS
18899 /*
18900@@ -498,9 +774,10 @@ ENTRY(save_rest)
18901 movq_cfi r15, R15+16
18902 movq %r11, 8(%rsp) /* return address */
18903 FIXUP_TOP_OF_STACK %r11, 16
18904+ pax_force_retaddr
18905 ret
18906 CFI_ENDPROC
18907-END(save_rest)
18908+ENDPROC(save_rest)
18909
18910 /* save complete stack frame */
18911 .pushsection .kprobes.text, "ax"
18912@@ -529,9 +806,10 @@ ENTRY(save_paranoid)
18913 js 1f /* negative -> in kernel */
18914 SWAPGS
18915 xorl %ebx,%ebx
18916-1: ret
18917+1: pax_force_retaddr_bts
18918+ ret
18919 CFI_ENDPROC
18920-END(save_paranoid)
18921+ENDPROC(save_paranoid)
18922 .popsection
18923
18924 /*
18925@@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
18926
18927 RESTORE_REST
18928
18929- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
18930+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
18931 jz 1f
18932
18933 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
18934@@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
18935 RESTORE_REST
18936 jmp int_ret_from_sys_call
18937 CFI_ENDPROC
18938-END(ret_from_fork)
18939+ENDPROC(ret_from_fork)
18940
18941 /*
18942 * System call entry. Up to 6 arguments in registers are supported.
18943@@ -608,7 +886,7 @@ END(ret_from_fork)
18944 ENTRY(system_call)
18945 CFI_STARTPROC simple
18946 CFI_SIGNAL_FRAME
18947- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
18948+ CFI_DEF_CFA rsp,0
18949 CFI_REGISTER rip,rcx
18950 /*CFI_REGISTER rflags,r11*/
18951 SWAPGS_UNSAFE_STACK
18952@@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
18953
18954 movq %rsp,PER_CPU_VAR(old_rsp)
18955 movq PER_CPU_VAR(kernel_stack),%rsp
18956+ SAVE_ARGS 8*6,0
18957+ pax_enter_kernel_user
18958+
18959+#ifdef CONFIG_PAX_RANDKSTACK
18960+ pax_erase_kstack
18961+#endif
18962+
18963 /*
18964 * No need to follow this irqs off/on section - it's straight
18965 * and short:
18966 */
18967 ENABLE_INTERRUPTS(CLBR_NONE)
18968- SAVE_ARGS 8,0
18969 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
18970 movq %rcx,RIP-ARGOFFSET(%rsp)
18971 CFI_REL_OFFSET rip,RIP-ARGOFFSET
18972- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
18973+ GET_THREAD_INFO(%rcx)
18974+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
18975 jnz tracesys
18976 system_call_fastpath:
18977 #if __SYSCALL_MASK == ~0
18978@@ -640,7 +925,7 @@ system_call_fastpath:
18979 cmpl $__NR_syscall_max,%eax
18980 #endif
18981 ja badsys
18982- movq %r10,%rcx
18983+ movq R10-ARGOFFSET(%rsp),%rcx
18984 call *sys_call_table(,%rax,8) # XXX: rip relative
18985 movq %rax,RAX-ARGOFFSET(%rsp)
18986 /*
18987@@ -654,10 +939,13 @@ sysret_check:
18988 LOCKDEP_SYS_EXIT
18989 DISABLE_INTERRUPTS(CLBR_NONE)
18990 TRACE_IRQS_OFF
18991- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
18992+ GET_THREAD_INFO(%rcx)
18993+ movl TI_flags(%rcx),%edx
18994 andl %edi,%edx
18995 jnz sysret_careful
18996 CFI_REMEMBER_STATE
18997+ pax_exit_kernel_user
18998+ pax_erase_kstack
18999 /*
19000 * sysretq will re-enable interrupts:
19001 */
19002@@ -709,14 +997,18 @@ badsys:
19003 * jump back to the normal fast path.
19004 */
19005 auditsys:
19006- movq %r10,%r9 /* 6th arg: 4th syscall arg */
19007+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
19008 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
19009 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
19010 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
19011 movq %rax,%rsi /* 2nd arg: syscall number */
19012 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
19013 call __audit_syscall_entry
19014+
19015+ pax_erase_kstack
19016+
19017 LOAD_ARGS 0 /* reload call-clobbered registers */
19018+ pax_set_fptr_mask
19019 jmp system_call_fastpath
19020
19021 /*
19022@@ -737,7 +1029,7 @@ sysret_audit:
19023 /* Do syscall tracing */
19024 tracesys:
19025 #ifdef CONFIG_AUDITSYSCALL
19026- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19027+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
19028 jz auditsys
19029 #endif
19030 SAVE_REST
19031@@ -745,12 +1037,16 @@ tracesys:
19032 FIXUP_TOP_OF_STACK %rdi
19033 movq %rsp,%rdi
19034 call syscall_trace_enter
19035+
19036+ pax_erase_kstack
19037+
19038 /*
19039 * Reload arg registers from stack in case ptrace changed them.
19040 * We don't reload %rax because syscall_trace_enter() returned
19041 * the value it wants us to use in the table lookup.
19042 */
19043 LOAD_ARGS ARGOFFSET, 1
19044+ pax_set_fptr_mask
19045 RESTORE_REST
19046 #if __SYSCALL_MASK == ~0
19047 cmpq $__NR_syscall_max,%rax
19048@@ -759,7 +1055,7 @@ tracesys:
19049 cmpl $__NR_syscall_max,%eax
19050 #endif
19051 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
19052- movq %r10,%rcx /* fixup for C */
19053+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
19054 call *sys_call_table(,%rax,8)
19055 movq %rax,RAX-ARGOFFSET(%rsp)
19056 /* Use IRET because user could have changed frame */
19057@@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
19058 andl %edi,%edx
19059 jnz int_careful
19060 andl $~TS_COMPAT,TI_status(%rcx)
19061- jmp retint_swapgs
19062+ pax_exit_kernel_user
19063+ pax_erase_kstack
19064+ jmp retint_swapgs_pax
19065
19066 /* Either reschedule or signal or syscall exit tracking needed. */
19067 /* First do a reschedule test. */
19068@@ -826,7 +1124,7 @@ int_restore_rest:
19069 TRACE_IRQS_OFF
19070 jmp int_with_check
19071 CFI_ENDPROC
19072-END(system_call)
19073+ENDPROC(system_call)
19074
19075 /*
19076 * Certain special system calls that need to save a complete full stack frame.
19077@@ -842,7 +1140,7 @@ ENTRY(\label)
19078 call \func
19079 jmp ptregscall_common
19080 CFI_ENDPROC
19081-END(\label)
19082+ENDPROC(\label)
19083 .endm
19084
19085 .macro FORK_LIKE func
19086@@ -856,9 +1154,10 @@ ENTRY(stub_\func)
19087 DEFAULT_FRAME 0 8 /* offset 8: return address */
19088 call sys_\func
19089 RESTORE_TOP_OF_STACK %r11, 8
19090+ pax_force_retaddr
19091 ret $REST_SKIP /* pop extended registers */
19092 CFI_ENDPROC
19093-END(stub_\func)
19094+ENDPROC(stub_\func)
19095 .endm
19096
19097 FORK_LIKE clone
19098@@ -875,9 +1174,10 @@ ENTRY(ptregscall_common)
19099 movq_cfi_restore R12+8, r12
19100 movq_cfi_restore RBP+8, rbp
19101 movq_cfi_restore RBX+8, rbx
19102+ pax_force_retaddr
19103 ret $REST_SKIP /* pop extended registers */
19104 CFI_ENDPROC
19105-END(ptregscall_common)
19106+ENDPROC(ptregscall_common)
19107
19108 ENTRY(stub_execve)
19109 CFI_STARTPROC
19110@@ -891,7 +1191,7 @@ ENTRY(stub_execve)
19111 RESTORE_REST
19112 jmp int_ret_from_sys_call
19113 CFI_ENDPROC
19114-END(stub_execve)
19115+ENDPROC(stub_execve)
19116
19117 /*
19118 * sigreturn is special because it needs to restore all registers on return.
19119@@ -909,7 +1209,7 @@ ENTRY(stub_rt_sigreturn)
19120 RESTORE_REST
19121 jmp int_ret_from_sys_call
19122 CFI_ENDPROC
19123-END(stub_rt_sigreturn)
19124+ENDPROC(stub_rt_sigreturn)
19125
19126 #ifdef CONFIG_X86_X32_ABI
19127 ENTRY(stub_x32_rt_sigreturn)
19128@@ -975,7 +1275,7 @@ vector=vector+1
19129 2: jmp common_interrupt
19130 .endr
19131 CFI_ENDPROC
19132-END(irq_entries_start)
19133+ENDPROC(irq_entries_start)
19134
19135 .previous
19136 END(interrupt)
19137@@ -995,6 +1295,16 @@ END(interrupt)
19138 subq $ORIG_RAX-RBP, %rsp
19139 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
19140 SAVE_ARGS_IRQ
19141+#ifdef CONFIG_PAX_MEMORY_UDEREF
19142+ testb $3, CS(%rdi)
19143+ jnz 1f
19144+ pax_enter_kernel
19145+ jmp 2f
19146+1: pax_enter_kernel_user
19147+2:
19148+#else
19149+ pax_enter_kernel
19150+#endif
19151 call \func
19152 .endm
19153
19154@@ -1027,7 +1337,7 @@ ret_from_intr:
19155
19156 exit_intr:
19157 GET_THREAD_INFO(%rcx)
19158- testl $3,CS-ARGOFFSET(%rsp)
19159+ testb $3,CS-ARGOFFSET(%rsp)
19160 je retint_kernel
19161
19162 /* Interrupt came from user space */
19163@@ -1049,12 +1359,16 @@ retint_swapgs: /* return to user-space */
19164 * The iretq could re-enable interrupts:
19165 */
19166 DISABLE_INTERRUPTS(CLBR_ANY)
19167+ pax_exit_kernel_user
19168+retint_swapgs_pax:
19169 TRACE_IRQS_IRETQ
19170 SWAPGS
19171 jmp restore_args
19172
19173 retint_restore_args: /* return to kernel space */
19174 DISABLE_INTERRUPTS(CLBR_ANY)
19175+ pax_exit_kernel
19176+ pax_force_retaddr (RIP-ARGOFFSET)
19177 /*
19178 * The iretq could re-enable interrupts:
19179 */
19180@@ -1137,7 +1451,7 @@ ENTRY(retint_kernel)
19181 #endif
19182
19183 CFI_ENDPROC
19184-END(common_interrupt)
19185+ENDPROC(common_interrupt)
19186 /*
19187 * End of kprobes section
19188 */
19189@@ -1155,7 +1469,7 @@ ENTRY(\sym)
19190 interrupt \do_sym
19191 jmp ret_from_intr
19192 CFI_ENDPROC
19193-END(\sym)
19194+ENDPROC(\sym)
19195 .endm
19196
19197 #ifdef CONFIG_SMP
19198@@ -1211,12 +1525,22 @@ ENTRY(\sym)
19199 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19200 call error_entry
19201 DEFAULT_FRAME 0
19202+#ifdef CONFIG_PAX_MEMORY_UDEREF
19203+ testb $3, CS(%rsp)
19204+ jnz 1f
19205+ pax_enter_kernel
19206+ jmp 2f
19207+1: pax_enter_kernel_user
19208+2:
19209+#else
19210+ pax_enter_kernel
19211+#endif
19212 movq %rsp,%rdi /* pt_regs pointer */
19213 xorl %esi,%esi /* no error code */
19214 call \do_sym
19215 jmp error_exit /* %ebx: no swapgs flag */
19216 CFI_ENDPROC
19217-END(\sym)
19218+ENDPROC(\sym)
19219 .endm
19220
19221 .macro paranoidzeroentry sym do_sym
19222@@ -1229,15 +1553,25 @@ ENTRY(\sym)
19223 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19224 call save_paranoid
19225 TRACE_IRQS_OFF
19226+#ifdef CONFIG_PAX_MEMORY_UDEREF
19227+ testb $3, CS(%rsp)
19228+ jnz 1f
19229+ pax_enter_kernel
19230+ jmp 2f
19231+1: pax_enter_kernel_user
19232+2:
19233+#else
19234+ pax_enter_kernel
19235+#endif
19236 movq %rsp,%rdi /* pt_regs pointer */
19237 xorl %esi,%esi /* no error code */
19238 call \do_sym
19239 jmp paranoid_exit /* %ebx: no swapgs flag */
19240 CFI_ENDPROC
19241-END(\sym)
19242+ENDPROC(\sym)
19243 .endm
19244
19245-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19246+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19247 .macro paranoidzeroentry_ist sym do_sym ist
19248 ENTRY(\sym)
19249 INTR_FRAME
19250@@ -1248,14 +1582,30 @@ ENTRY(\sym)
19251 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19252 call save_paranoid
19253 TRACE_IRQS_OFF_DEBUG
19254+#ifdef CONFIG_PAX_MEMORY_UDEREF
19255+ testb $3, CS(%rsp)
19256+ jnz 1f
19257+ pax_enter_kernel
19258+ jmp 2f
19259+1: pax_enter_kernel_user
19260+2:
19261+#else
19262+ pax_enter_kernel
19263+#endif
19264 movq %rsp,%rdi /* pt_regs pointer */
19265 xorl %esi,%esi /* no error code */
19266+#ifdef CONFIG_SMP
19267+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19268+ lea init_tss(%r12), %r12
19269+#else
19270+ lea init_tss(%rip), %r12
19271+#endif
19272 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19273 call \do_sym
19274 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19275 jmp paranoid_exit /* %ebx: no swapgs flag */
19276 CFI_ENDPROC
19277-END(\sym)
19278+ENDPROC(\sym)
19279 .endm
19280
19281 .macro errorentry sym do_sym
19282@@ -1267,13 +1617,23 @@ ENTRY(\sym)
19283 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19284 call error_entry
19285 DEFAULT_FRAME 0
19286+#ifdef CONFIG_PAX_MEMORY_UDEREF
19287+ testb $3, CS(%rsp)
19288+ jnz 1f
19289+ pax_enter_kernel
19290+ jmp 2f
19291+1: pax_enter_kernel_user
19292+2:
19293+#else
19294+ pax_enter_kernel
19295+#endif
19296 movq %rsp,%rdi /* pt_regs pointer */
19297 movq ORIG_RAX(%rsp),%rsi /* get error code */
19298 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19299 call \do_sym
19300 jmp error_exit /* %ebx: no swapgs flag */
19301 CFI_ENDPROC
19302-END(\sym)
19303+ENDPROC(\sym)
19304 .endm
19305
19306 /* error code is on the stack already */
19307@@ -1287,13 +1647,23 @@ ENTRY(\sym)
19308 call save_paranoid
19309 DEFAULT_FRAME 0
19310 TRACE_IRQS_OFF
19311+#ifdef CONFIG_PAX_MEMORY_UDEREF
19312+ testb $3, CS(%rsp)
19313+ jnz 1f
19314+ pax_enter_kernel
19315+ jmp 2f
19316+1: pax_enter_kernel_user
19317+2:
19318+#else
19319+ pax_enter_kernel
19320+#endif
19321 movq %rsp,%rdi /* pt_regs pointer */
19322 movq ORIG_RAX(%rsp),%rsi /* get error code */
19323 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19324 call \do_sym
19325 jmp paranoid_exit /* %ebx: no swapgs flag */
19326 CFI_ENDPROC
19327-END(\sym)
19328+ENDPROC(\sym)
19329 .endm
19330
19331 zeroentry divide_error do_divide_error
19332@@ -1323,9 +1693,10 @@ gs_change:
19333 2: mfence /* workaround */
19334 SWAPGS
19335 popfq_cfi
19336+ pax_force_retaddr
19337 ret
19338 CFI_ENDPROC
19339-END(native_load_gs_index)
19340+ENDPROC(native_load_gs_index)
19341
19342 _ASM_EXTABLE(gs_change,bad_gs)
19343 .section .fixup,"ax"
19344@@ -1353,9 +1724,10 @@ ENTRY(call_softirq)
19345 CFI_DEF_CFA_REGISTER rsp
19346 CFI_ADJUST_CFA_OFFSET -8
19347 decl PER_CPU_VAR(irq_count)
19348+ pax_force_retaddr
19349 ret
19350 CFI_ENDPROC
19351-END(call_softirq)
19352+ENDPROC(call_softirq)
19353
19354 #ifdef CONFIG_XEN
19355 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
19356@@ -1393,7 +1765,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
19357 decl PER_CPU_VAR(irq_count)
19358 jmp error_exit
19359 CFI_ENDPROC
19360-END(xen_do_hypervisor_callback)
19361+ENDPROC(xen_do_hypervisor_callback)
19362
19363 /*
19364 * Hypervisor uses this for application faults while it executes.
19365@@ -1452,7 +1824,7 @@ ENTRY(xen_failsafe_callback)
19366 SAVE_ALL
19367 jmp error_exit
19368 CFI_ENDPROC
19369-END(xen_failsafe_callback)
19370+ENDPROC(xen_failsafe_callback)
19371
19372 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
19373 xen_hvm_callback_vector xen_evtchn_do_upcall
19374@@ -1501,16 +1873,31 @@ ENTRY(paranoid_exit)
19375 TRACE_IRQS_OFF_DEBUG
19376 testl %ebx,%ebx /* swapgs needed? */
19377 jnz paranoid_restore
19378- testl $3,CS(%rsp)
19379+ testb $3,CS(%rsp)
19380 jnz paranoid_userspace
19381+#ifdef CONFIG_PAX_MEMORY_UDEREF
19382+ pax_exit_kernel
19383+ TRACE_IRQS_IRETQ 0
19384+ SWAPGS_UNSAFE_STACK
19385+ RESTORE_ALL 8
19386+ pax_force_retaddr_bts
19387+ jmp irq_return
19388+#endif
19389 paranoid_swapgs:
19390+#ifdef CONFIG_PAX_MEMORY_UDEREF
19391+ pax_exit_kernel_user
19392+#else
19393+ pax_exit_kernel
19394+#endif
19395 TRACE_IRQS_IRETQ 0
19396 SWAPGS_UNSAFE_STACK
19397 RESTORE_ALL 8
19398 jmp irq_return
19399 paranoid_restore:
19400+ pax_exit_kernel
19401 TRACE_IRQS_IRETQ_DEBUG 0
19402 RESTORE_ALL 8
19403+ pax_force_retaddr_bts
19404 jmp irq_return
19405 paranoid_userspace:
19406 GET_THREAD_INFO(%rcx)
19407@@ -1539,7 +1926,7 @@ paranoid_schedule:
19408 TRACE_IRQS_OFF
19409 jmp paranoid_userspace
19410 CFI_ENDPROC
19411-END(paranoid_exit)
19412+ENDPROC(paranoid_exit)
19413
19414 /*
19415 * Exception entry point. This expects an error code/orig_rax on the stack.
19416@@ -1566,12 +1953,13 @@ ENTRY(error_entry)
19417 movq_cfi r14, R14+8
19418 movq_cfi r15, R15+8
19419 xorl %ebx,%ebx
19420- testl $3,CS+8(%rsp)
19421+ testb $3,CS+8(%rsp)
19422 je error_kernelspace
19423 error_swapgs:
19424 SWAPGS
19425 error_sti:
19426 TRACE_IRQS_OFF
19427+ pax_force_retaddr_bts
19428 ret
19429
19430 /*
19431@@ -1598,7 +1986,7 @@ bstep_iret:
19432 movq %rcx,RIP+8(%rsp)
19433 jmp error_swapgs
19434 CFI_ENDPROC
19435-END(error_entry)
19436+ENDPROC(error_entry)
19437
19438
19439 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
19440@@ -1618,7 +2006,7 @@ ENTRY(error_exit)
19441 jnz retint_careful
19442 jmp retint_swapgs
19443 CFI_ENDPROC
19444-END(error_exit)
19445+ENDPROC(error_exit)
19446
19447 /*
19448 * Test if a given stack is an NMI stack or not.
19449@@ -1676,9 +2064,11 @@ ENTRY(nmi)
19450 * If %cs was not the kernel segment, then the NMI triggered in user
19451 * space, which means it is definitely not nested.
19452 */
19453+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
19454+ je 1f
19455 cmpl $__KERNEL_CS, 16(%rsp)
19456 jne first_nmi
19457-
19458+1:
19459 /*
19460 * Check the special variable on the stack to see if NMIs are
19461 * executing.
19462@@ -1847,6 +2237,17 @@ end_repeat_nmi:
19463 */
19464 movq %cr2, %r12
19465
19466+#ifdef CONFIG_PAX_MEMORY_UDEREF
19467+ testb $3, CS(%rsp)
19468+ jnz 1f
19469+ pax_enter_kernel
19470+ jmp 2f
19471+1: pax_enter_kernel_user
19472+2:
19473+#else
19474+ pax_enter_kernel
19475+#endif
19476+
19477 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
19478 movq %rsp,%rdi
19479 movq $-1,%rsi
19480@@ -1862,23 +2263,34 @@ end_repeat_nmi:
19481 testl %ebx,%ebx /* swapgs needed? */
19482 jnz nmi_restore
19483 nmi_swapgs:
19484+#ifdef CONFIG_PAX_MEMORY_UDEREF
19485+ pax_exit_kernel_user
19486+#else
19487+ pax_exit_kernel
19488+#endif
19489 SWAPGS_UNSAFE_STACK
19490+ RESTORE_ALL 6*8
19491+ /* Clear the NMI executing stack variable */
19492+ movq $0, 5*8(%rsp)
19493+ jmp irq_return
19494 nmi_restore:
19495+ pax_exit_kernel
19496 /* Pop the extra iret frame at once */
19497 RESTORE_ALL 6*8
19498+ pax_force_retaddr_bts
19499
19500 /* Clear the NMI executing stack variable */
19501 movq $0, 5*8(%rsp)
19502 jmp irq_return
19503 CFI_ENDPROC
19504-END(nmi)
19505+ENDPROC(nmi)
19506
19507 ENTRY(ignore_sysret)
19508 CFI_STARTPROC
19509 mov $-ENOSYS,%eax
19510 sysret
19511 CFI_ENDPROC
19512-END(ignore_sysret)
19513+ENDPROC(ignore_sysret)
19514
19515 /*
19516 * End of kprobes section
19517diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
19518index 1d41402..af9a46a 100644
19519--- a/arch/x86/kernel/ftrace.c
19520+++ b/arch/x86/kernel/ftrace.c
19521@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
19522 {
19523 unsigned char replaced[MCOUNT_INSN_SIZE];
19524
19525+ ip = ktla_ktva(ip);
19526+
19527 /*
19528 * Note: Due to modules and __init, code can
19529 * disappear and change, we need to protect against faulting
19530@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
19531 unsigned char old[MCOUNT_INSN_SIZE], *new;
19532 int ret;
19533
19534- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
19535+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
19536 new = ftrace_call_replace(ip, (unsigned long)func);
19537
19538 /* See comment above by declaration of modifying_ftrace_code */
19539@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
19540 /* Also update the regs callback function */
19541 if (!ret) {
19542 ip = (unsigned long)(&ftrace_regs_call);
19543- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
19544+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
19545 new = ftrace_call_replace(ip, (unsigned long)func);
19546 ret = ftrace_modify_code(ip, old, new);
19547 }
19548@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
19549 * kernel identity mapping to modify code.
19550 */
19551 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
19552- ip = (unsigned long)__va(__pa(ip));
19553+ ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
19554
19555 return probe_kernel_write((void *)ip, val, size);
19556 }
19557@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
19558 unsigned char replaced[MCOUNT_INSN_SIZE];
19559 unsigned char brk = BREAKPOINT_INSTRUCTION;
19560
19561- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
19562+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
19563 return -EFAULT;
19564
19565 /* Make sure it is what we expect it to be */
19566@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
19567 return ret;
19568
19569 fail_update:
19570- probe_kernel_write((void *)ip, &old_code[0], 1);
19571+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
19572 goto out;
19573 }
19574
19575@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
19576 {
19577 unsigned char code[MCOUNT_INSN_SIZE];
19578
19579+ ip = ktla_ktva(ip);
19580+
19581 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
19582 return -EFAULT;
19583
19584diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
19585index c18f59d..9c0c9f6 100644
19586--- a/arch/x86/kernel/head32.c
19587+++ b/arch/x86/kernel/head32.c
19588@@ -18,6 +18,7 @@
19589 #include <asm/io_apic.h>
19590 #include <asm/bios_ebda.h>
19591 #include <asm/tlbflush.h>
19592+#include <asm/boot.h>
19593
19594 static void __init i386_default_early_setup(void)
19595 {
19596@@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
19597
19598 void __init i386_start_kernel(void)
19599 {
19600- memblock_reserve(__pa_symbol(&_text),
19601- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
19602+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
19603
19604 #ifdef CONFIG_BLK_DEV_INITRD
19605 /* Reserve INITRD */
19606diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
19607index c8932c7..d56b622 100644
19608--- a/arch/x86/kernel/head_32.S
19609+++ b/arch/x86/kernel/head_32.S
19610@@ -26,6 +26,12 @@
19611 /* Physical address */
19612 #define pa(X) ((X) - __PAGE_OFFSET)
19613
19614+#ifdef CONFIG_PAX_KERNEXEC
19615+#define ta(X) (X)
19616+#else
19617+#define ta(X) ((X) - __PAGE_OFFSET)
19618+#endif
19619+
19620 /*
19621 * References to members of the new_cpu_data structure.
19622 */
19623@@ -55,11 +61,7 @@
19624 * and small than max_low_pfn, otherwise will waste some page table entries
19625 */
19626
19627-#if PTRS_PER_PMD > 1
19628-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
19629-#else
19630-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
19631-#endif
19632+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
19633
19634 /* Number of possible pages in the lowmem region */
19635 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
19636@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
19637 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
19638
19639 /*
19640+ * Real beginning of normal "text" segment
19641+ */
19642+ENTRY(stext)
19643+ENTRY(_stext)
19644+
19645+/*
19646 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
19647 * %esi points to the real-mode code as a 32-bit pointer.
19648 * CS and DS must be 4 GB flat segments, but we don't depend on
19649@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
19650 * can.
19651 */
19652 __HEAD
19653+
19654+#ifdef CONFIG_PAX_KERNEXEC
19655+ jmp startup_32
19656+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
19657+.fill PAGE_SIZE-5,1,0xcc
19658+#endif
19659+
19660 ENTRY(startup_32)
19661 movl pa(stack_start),%ecx
19662
19663@@ -106,6 +121,59 @@ ENTRY(startup_32)
19664 2:
19665 leal -__PAGE_OFFSET(%ecx),%esp
19666
19667+#ifdef CONFIG_SMP
19668+ movl $pa(cpu_gdt_table),%edi
19669+ movl $__per_cpu_load,%eax
19670+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
19671+ rorl $16,%eax
19672+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
19673+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
19674+ movl $__per_cpu_end - 1,%eax
19675+ subl $__per_cpu_start,%eax
19676+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
19677+#endif
19678+
19679+#ifdef CONFIG_PAX_MEMORY_UDEREF
19680+ movl $NR_CPUS,%ecx
19681+ movl $pa(cpu_gdt_table),%edi
19682+1:
19683+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
19684+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
19685+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
19686+ addl $PAGE_SIZE_asm,%edi
19687+ loop 1b
19688+#endif
19689+
19690+#ifdef CONFIG_PAX_KERNEXEC
19691+ movl $pa(boot_gdt),%edi
19692+ movl $__LOAD_PHYSICAL_ADDR,%eax
19693+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
19694+ rorl $16,%eax
19695+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
19696+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
19697+ rorl $16,%eax
19698+
19699+ ljmp $(__BOOT_CS),$1f
19700+1:
19701+
19702+ movl $NR_CPUS,%ecx
19703+ movl $pa(cpu_gdt_table),%edi
19704+ addl $__PAGE_OFFSET,%eax
19705+1:
19706+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
19707+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
19708+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
19709+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
19710+ rorl $16,%eax
19711+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
19712+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
19713+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
19714+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
19715+ rorl $16,%eax
19716+ addl $PAGE_SIZE_asm,%edi
19717+ loop 1b
19718+#endif
19719+
19720 /*
19721 * Clear BSS first so that there are no surprises...
19722 */
19723@@ -196,8 +264,11 @@ ENTRY(startup_32)
19724 movl %eax, pa(max_pfn_mapped)
19725
19726 /* Do early initialization of the fixmap area */
19727- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
19728- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
19729+#ifdef CONFIG_COMPAT_VDSO
19730+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
19731+#else
19732+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
19733+#endif
19734 #else /* Not PAE */
19735
19736 page_pde_offset = (__PAGE_OFFSET >> 20);
19737@@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
19738 movl %eax, pa(max_pfn_mapped)
19739
19740 /* Do early initialization of the fixmap area */
19741- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
19742- movl %eax,pa(initial_page_table+0xffc)
19743+#ifdef CONFIG_COMPAT_VDSO
19744+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
19745+#else
19746+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
19747+#endif
19748 #endif
19749
19750 #ifdef CONFIG_PARAVIRT
19751@@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
19752 cmpl $num_subarch_entries, %eax
19753 jae bad_subarch
19754
19755- movl pa(subarch_entries)(,%eax,4), %eax
19756- subl $__PAGE_OFFSET, %eax
19757- jmp *%eax
19758+ jmp *pa(subarch_entries)(,%eax,4)
19759
19760 bad_subarch:
19761 WEAK(lguest_entry)
19762@@ -256,10 +328,10 @@ WEAK(xen_entry)
19763 __INITDATA
19764
19765 subarch_entries:
19766- .long default_entry /* normal x86/PC */
19767- .long lguest_entry /* lguest hypervisor */
19768- .long xen_entry /* Xen hypervisor */
19769- .long default_entry /* Moorestown MID */
19770+ .long ta(default_entry) /* normal x86/PC */
19771+ .long ta(lguest_entry) /* lguest hypervisor */
19772+ .long ta(xen_entry) /* Xen hypervisor */
19773+ .long ta(default_entry) /* Moorestown MID */
19774 num_subarch_entries = (. - subarch_entries) / 4
19775 .previous
19776 #else
19777@@ -335,6 +407,7 @@ default_entry:
19778 movl pa(mmu_cr4_features),%eax
19779 movl %eax,%cr4
19780
19781+#ifdef CONFIG_X86_PAE
19782 testb $X86_CR4_PAE, %al # check if PAE is enabled
19783 jz 6f
19784
19785@@ -363,6 +436,9 @@ default_entry:
19786 /* Make changes effective */
19787 wrmsr
19788
19789+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
19790+#endif
19791+
19792 6:
19793
19794 /*
19795@@ -460,14 +536,20 @@ is386: movl $2,%ecx # set MP
19796 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
19797 movl %eax,%ss # after changing gdt.
19798
19799- movl $(__USER_DS),%eax # DS/ES contains default USER segment
19800+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
19801 movl %eax,%ds
19802 movl %eax,%es
19803
19804 movl $(__KERNEL_PERCPU), %eax
19805 movl %eax,%fs # set this cpu's percpu
19806
19807+#ifdef CONFIG_CC_STACKPROTECTOR
19808 movl $(__KERNEL_STACK_CANARY),%eax
19809+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
19810+ movl $(__USER_DS),%eax
19811+#else
19812+ xorl %eax,%eax
19813+#endif
19814 movl %eax,%gs
19815
19816 xorl %eax,%eax # Clear LDT
19817@@ -544,8 +626,11 @@ setup_once:
19818 * relocation. Manually set base address in stack canary
19819 * segment descriptor.
19820 */
19821- movl $gdt_page,%eax
19822+ movl $cpu_gdt_table,%eax
19823 movl $stack_canary,%ecx
19824+#ifdef CONFIG_SMP
19825+ addl $__per_cpu_load,%ecx
19826+#endif
19827 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
19828 shrl $16, %ecx
19829 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
19830@@ -576,7 +661,7 @@ ENDPROC(early_idt_handlers)
19831 /* This is global to keep gas from relaxing the jumps */
19832 ENTRY(early_idt_handler)
19833 cld
19834- cmpl $2,%ss:early_recursion_flag
19835+ cmpl $1,%ss:early_recursion_flag
19836 je hlt_loop
19837 incl %ss:early_recursion_flag
19838
19839@@ -614,8 +699,8 @@ ENTRY(early_idt_handler)
19840 pushl (20+6*4)(%esp) /* trapno */
19841 pushl $fault_msg
19842 call printk
19843-#endif
19844 call dump_stack
19845+#endif
19846 hlt_loop:
19847 hlt
19848 jmp hlt_loop
19849@@ -634,8 +719,11 @@ ENDPROC(early_idt_handler)
19850 /* This is the default interrupt "handler" :-) */
19851 ALIGN
19852 ignore_int:
19853- cld
19854 #ifdef CONFIG_PRINTK
19855+ cmpl $2,%ss:early_recursion_flag
19856+ je hlt_loop
19857+ incl %ss:early_recursion_flag
19858+ cld
19859 pushl %eax
19860 pushl %ecx
19861 pushl %edx
19862@@ -644,9 +732,6 @@ ignore_int:
19863 movl $(__KERNEL_DS),%eax
19864 movl %eax,%ds
19865 movl %eax,%es
19866- cmpl $2,early_recursion_flag
19867- je hlt_loop
19868- incl early_recursion_flag
19869 pushl 16(%esp)
19870 pushl 24(%esp)
19871 pushl 32(%esp)
19872@@ -680,29 +765,43 @@ ENTRY(setup_once_ref)
19873 /*
19874 * BSS section
19875 */
19876-__PAGE_ALIGNED_BSS
19877- .align PAGE_SIZE
19878 #ifdef CONFIG_X86_PAE
19879+.section .initial_pg_pmd,"a",@progbits
19880 initial_pg_pmd:
19881 .fill 1024*KPMDS,4,0
19882 #else
19883+.section .initial_page_table,"a",@progbits
19884 ENTRY(initial_page_table)
19885 .fill 1024,4,0
19886 #endif
19887+.section .initial_pg_fixmap,"a",@progbits
19888 initial_pg_fixmap:
19889 .fill 1024,4,0
19890+.section .empty_zero_page,"a",@progbits
19891 ENTRY(empty_zero_page)
19892 .fill 4096,1,0
19893+.section .swapper_pg_dir,"a",@progbits
19894 ENTRY(swapper_pg_dir)
19895+#ifdef CONFIG_X86_PAE
19896+ .fill 4,8,0
19897+#else
19898 .fill 1024,4,0
19899+#endif
19900+
19901+/*
19902+ * The IDT has to be page-aligned to simplify the Pentium
19903+ * F0 0F bug workaround.. We have a special link segment
19904+ * for this.
19905+ */
19906+.section .idt,"a",@progbits
19907+ENTRY(idt_table)
19908+ .fill 256,8,0
19909
19910 /*
19911 * This starts the data section.
19912 */
19913 #ifdef CONFIG_X86_PAE
19914-__PAGE_ALIGNED_DATA
19915- /* Page-aligned for the benefit of paravirt? */
19916- .align PAGE_SIZE
19917+.section .initial_page_table,"a",@progbits
19918 ENTRY(initial_page_table)
19919 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
19920 # if KPMDS == 3
19921@@ -721,12 +820,20 @@ ENTRY(initial_page_table)
19922 # error "Kernel PMDs should be 1, 2 or 3"
19923 # endif
19924 .align PAGE_SIZE /* needs to be page-sized too */
19925+
19926+#ifdef CONFIG_PAX_PER_CPU_PGD
19927+ENTRY(cpu_pgd)
19928+ .rept NR_CPUS
19929+ .fill 4,8,0
19930+ .endr
19931+#endif
19932+
19933 #endif
19934
19935 .data
19936 .balign 4
19937 ENTRY(stack_start)
19938- .long init_thread_union+THREAD_SIZE
19939+ .long init_thread_union+THREAD_SIZE-8
19940
19941 __INITRODATA
19942 int_msg:
19943@@ -754,7 +861,7 @@ fault_msg:
19944 * segment size, and 32-bit linear address value:
19945 */
19946
19947- .data
19948+.section .rodata,"a",@progbits
19949 .globl boot_gdt_descr
19950 .globl idt_descr
19951
19952@@ -763,7 +870,7 @@ fault_msg:
19953 .word 0 # 32 bit align gdt_desc.address
19954 boot_gdt_descr:
19955 .word __BOOT_DS+7
19956- .long boot_gdt - __PAGE_OFFSET
19957+ .long pa(boot_gdt)
19958
19959 .word 0 # 32-bit align idt_desc.address
19960 idt_descr:
19961@@ -774,7 +881,7 @@ idt_descr:
19962 .word 0 # 32 bit align gdt_desc.address
19963 ENTRY(early_gdt_descr)
19964 .word GDT_ENTRIES*8-1
19965- .long gdt_page /* Overwritten for secondary CPUs */
19966+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
19967
19968 /*
19969 * The boot_gdt must mirror the equivalent in setup.S and is
19970@@ -783,5 +890,65 @@ ENTRY(early_gdt_descr)
19971 .align L1_CACHE_BYTES
19972 ENTRY(boot_gdt)
19973 .fill GDT_ENTRY_BOOT_CS,8,0
19974- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
19975- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
19976+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
19977+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
19978+
19979+ .align PAGE_SIZE_asm
19980+ENTRY(cpu_gdt_table)
19981+ .rept NR_CPUS
19982+ .quad 0x0000000000000000 /* NULL descriptor */
19983+ .quad 0x0000000000000000 /* 0x0b reserved */
19984+ .quad 0x0000000000000000 /* 0x13 reserved */
19985+ .quad 0x0000000000000000 /* 0x1b reserved */
19986+
19987+#ifdef CONFIG_PAX_KERNEXEC
19988+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
19989+#else
19990+ .quad 0x0000000000000000 /* 0x20 unused */
19991+#endif
19992+
19993+ .quad 0x0000000000000000 /* 0x28 unused */
19994+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
19995+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
19996+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
19997+ .quad 0x0000000000000000 /* 0x4b reserved */
19998+ .quad 0x0000000000000000 /* 0x53 reserved */
19999+ .quad 0x0000000000000000 /* 0x5b reserved */
20000+
20001+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
20002+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
20003+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
20004+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
20005+
20006+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
20007+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
20008+
20009+ /*
20010+ * Segments used for calling PnP BIOS have byte granularity.
20011+ * The code segments and data segments have fixed 64k limits,
20012+ * the transfer segment sizes are set at run time.
20013+ */
20014+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
20015+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
20016+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
20017+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
20018+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
20019+
20020+ /*
20021+ * The APM segments have byte granularity and their bases
20022+ * are set at run time. All have 64k limits.
20023+ */
20024+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
20025+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
20026+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
20027+
20028+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
20029+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
20030+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
20031+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
20032+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
20033+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
20034+
20035+ /* Be sure this is zeroed to avoid false validations in Xen */
20036+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
20037+ .endr
20038diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
20039index 980053c..74d3b44 100644
20040--- a/arch/x86/kernel/head_64.S
20041+++ b/arch/x86/kernel/head_64.S
20042@@ -20,6 +20,8 @@
20043 #include <asm/processor-flags.h>
20044 #include <asm/percpu.h>
20045 #include <asm/nops.h>
20046+#include <asm/cpufeature.h>
20047+#include <asm/alternative-asm.h>
20048
20049 #ifdef CONFIG_PARAVIRT
20050 #include <asm/asm-offsets.h>
20051@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
20052 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
20053 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
20054 L3_START_KERNEL = pud_index(__START_KERNEL_map)
20055+L4_VMALLOC_START = pgd_index(VMALLOC_START)
20056+L3_VMALLOC_START = pud_index(VMALLOC_START)
20057+L4_VMALLOC_END = pgd_index(VMALLOC_END)
20058+L3_VMALLOC_END = pud_index(VMALLOC_END)
20059+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
20060+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
20061
20062 .text
20063 __HEAD
20064@@ -88,35 +96,23 @@ startup_64:
20065 */
20066 addq %rbp, init_level4_pgt + 0(%rip)
20067 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
20068+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
20069+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
20070+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
20071 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
20072
20073 addq %rbp, level3_ident_pgt + 0(%rip)
20074+#ifndef CONFIG_XEN
20075+ addq %rbp, level3_ident_pgt + 8(%rip)
20076+#endif
20077
20078- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
20079- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
20080+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
20081+
20082+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
20083+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
20084
20085 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
20086-
20087- /* Add an Identity mapping if I am above 1G */
20088- leaq _text(%rip), %rdi
20089- andq $PMD_PAGE_MASK, %rdi
20090-
20091- movq %rdi, %rax
20092- shrq $PUD_SHIFT, %rax
20093- andq $(PTRS_PER_PUD - 1), %rax
20094- jz ident_complete
20095-
20096- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
20097- leaq level3_ident_pgt(%rip), %rbx
20098- movq %rdx, 0(%rbx, %rax, 8)
20099-
20100- movq %rdi, %rax
20101- shrq $PMD_SHIFT, %rax
20102- andq $(PTRS_PER_PMD - 1), %rax
20103- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
20104- leaq level2_spare_pgt(%rip), %rbx
20105- movq %rdx, 0(%rbx, %rax, 8)
20106-ident_complete:
20107+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
20108
20109 /*
20110 * Fixup the kernel text+data virtual addresses. Note that
20111@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
20112 * after the boot processor executes this code.
20113 */
20114
20115- /* Enable PAE mode and PGE */
20116- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
20117+ /* Enable PAE mode and PSE/PGE */
20118+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20119 movq %rax, %cr4
20120
20121 /* Setup early boot stage 4 level pagetables. */
20122@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
20123 movl $MSR_EFER, %ecx
20124 rdmsr
20125 btsl $_EFER_SCE, %eax /* Enable System Call */
20126- btl $20,%edi /* No Execute supported? */
20127+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
20128 jnc 1f
20129 btsl $_EFER_NX, %eax
20130+ leaq init_level4_pgt(%rip), %rdi
20131+#ifndef CONFIG_EFI
20132+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
20133+#endif
20134+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
20135+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
20136+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
20137+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
20138 1: wrmsr /* Make changes effective */
20139
20140 /* Setup cr0 */
20141@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
20142 * jump. In addition we need to ensure %cs is set so we make this
20143 * a far return.
20144 */
20145+ pax_set_fptr_mask
20146 movq initial_code(%rip),%rax
20147 pushq $0 # fake return address to stop unwinder
20148 pushq $__KERNEL_CS # set correct cs
20149@@ -284,7 +289,7 @@ ENDPROC(start_cpu0)
20150 bad_address:
20151 jmp bad_address
20152
20153- .section ".init.text","ax"
20154+ __INIT
20155 .globl early_idt_handlers
20156 early_idt_handlers:
20157 # 104(%rsp) %rflags
20158@@ -343,7 +348,7 @@ ENTRY(early_idt_handler)
20159 call dump_stack
20160 #ifdef CONFIG_KALLSYMS
20161 leaq early_idt_ripmsg(%rip),%rdi
20162- movq 40(%rsp),%rsi # %rip again
20163+ movq 88(%rsp),%rsi # %rip again
20164 call __print_symbol
20165 #endif
20166 #endif /* EARLY_PRINTK */
20167@@ -363,11 +368,15 @@ ENTRY(early_idt_handler)
20168 addq $16,%rsp # drop vector number and error code
20169 decl early_recursion_flag(%rip)
20170 INTERRUPT_RETURN
20171+ .previous
20172
20173+ __INITDATA
20174 .balign 4
20175 early_recursion_flag:
20176 .long 0
20177+ .previous
20178
20179+ .section .rodata,"a",@progbits
20180 #ifdef CONFIG_EARLY_PRINTK
20181 early_idt_msg:
20182 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
20183@@ -376,6 +385,7 @@ early_idt_ripmsg:
20184 #endif /* CONFIG_EARLY_PRINTK */
20185 .previous
20186
20187+ .section .rodata,"a",@progbits
20188 #define NEXT_PAGE(name) \
20189 .balign PAGE_SIZE; \
20190 ENTRY(name)
20191@@ -388,7 +398,6 @@ ENTRY(name)
20192 i = i + 1 ; \
20193 .endr
20194
20195- .data
20196 /*
20197 * This default setting generates an ident mapping at address 0x100000
20198 * and a mapping for the kernel that precisely maps virtual address
20199@@ -399,13 +408,41 @@ NEXT_PAGE(init_level4_pgt)
20200 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20201 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20202 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20203+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
20204+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20205+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
20206+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20207+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20208+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20209 .org init_level4_pgt + L4_START_KERNEL*8, 0
20210 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20211 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20212
20213+#ifdef CONFIG_PAX_PER_CPU_PGD
20214+NEXT_PAGE(cpu_pgd)
20215+ .rept NR_CPUS
20216+ .fill 512,8,0
20217+ .endr
20218+#endif
20219+
20220 NEXT_PAGE(level3_ident_pgt)
20221 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20222+#ifdef CONFIG_XEN
20223 .fill 511,8,0
20224+#else
20225+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20226+ .fill 510,8,0
20227+#endif
20228+
20229+NEXT_PAGE(level3_vmalloc_start_pgt)
20230+ .fill 512,8,0
20231+
20232+NEXT_PAGE(level3_vmalloc_end_pgt)
20233+ .fill 512,8,0
20234+
20235+NEXT_PAGE(level3_vmemmap_pgt)
20236+ .fill L3_VMEMMAP_START,8,0
20237+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20238
20239 NEXT_PAGE(level3_kernel_pgt)
20240 .fill L3_START_KERNEL,8,0
20241@@ -413,20 +450,23 @@ NEXT_PAGE(level3_kernel_pgt)
20242 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20243 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20244
20245+NEXT_PAGE(level2_vmemmap_pgt)
20246+ .fill 512,8,0
20247+
20248 NEXT_PAGE(level2_fixmap_pgt)
20249- .fill 506,8,0
20250- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20251- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20252- .fill 5,8,0
20253+ .fill 507,8,0
20254+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20255+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20256+ .fill 4,8,0
20257
20258-NEXT_PAGE(level1_fixmap_pgt)
20259+NEXT_PAGE(level1_vsyscall_pgt)
20260 .fill 512,8,0
20261
20262-NEXT_PAGE(level2_ident_pgt)
20263- /* Since I easily can, map the first 1G.
20264+ /* Since I easily can, map the first 2G.
20265 * Don't set NX because code runs from these pages.
20266 */
20267- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20268+NEXT_PAGE(level2_ident_pgt)
20269+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20270
20271 NEXT_PAGE(level2_kernel_pgt)
20272 /*
20273@@ -439,37 +479,59 @@ NEXT_PAGE(level2_kernel_pgt)
20274 * If you want to increase this then increase MODULES_VADDR
20275 * too.)
20276 */
20277- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
20278- KERNEL_IMAGE_SIZE/PMD_SIZE)
20279-
20280-NEXT_PAGE(level2_spare_pgt)
20281- .fill 512, 8, 0
20282+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
20283
20284 #undef PMDS
20285 #undef NEXT_PAGE
20286
20287- .data
20288+ .align PAGE_SIZE
20289+ENTRY(cpu_gdt_table)
20290+ .rept NR_CPUS
20291+ .quad 0x0000000000000000 /* NULL descriptor */
20292+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
20293+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
20294+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
20295+ .quad 0x00cffb000000ffff /* __USER32_CS */
20296+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
20297+ .quad 0x00affb000000ffff /* __USER_CS */
20298+
20299+#ifdef CONFIG_PAX_KERNEXEC
20300+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
20301+#else
20302+ .quad 0x0 /* unused */
20303+#endif
20304+
20305+ .quad 0,0 /* TSS */
20306+ .quad 0,0 /* LDT */
20307+ .quad 0,0,0 /* three TLS descriptors */
20308+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
20309+ /* asm/segment.h:GDT_ENTRIES must match this */
20310+
20311+ /* zero the remaining page */
20312+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
20313+ .endr
20314+
20315 .align 16
20316 .globl early_gdt_descr
20317 early_gdt_descr:
20318 .word GDT_ENTRIES*8-1
20319 early_gdt_descr_base:
20320- .quad INIT_PER_CPU_VAR(gdt_page)
20321+ .quad cpu_gdt_table
20322
20323 ENTRY(phys_base)
20324 /* This must match the first entry in level2_kernel_pgt */
20325 .quad 0x0000000000000000
20326
20327 #include "../../x86/xen/xen-head.S"
20328-
20329- .section .bss, "aw", @nobits
20330+
20331+ .section .rodata,"a",@progbits
20332 .align L1_CACHE_BYTES
20333 ENTRY(idt_table)
20334- .skip IDT_ENTRIES * 16
20335+ .fill 512,8,0
20336
20337 .align L1_CACHE_BYTES
20338 ENTRY(nmi_idt_table)
20339- .skip IDT_ENTRIES * 16
20340+ .fill 512,8,0
20341
20342 __PAGE_ALIGNED_BSS
20343 .align PAGE_SIZE
20344diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
20345index 9c3bd4a..e1d9b35 100644
20346--- a/arch/x86/kernel/i386_ksyms_32.c
20347+++ b/arch/x86/kernel/i386_ksyms_32.c
20348@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
20349 EXPORT_SYMBOL(cmpxchg8b_emu);
20350 #endif
20351
20352+EXPORT_SYMBOL_GPL(cpu_gdt_table);
20353+
20354 /* Networking helper routines. */
20355 EXPORT_SYMBOL(csum_partial_copy_generic);
20356+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
20357+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
20358
20359 EXPORT_SYMBOL(__get_user_1);
20360 EXPORT_SYMBOL(__get_user_2);
20361@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
20362
20363 EXPORT_SYMBOL(csum_partial);
20364 EXPORT_SYMBOL(empty_zero_page);
20365+
20366+#ifdef CONFIG_PAX_KERNEXEC
20367+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
20368+#endif
20369diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
20370index 245a71d..89d9ce4 100644
20371--- a/arch/x86/kernel/i387.c
20372+++ b/arch/x86/kernel/i387.c
20373@@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
20374 static inline bool interrupted_user_mode(void)
20375 {
20376 struct pt_regs *regs = get_irq_regs();
20377- return regs && user_mode_vm(regs);
20378+ return regs && user_mode(regs);
20379 }
20380
20381 /*
20382diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
20383index 9a5c460..b332a4b 100644
20384--- a/arch/x86/kernel/i8259.c
20385+++ b/arch/x86/kernel/i8259.c
20386@@ -209,7 +209,7 @@ spurious_8259A_irq:
20387 "spurious 8259A interrupt: IRQ%d.\n", irq);
20388 spurious_irq_mask |= irqmask;
20389 }
20390- atomic_inc(&irq_err_count);
20391+ atomic_inc_unchecked(&irq_err_count);
20392 /*
20393 * Theoretically we do not have to handle this IRQ,
20394 * but in Linux this does not cause problems and is
20395@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
20396 /* (slave's support for AEOI in flat mode is to be investigated) */
20397 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
20398
20399+ pax_open_kernel();
20400 if (auto_eoi)
20401 /*
20402 * In AEOI mode we just have to mask the interrupt
20403 * when acking.
20404 */
20405- i8259A_chip.irq_mask_ack = disable_8259A_irq;
20406+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
20407 else
20408- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20409+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20410+ pax_close_kernel();
20411
20412 udelay(100); /* wait for 8259A to initialize */
20413
20414diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
20415index a979b5b..1d6db75 100644
20416--- a/arch/x86/kernel/io_delay.c
20417+++ b/arch/x86/kernel/io_delay.c
20418@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
20419 * Quirk table for systems that misbehave (lock up, etc.) if port
20420 * 0x80 is used:
20421 */
20422-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
20423+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
20424 {
20425 .callback = dmi_io_delay_0xed_port,
20426 .ident = "Compaq Presario V6000",
20427diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
20428index 8c96897..be66bfa 100644
20429--- a/arch/x86/kernel/ioport.c
20430+++ b/arch/x86/kernel/ioport.c
20431@@ -6,6 +6,7 @@
20432 #include <linux/sched.h>
20433 #include <linux/kernel.h>
20434 #include <linux/capability.h>
20435+#include <linux/security.h>
20436 #include <linux/errno.h>
20437 #include <linux/types.h>
20438 #include <linux/ioport.h>
20439@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20440
20441 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
20442 return -EINVAL;
20443+#ifdef CONFIG_GRKERNSEC_IO
20444+ if (turn_on && grsec_disable_privio) {
20445+ gr_handle_ioperm();
20446+ return -EPERM;
20447+ }
20448+#endif
20449 if (turn_on && !capable(CAP_SYS_RAWIO))
20450 return -EPERM;
20451
20452@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20453 * because the ->io_bitmap_max value must match the bitmap
20454 * contents:
20455 */
20456- tss = &per_cpu(init_tss, get_cpu());
20457+ tss = init_tss + get_cpu();
20458
20459 if (turn_on)
20460 bitmap_clear(t->io_bitmap_ptr, from, num);
20461@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
20462 return -EINVAL;
20463 /* Trying to gain more privileges? */
20464 if (level > old) {
20465+#ifdef CONFIG_GRKERNSEC_IO
20466+ if (grsec_disable_privio) {
20467+ gr_handle_iopl();
20468+ return -EPERM;
20469+ }
20470+#endif
20471 if (!capable(CAP_SYS_RAWIO))
20472 return -EPERM;
20473 }
20474diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
20475index e4595f1..ee3bfb8 100644
20476--- a/arch/x86/kernel/irq.c
20477+++ b/arch/x86/kernel/irq.c
20478@@ -18,7 +18,7 @@
20479 #include <asm/mce.h>
20480 #include <asm/hw_irq.h>
20481
20482-atomic_t irq_err_count;
20483+atomic_unchecked_t irq_err_count;
20484
20485 /* Function pointer for generic interrupt vector handling */
20486 void (*x86_platform_ipi_callback)(void) = NULL;
20487@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
20488 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
20489 seq_printf(p, " Machine check polls\n");
20490 #endif
20491- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
20492+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
20493 #if defined(CONFIG_X86_IO_APIC)
20494- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
20495+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
20496 #endif
20497 return 0;
20498 }
20499@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
20500
20501 u64 arch_irq_stat(void)
20502 {
20503- u64 sum = atomic_read(&irq_err_count);
20504+ u64 sum = atomic_read_unchecked(&irq_err_count);
20505
20506 #ifdef CONFIG_X86_IO_APIC
20507- sum += atomic_read(&irq_mis_count);
20508+ sum += atomic_read_unchecked(&irq_mis_count);
20509 #endif
20510 return sum;
20511 }
20512diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
20513index 344faf8..355f60d 100644
20514--- a/arch/x86/kernel/irq_32.c
20515+++ b/arch/x86/kernel/irq_32.c
20516@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
20517 __asm__ __volatile__("andl %%esp,%0" :
20518 "=r" (sp) : "0" (THREAD_SIZE - 1));
20519
20520- return sp < (sizeof(struct thread_info) + STACK_WARN);
20521+ return sp < STACK_WARN;
20522 }
20523
20524 static void print_stack_overflow(void)
20525@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
20526 * per-CPU IRQ handling contexts (thread information and stack)
20527 */
20528 union irq_ctx {
20529- struct thread_info tinfo;
20530- u32 stack[THREAD_SIZE/sizeof(u32)];
20531+ unsigned long previous_esp;
20532+ u32 stack[THREAD_SIZE/sizeof(u32)];
20533 } __attribute__((aligned(THREAD_SIZE)));
20534
20535 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
20536@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
20537 static inline int
20538 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20539 {
20540- union irq_ctx *curctx, *irqctx;
20541+ union irq_ctx *irqctx;
20542 u32 *isp, arg1, arg2;
20543
20544- curctx = (union irq_ctx *) current_thread_info();
20545 irqctx = __this_cpu_read(hardirq_ctx);
20546
20547 /*
20548@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20549 * handler) we can't do that and just have to keep using the
20550 * current stack (which is the irq stack already after all)
20551 */
20552- if (unlikely(curctx == irqctx))
20553+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
20554 return 0;
20555
20556 /* build the stack frame on the IRQ stack */
20557- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
20558- irqctx->tinfo.task = curctx->tinfo.task;
20559- irqctx->tinfo.previous_esp = current_stack_pointer;
20560+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
20561+ irqctx->previous_esp = current_stack_pointer;
20562
20563- /* Copy the preempt_count so that the [soft]irq checks work. */
20564- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
20565+#ifdef CONFIG_PAX_MEMORY_UDEREF
20566+ __set_fs(MAKE_MM_SEG(0));
20567+#endif
20568
20569 if (unlikely(overflow))
20570 call_on_stack(print_stack_overflow, isp);
20571@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20572 : "0" (irq), "1" (desc), "2" (isp),
20573 "D" (desc->handle_irq)
20574 : "memory", "cc", "ecx");
20575+
20576+#ifdef CONFIG_PAX_MEMORY_UDEREF
20577+ __set_fs(current_thread_info()->addr_limit);
20578+#endif
20579+
20580 return 1;
20581 }
20582
20583@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20584 */
20585 void __cpuinit irq_ctx_init(int cpu)
20586 {
20587- union irq_ctx *irqctx;
20588-
20589 if (per_cpu(hardirq_ctx, cpu))
20590 return;
20591
20592- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
20593- THREADINFO_GFP,
20594- THREAD_SIZE_ORDER));
20595- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
20596- irqctx->tinfo.cpu = cpu;
20597- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
20598- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
20599-
20600- per_cpu(hardirq_ctx, cpu) = irqctx;
20601-
20602- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
20603- THREADINFO_GFP,
20604- THREAD_SIZE_ORDER));
20605- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
20606- irqctx->tinfo.cpu = cpu;
20607- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
20608-
20609- per_cpu(softirq_ctx, cpu) = irqctx;
20610+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
20611+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
20612+
20613+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
20614+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
20615
20616 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
20617 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
20618@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
20619 asmlinkage void do_softirq(void)
20620 {
20621 unsigned long flags;
20622- struct thread_info *curctx;
20623 union irq_ctx *irqctx;
20624 u32 *isp;
20625
20626@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
20627 local_irq_save(flags);
20628
20629 if (local_softirq_pending()) {
20630- curctx = current_thread_info();
20631 irqctx = __this_cpu_read(softirq_ctx);
20632- irqctx->tinfo.task = curctx->task;
20633- irqctx->tinfo.previous_esp = current_stack_pointer;
20634+ irqctx->previous_esp = current_stack_pointer;
20635
20636 /* build the stack frame on the softirq stack */
20637- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
20638+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
20639+
20640+#ifdef CONFIG_PAX_MEMORY_UDEREF
20641+ __set_fs(MAKE_MM_SEG(0));
20642+#endif
20643
20644 call_on_stack(__do_softirq, isp);
20645+
20646+#ifdef CONFIG_PAX_MEMORY_UDEREF
20647+ __set_fs(current_thread_info()->addr_limit);
20648+#endif
20649+
20650 /*
20651 * Shouldn't happen, we returned above if in_interrupt():
20652 */
20653@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
20654 if (unlikely(!desc))
20655 return false;
20656
20657- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
20658+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
20659 if (unlikely(overflow))
20660 print_stack_overflow();
20661 desc->handle_irq(irq, desc);
20662diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
20663index d04d3ec..ea4b374 100644
20664--- a/arch/x86/kernel/irq_64.c
20665+++ b/arch/x86/kernel/irq_64.c
20666@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
20667 u64 estack_top, estack_bottom;
20668 u64 curbase = (u64)task_stack_page(current);
20669
20670- if (user_mode_vm(regs))
20671+ if (user_mode(regs))
20672 return;
20673
20674 if (regs->sp >= curbase + sizeof(struct thread_info) +
20675diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
20676index dc1404b..bbc43e7 100644
20677--- a/arch/x86/kernel/kdebugfs.c
20678+++ b/arch/x86/kernel/kdebugfs.c
20679@@ -27,7 +27,7 @@ struct setup_data_node {
20680 u32 len;
20681 };
20682
20683-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
20684+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
20685 size_t count, loff_t *ppos)
20686 {
20687 struct setup_data_node *node = file->private_data;
20688diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
20689index 836f832..a8bda67 100644
20690--- a/arch/x86/kernel/kgdb.c
20691+++ b/arch/x86/kernel/kgdb.c
20692@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
20693 #ifdef CONFIG_X86_32
20694 switch (regno) {
20695 case GDB_SS:
20696- if (!user_mode_vm(regs))
20697+ if (!user_mode(regs))
20698 *(unsigned long *)mem = __KERNEL_DS;
20699 break;
20700 case GDB_SP:
20701- if (!user_mode_vm(regs))
20702+ if (!user_mode(regs))
20703 *(unsigned long *)mem = kernel_stack_pointer(regs);
20704 break;
20705 case GDB_GS:
20706@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
20707 bp->attr.bp_addr = breakinfo[breakno].addr;
20708 bp->attr.bp_len = breakinfo[breakno].len;
20709 bp->attr.bp_type = breakinfo[breakno].type;
20710- info->address = breakinfo[breakno].addr;
20711+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
20712+ info->address = ktla_ktva(breakinfo[breakno].addr);
20713+ else
20714+ info->address = breakinfo[breakno].addr;
20715 info->len = breakinfo[breakno].len;
20716 info->type = breakinfo[breakno].type;
20717 val = arch_install_hw_breakpoint(bp);
20718@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
20719 case 'k':
20720 /* clear the trace bit */
20721 linux_regs->flags &= ~X86_EFLAGS_TF;
20722- atomic_set(&kgdb_cpu_doing_single_step, -1);
20723+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
20724
20725 /* set the trace bit if we're stepping */
20726 if (remcomInBuffer[0] == 's') {
20727 linux_regs->flags |= X86_EFLAGS_TF;
20728- atomic_set(&kgdb_cpu_doing_single_step,
20729+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
20730 raw_smp_processor_id());
20731 }
20732
20733@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
20734
20735 switch (cmd) {
20736 case DIE_DEBUG:
20737- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
20738+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
20739 if (user_mode(regs))
20740 return single_step_cont(regs, args);
20741 break;
20742@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
20743 #endif /* CONFIG_DEBUG_RODATA */
20744
20745 bpt->type = BP_BREAKPOINT;
20746- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
20747+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
20748 BREAK_INSTR_SIZE);
20749 if (err)
20750 return err;
20751- err = probe_kernel_write((char *)bpt->bpt_addr,
20752+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
20753 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
20754 #ifdef CONFIG_DEBUG_RODATA
20755 if (!err)
20756@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
20757 return -EBUSY;
20758 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
20759 BREAK_INSTR_SIZE);
20760- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
20761+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
20762 if (err)
20763 return err;
20764 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
20765@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
20766 if (mutex_is_locked(&text_mutex))
20767 goto knl_write;
20768 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
20769- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
20770+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
20771 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
20772 goto knl_write;
20773 return err;
20774 knl_write:
20775 #endif /* CONFIG_DEBUG_RODATA */
20776- return probe_kernel_write((char *)bpt->bpt_addr,
20777+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
20778 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
20779 }
20780
20781diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
20782index c5e410e..ed5a7f0 100644
20783--- a/arch/x86/kernel/kprobes-opt.c
20784+++ b/arch/x86/kernel/kprobes-opt.c
20785@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
20786 * Verify if the address gap is in 2GB range, because this uses
20787 * a relative jump.
20788 */
20789- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
20790+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
20791 if (abs(rel) > 0x7fffffff)
20792 return -ERANGE;
20793
20794@@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
20795 op->optinsn.size = ret;
20796
20797 /* Copy arch-dep-instance from template */
20798- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
20799+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
20800
20801 /* Set probe information */
20802 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
20803
20804 /* Set probe function call */
20805- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
20806+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
20807
20808 /* Set returning jmp instruction at the tail of out-of-line buffer */
20809- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
20810+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
20811 (u8 *)op->kp.addr + op->optinsn.size);
20812
20813 flush_icache_range((unsigned long) buf,
20814@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
20815 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
20816
20817 /* Backup instructions which will be replaced by jump address */
20818- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
20819+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
20820 RELATIVE_ADDR_SIZE);
20821
20822 insn_buf[0] = RELATIVEJUMP_OPCODE;
20823@@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
20824 /* This kprobe is really able to run optimized path. */
20825 op = container_of(p, struct optimized_kprobe, kp);
20826 /* Detour through copied instructions */
20827- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
20828+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
20829 if (!reenter)
20830 reset_current_kprobe();
20831 preempt_enable_no_resched();
20832diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
20833index 57916c0..9e0b9d0 100644
20834--- a/arch/x86/kernel/kprobes.c
20835+++ b/arch/x86/kernel/kprobes.c
20836@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
20837 s32 raddr;
20838 } __attribute__((packed)) *insn;
20839
20840- insn = (struct __arch_relative_insn *)from;
20841+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
20842+
20843+ pax_open_kernel();
20844 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
20845 insn->op = op;
20846+ pax_close_kernel();
20847 }
20848
20849 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
20850@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
20851 kprobe_opcode_t opcode;
20852 kprobe_opcode_t *orig_opcodes = opcodes;
20853
20854- if (search_exception_tables((unsigned long)opcodes))
20855+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
20856 return 0; /* Page fault may occur on this address. */
20857
20858 retry:
20859@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
20860 * for the first byte, we can recover the original instruction
20861 * from it and kp->opcode.
20862 */
20863- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
20864+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
20865 buf[0] = kp->opcode;
20866- return (unsigned long)buf;
20867+ return ktva_ktla((unsigned long)buf);
20868 }
20869
20870 /*
20871@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
20872 /* Another subsystem puts a breakpoint, failed to recover */
20873 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
20874 return 0;
20875+ pax_open_kernel();
20876 memcpy(dest, insn.kaddr, insn.length);
20877+ pax_close_kernel();
20878
20879 #ifdef CONFIG_X86_64
20880 if (insn_rip_relative(&insn)) {
20881@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
20882 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
20883 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
20884 disp = (u8 *) dest + insn_offset_displacement(&insn);
20885+ pax_open_kernel();
20886 *(s32 *) disp = (s32) newdisp;
20887+ pax_close_kernel();
20888 }
20889 #endif
20890 return insn.length;
20891@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
20892 * nor set current_kprobe, because it doesn't use single
20893 * stepping.
20894 */
20895- regs->ip = (unsigned long)p->ainsn.insn;
20896+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
20897 preempt_enable_no_resched();
20898 return;
20899 }
20900@@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
20901 regs->flags &= ~X86_EFLAGS_IF;
20902 /* single step inline if the instruction is an int3 */
20903 if (p->opcode == BREAKPOINT_INSTRUCTION)
20904- regs->ip = (unsigned long)p->addr;
20905+ regs->ip = ktla_ktva((unsigned long)p->addr);
20906 else
20907- regs->ip = (unsigned long)p->ainsn.insn;
20908+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
20909 }
20910
20911 /*
20912@@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
20913 setup_singlestep(p, regs, kcb, 0);
20914 return 1;
20915 }
20916- } else if (*addr != BREAKPOINT_INSTRUCTION) {
20917+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
20918 /*
20919 * The breakpoint instruction was removed right
20920 * after we hit it. Another cpu has removed
20921@@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
20922 " movq %rax, 152(%rsp)\n"
20923 RESTORE_REGS_STRING
20924 " popfq\n"
20925+#ifdef KERNEXEC_PLUGIN
20926+ " btsq $63,(%rsp)\n"
20927+#endif
20928 #else
20929 " pushf\n"
20930 SAVE_REGS_STRING
20931@@ -788,7 +798,7 @@ static void __kprobes
20932 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
20933 {
20934 unsigned long *tos = stack_addr(regs);
20935- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
20936+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
20937 unsigned long orig_ip = (unsigned long)p->addr;
20938 kprobe_opcode_t *insn = p->ainsn.insn;
20939
20940@@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
20941 struct die_args *args = data;
20942 int ret = NOTIFY_DONE;
20943
20944- if (args->regs && user_mode_vm(args->regs))
20945+ if (args->regs && user_mode(args->regs))
20946 return ret;
20947
20948 switch (val) {
20949diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
20950index 9c2bd8b..bb1131c 100644
20951--- a/arch/x86/kernel/kvm.c
20952+++ b/arch/x86/kernel/kvm.c
20953@@ -452,7 +452,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
20954 return NOTIFY_OK;
20955 }
20956
20957-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
20958+static struct notifier_block kvm_cpu_notifier = {
20959 .notifier_call = kvm_cpu_notify,
20960 };
20961 #endif
20962diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
20963index ebc9873..1b9724b 100644
20964--- a/arch/x86/kernel/ldt.c
20965+++ b/arch/x86/kernel/ldt.c
20966@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
20967 if (reload) {
20968 #ifdef CONFIG_SMP
20969 preempt_disable();
20970- load_LDT(pc);
20971+ load_LDT_nolock(pc);
20972 if (!cpumask_equal(mm_cpumask(current->mm),
20973 cpumask_of(smp_processor_id())))
20974 smp_call_function(flush_ldt, current->mm, 1);
20975 preempt_enable();
20976 #else
20977- load_LDT(pc);
20978+ load_LDT_nolock(pc);
20979 #endif
20980 }
20981 if (oldsize) {
20982@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
20983 return err;
20984
20985 for (i = 0; i < old->size; i++)
20986- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
20987+ write_ldt_entry(new->ldt, i, old->ldt + i);
20988 return 0;
20989 }
20990
20991@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
20992 retval = copy_ldt(&mm->context, &old_mm->context);
20993 mutex_unlock(&old_mm->context.lock);
20994 }
20995+
20996+ if (tsk == current) {
20997+ mm->context.vdso = 0;
20998+
20999+#ifdef CONFIG_X86_32
21000+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21001+ mm->context.user_cs_base = 0UL;
21002+ mm->context.user_cs_limit = ~0UL;
21003+
21004+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
21005+ cpus_clear(mm->context.cpu_user_cs_mask);
21006+#endif
21007+
21008+#endif
21009+#endif
21010+
21011+ }
21012+
21013 return retval;
21014 }
21015
21016@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
21017 }
21018 }
21019
21020+#ifdef CONFIG_PAX_SEGMEXEC
21021+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
21022+ error = -EINVAL;
21023+ goto out_unlock;
21024+ }
21025+#endif
21026+
21027 fill_ldt(&ldt, &ldt_info);
21028 if (oldmode)
21029 ldt.avl = 0;
21030diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
21031index 5b19e4d..6476a76 100644
21032--- a/arch/x86/kernel/machine_kexec_32.c
21033+++ b/arch/x86/kernel/machine_kexec_32.c
21034@@ -26,7 +26,7 @@
21035 #include <asm/cacheflush.h>
21036 #include <asm/debugreg.h>
21037
21038-static void set_idt(void *newidt, __u16 limit)
21039+static void set_idt(struct desc_struct *newidt, __u16 limit)
21040 {
21041 struct desc_ptr curidt;
21042
21043@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
21044 }
21045
21046
21047-static void set_gdt(void *newgdt, __u16 limit)
21048+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
21049 {
21050 struct desc_ptr curgdt;
21051
21052@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
21053 }
21054
21055 control_page = page_address(image->control_code_page);
21056- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
21057+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
21058
21059 relocate_kernel_ptr = control_page;
21060 page_list[PA_CONTROL_PAGE] = __pa(control_page);
21061diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
21062index 3a04b22..1d2eb09 100644
21063--- a/arch/x86/kernel/microcode_core.c
21064+++ b/arch/x86/kernel/microcode_core.c
21065@@ -512,7 +512,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21066 return NOTIFY_OK;
21067 }
21068
21069-static struct notifier_block __refdata mc_cpu_notifier = {
21070+static struct notifier_block mc_cpu_notifier = {
21071 .notifier_call = mc_cpu_callback,
21072 };
21073
21074diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
21075index 3544aed..01ddc1c 100644
21076--- a/arch/x86/kernel/microcode_intel.c
21077+++ b/arch/x86/kernel/microcode_intel.c
21078@@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21079
21080 static int get_ucode_user(void *to, const void *from, size_t n)
21081 {
21082- return copy_from_user(to, from, n);
21083+ return copy_from_user(to, (const void __force_user *)from, n);
21084 }
21085
21086 static enum ucode_state
21087 request_microcode_user(int cpu, const void __user *buf, size_t size)
21088 {
21089- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21090+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21091 }
21092
21093 static void microcode_fini_cpu(int cpu)
21094diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
21095index 216a4d7..228255a 100644
21096--- a/arch/x86/kernel/module.c
21097+++ b/arch/x86/kernel/module.c
21098@@ -43,15 +43,60 @@ do { \
21099 } while (0)
21100 #endif
21101
21102-void *module_alloc(unsigned long size)
21103+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
21104 {
21105- if (PAGE_ALIGN(size) > MODULES_LEN)
21106+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
21107 return NULL;
21108 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
21109- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
21110+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
21111 -1, __builtin_return_address(0));
21112 }
21113
21114+void *module_alloc(unsigned long size)
21115+{
21116+
21117+#ifdef CONFIG_PAX_KERNEXEC
21118+ return __module_alloc(size, PAGE_KERNEL);
21119+#else
21120+ return __module_alloc(size, PAGE_KERNEL_EXEC);
21121+#endif
21122+
21123+}
21124+
21125+#ifdef CONFIG_PAX_KERNEXEC
21126+#ifdef CONFIG_X86_32
21127+void *module_alloc_exec(unsigned long size)
21128+{
21129+ struct vm_struct *area;
21130+
21131+ if (size == 0)
21132+ return NULL;
21133+
21134+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
21135+ return area ? area->addr : NULL;
21136+}
21137+EXPORT_SYMBOL(module_alloc_exec);
21138+
21139+void module_free_exec(struct module *mod, void *module_region)
21140+{
21141+ vunmap(module_region);
21142+}
21143+EXPORT_SYMBOL(module_free_exec);
21144+#else
21145+void module_free_exec(struct module *mod, void *module_region)
21146+{
21147+ module_free(mod, module_region);
21148+}
21149+EXPORT_SYMBOL(module_free_exec);
21150+
21151+void *module_alloc_exec(unsigned long size)
21152+{
21153+ return __module_alloc(size, PAGE_KERNEL_RX);
21154+}
21155+EXPORT_SYMBOL(module_alloc_exec);
21156+#endif
21157+#endif
21158+
21159 #ifdef CONFIG_X86_32
21160 int apply_relocate(Elf32_Shdr *sechdrs,
21161 const char *strtab,
21162@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21163 unsigned int i;
21164 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
21165 Elf32_Sym *sym;
21166- uint32_t *location;
21167+ uint32_t *plocation, location;
21168
21169 DEBUGP("Applying relocate section %u to %u\n",
21170 relsec, sechdrs[relsec].sh_info);
21171 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
21172 /* This is where to make the change */
21173- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
21174- + rel[i].r_offset;
21175+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
21176+ location = (uint32_t)plocation;
21177+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
21178+ plocation = ktla_ktva((void *)plocation);
21179 /* This is the symbol it is referring to. Note that all
21180 undefined symbols have been resolved. */
21181 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
21182@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21183 switch (ELF32_R_TYPE(rel[i].r_info)) {
21184 case R_386_32:
21185 /* We add the value into the location given */
21186- *location += sym->st_value;
21187+ pax_open_kernel();
21188+ *plocation += sym->st_value;
21189+ pax_close_kernel();
21190 break;
21191 case R_386_PC32:
21192 /* Add the value, subtract its position */
21193- *location += sym->st_value - (uint32_t)location;
21194+ pax_open_kernel();
21195+ *plocation += sym->st_value - location;
21196+ pax_close_kernel();
21197 break;
21198 default:
21199 pr_err("%s: Unknown relocation: %u\n",
21200@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
21201 case R_X86_64_NONE:
21202 break;
21203 case R_X86_64_64:
21204+ pax_open_kernel();
21205 *(u64 *)loc = val;
21206+ pax_close_kernel();
21207 break;
21208 case R_X86_64_32:
21209+ pax_open_kernel();
21210 *(u32 *)loc = val;
21211+ pax_close_kernel();
21212 if (val != *(u32 *)loc)
21213 goto overflow;
21214 break;
21215 case R_X86_64_32S:
21216+ pax_open_kernel();
21217 *(s32 *)loc = val;
21218+ pax_close_kernel();
21219 if ((s64)val != *(s32 *)loc)
21220 goto overflow;
21221 break;
21222 case R_X86_64_PC32:
21223 val -= (u64)loc;
21224+ pax_open_kernel();
21225 *(u32 *)loc = val;
21226+ pax_close_kernel();
21227+
21228 #if 0
21229 if ((s64)val != *(s32 *)loc)
21230 goto overflow;
21231diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21232index 4929502..686c291 100644
21233--- a/arch/x86/kernel/msr.c
21234+++ b/arch/x86/kernel/msr.c
21235@@ -234,7 +234,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21236 return notifier_from_errno(err);
21237 }
21238
21239-static struct notifier_block __refdata msr_class_cpu_notifier = {
21240+static struct notifier_block msr_class_cpu_notifier = {
21241 .notifier_call = msr_class_cpu_callback,
21242 };
21243
21244diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21245index f84f5c5..f404e81 100644
21246--- a/arch/x86/kernel/nmi.c
21247+++ b/arch/x86/kernel/nmi.c
21248@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
21249 return handled;
21250 }
21251
21252-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21253+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
21254 {
21255 struct nmi_desc *desc = nmi_to_desc(type);
21256 unsigned long flags;
21257@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21258 * event confuses some handlers (kdump uses this flag)
21259 */
21260 if (action->flags & NMI_FLAG_FIRST)
21261- list_add_rcu(&action->list, &desc->head);
21262+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
21263 else
21264- list_add_tail_rcu(&action->list, &desc->head);
21265+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
21266
21267 spin_unlock_irqrestore(&desc->lock, flags);
21268 return 0;
21269@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
21270 if (!strcmp(n->name, name)) {
21271 WARN(in_nmi(),
21272 "Trying to free NMI (%s) from NMI context!\n", n->name);
21273- list_del_rcu(&n->list);
21274+ pax_list_del_rcu((struct list_head *)&n->list);
21275 break;
21276 }
21277 }
21278@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
21279 dotraplinkage notrace __kprobes void
21280 do_nmi(struct pt_regs *regs, long error_code)
21281 {
21282+
21283+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21284+ if (!user_mode(regs)) {
21285+ unsigned long cs = regs->cs & 0xFFFF;
21286+ unsigned long ip = ktva_ktla(regs->ip);
21287+
21288+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21289+ regs->ip = ip;
21290+ }
21291+#endif
21292+
21293 nmi_nesting_preprocess(regs);
21294
21295 nmi_enter();
21296diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
21297index 6d9582e..f746287 100644
21298--- a/arch/x86/kernel/nmi_selftest.c
21299+++ b/arch/x86/kernel/nmi_selftest.c
21300@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
21301 {
21302 /* trap all the unknown NMIs we may generate */
21303 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
21304- __initdata);
21305+ __initconst);
21306 }
21307
21308 static void __init cleanup_nmi_testsuite(void)
21309@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
21310 unsigned long timeout;
21311
21312 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
21313- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
21314+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
21315 nmi_fail = FAILURE;
21316 return;
21317 }
21318diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
21319index 676b8c7..870ba04 100644
21320--- a/arch/x86/kernel/paravirt-spinlocks.c
21321+++ b/arch/x86/kernel/paravirt-spinlocks.c
21322@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
21323 arch_spin_lock(lock);
21324 }
21325
21326-struct pv_lock_ops pv_lock_ops = {
21327+struct pv_lock_ops pv_lock_ops __read_only = {
21328 #ifdef CONFIG_SMP
21329 .spin_is_locked = __ticket_spin_is_locked,
21330 .spin_is_contended = __ticket_spin_is_contended,
21331diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
21332index 17fff18..5cfa0f4 100644
21333--- a/arch/x86/kernel/paravirt.c
21334+++ b/arch/x86/kernel/paravirt.c
21335@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
21336 {
21337 return x;
21338 }
21339+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21340+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
21341+#endif
21342
21343 void __init default_banner(void)
21344 {
21345@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
21346 if (opfunc == NULL)
21347 /* If there's no function, patch it with a ud2a (BUG) */
21348 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
21349- else if (opfunc == _paravirt_nop)
21350+ else if (opfunc == (void *)_paravirt_nop)
21351 /* If the operation is a nop, then nop the callsite */
21352 ret = paravirt_patch_nop();
21353
21354 /* identity functions just return their single argument */
21355- else if (opfunc == _paravirt_ident_32)
21356+ else if (opfunc == (void *)_paravirt_ident_32)
21357 ret = paravirt_patch_ident_32(insnbuf, len);
21358- else if (opfunc == _paravirt_ident_64)
21359+ else if (opfunc == (void *)_paravirt_ident_64)
21360 ret = paravirt_patch_ident_64(insnbuf, len);
21361+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21362+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
21363+ ret = paravirt_patch_ident_64(insnbuf, len);
21364+#endif
21365
21366 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
21367 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
21368@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
21369 if (insn_len > len || start == NULL)
21370 insn_len = len;
21371 else
21372- memcpy(insnbuf, start, insn_len);
21373+ memcpy(insnbuf, ktla_ktva(start), insn_len);
21374
21375 return insn_len;
21376 }
21377@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
21378 preempt_enable();
21379 }
21380
21381-struct pv_info pv_info = {
21382+struct pv_info pv_info __read_only = {
21383 .name = "bare hardware",
21384 .paravirt_enabled = 0,
21385 .kernel_rpl = 0,
21386@@ -315,16 +322,16 @@ struct pv_info pv_info = {
21387 #endif
21388 };
21389
21390-struct pv_init_ops pv_init_ops = {
21391+struct pv_init_ops pv_init_ops __read_only = {
21392 .patch = native_patch,
21393 };
21394
21395-struct pv_time_ops pv_time_ops = {
21396+struct pv_time_ops pv_time_ops __read_only = {
21397 .sched_clock = native_sched_clock,
21398 .steal_clock = native_steal_clock,
21399 };
21400
21401-struct pv_irq_ops pv_irq_ops = {
21402+struct pv_irq_ops pv_irq_ops __read_only = {
21403 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
21404 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
21405 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
21406@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
21407 #endif
21408 };
21409
21410-struct pv_cpu_ops pv_cpu_ops = {
21411+struct pv_cpu_ops pv_cpu_ops __read_only = {
21412 .cpuid = native_cpuid,
21413 .get_debugreg = native_get_debugreg,
21414 .set_debugreg = native_set_debugreg,
21415@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
21416 .end_context_switch = paravirt_nop,
21417 };
21418
21419-struct pv_apic_ops pv_apic_ops = {
21420+struct pv_apic_ops pv_apic_ops __read_only= {
21421 #ifdef CONFIG_X86_LOCAL_APIC
21422 .startup_ipi_hook = paravirt_nop,
21423 #endif
21424 };
21425
21426-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
21427+#ifdef CONFIG_X86_32
21428+#ifdef CONFIG_X86_PAE
21429+/* 64-bit pagetable entries */
21430+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
21431+#else
21432 /* 32-bit pagetable entries */
21433 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
21434+#endif
21435 #else
21436 /* 64-bit pagetable entries */
21437 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
21438 #endif
21439
21440-struct pv_mmu_ops pv_mmu_ops = {
21441+struct pv_mmu_ops pv_mmu_ops __read_only = {
21442
21443 .read_cr2 = native_read_cr2,
21444 .write_cr2 = native_write_cr2,
21445@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
21446 .make_pud = PTE_IDENT,
21447
21448 .set_pgd = native_set_pgd,
21449+ .set_pgd_batched = native_set_pgd_batched,
21450 #endif
21451 #endif /* PAGETABLE_LEVELS >= 3 */
21452
21453@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
21454 },
21455
21456 .set_fixmap = native_set_fixmap,
21457+
21458+#ifdef CONFIG_PAX_KERNEXEC
21459+ .pax_open_kernel = native_pax_open_kernel,
21460+ .pax_close_kernel = native_pax_close_kernel,
21461+#endif
21462+
21463 };
21464
21465 EXPORT_SYMBOL_GPL(pv_time_ops);
21466diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
21467index 35ccf75..7a15747 100644
21468--- a/arch/x86/kernel/pci-iommu_table.c
21469+++ b/arch/x86/kernel/pci-iommu_table.c
21470@@ -2,7 +2,7 @@
21471 #include <asm/iommu_table.h>
21472 #include <linux/string.h>
21473 #include <linux/kallsyms.h>
21474-
21475+#include <linux/sched.h>
21476
21477 #define DEBUG 1
21478
21479diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
21480index 6c483ba..d10ce2f 100644
21481--- a/arch/x86/kernel/pci-swiotlb.c
21482+++ b/arch/x86/kernel/pci-swiotlb.c
21483@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
21484 void *vaddr, dma_addr_t dma_addr,
21485 struct dma_attrs *attrs)
21486 {
21487- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
21488+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
21489 }
21490
21491 static struct dma_map_ops swiotlb_dma_ops = {
21492diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
21493index 2ed787f..f70c9f6 100644
21494--- a/arch/x86/kernel/process.c
21495+++ b/arch/x86/kernel/process.c
21496@@ -36,7 +36,8 @@
21497 * section. Since TSS's are completely CPU-local, we want them
21498 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
21499 */
21500-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
21501+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
21502+EXPORT_SYMBOL(init_tss);
21503
21504 #ifdef CONFIG_X86_64
21505 static DEFINE_PER_CPU(unsigned char, is_idle);
21506@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
21507 task_xstate_cachep =
21508 kmem_cache_create("task_xstate", xstate_size,
21509 __alignof__(union thread_xstate),
21510- SLAB_PANIC | SLAB_NOTRACK, NULL);
21511+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
21512 }
21513
21514 /*
21515@@ -105,7 +106,7 @@ void exit_thread(void)
21516 unsigned long *bp = t->io_bitmap_ptr;
21517
21518 if (bp) {
21519- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
21520+ struct tss_struct *tss = init_tss + get_cpu();
21521
21522 t->io_bitmap_ptr = NULL;
21523 clear_thread_flag(TIF_IO_BITMAP);
21524@@ -136,7 +137,7 @@ void show_regs_common(void)
21525 board = dmi_get_system_info(DMI_BOARD_NAME);
21526
21527 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
21528- current->pid, current->comm, print_tainted(),
21529+ task_pid_nr(current), current->comm, print_tainted(),
21530 init_utsname()->release,
21531 (int)strcspn(init_utsname()->version, " "),
21532 init_utsname()->version,
21533@@ -149,6 +150,9 @@ void flush_thread(void)
21534 {
21535 struct task_struct *tsk = current;
21536
21537+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
21538+ loadsegment(gs, 0);
21539+#endif
21540 flush_ptrace_hw_breakpoint(tsk);
21541 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
21542 drop_init_fpu(tsk);
21543@@ -301,7 +305,7 @@ static void __exit_idle(void)
21544 void exit_idle(void)
21545 {
21546 /* idle loop has pid 0 */
21547- if (current->pid)
21548+ if (task_pid_nr(current))
21549 return;
21550 __exit_idle();
21551 }
21552@@ -404,7 +408,7 @@ bool set_pm_idle_to_default(void)
21553
21554 return ret;
21555 }
21556-void stop_this_cpu(void *dummy)
21557+__noreturn void stop_this_cpu(void *dummy)
21558 {
21559 local_irq_disable();
21560 /*
21561@@ -632,16 +636,37 @@ static int __init idle_setup(char *str)
21562 }
21563 early_param("idle", idle_setup);
21564
21565-unsigned long arch_align_stack(unsigned long sp)
21566+#ifdef CONFIG_PAX_RANDKSTACK
21567+void pax_randomize_kstack(struct pt_regs *regs)
21568 {
21569- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
21570- sp -= get_random_int() % 8192;
21571- return sp & ~0xf;
21572-}
21573+ struct thread_struct *thread = &current->thread;
21574+ unsigned long time;
21575
21576-unsigned long arch_randomize_brk(struct mm_struct *mm)
21577-{
21578- unsigned long range_end = mm->brk + 0x02000000;
21579- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
21580-}
21581+ if (!randomize_va_space)
21582+ return;
21583+
21584+ if (v8086_mode(regs))
21585+ return;
21586
21587+ rdtscl(time);
21588+
21589+ /* P4 seems to return a 0 LSB, ignore it */
21590+#ifdef CONFIG_MPENTIUM4
21591+ time &= 0x3EUL;
21592+ time <<= 2;
21593+#elif defined(CONFIG_X86_64)
21594+ time &= 0xFUL;
21595+ time <<= 4;
21596+#else
21597+ time &= 0x1FUL;
21598+ time <<= 3;
21599+#endif
21600+
21601+ thread->sp0 ^= time;
21602+ load_sp0(init_tss + smp_processor_id(), thread);
21603+
21604+#ifdef CONFIG_X86_64
21605+ this_cpu_write(kernel_stack, thread->sp0);
21606+#endif
21607+}
21608+#endif
21609diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
21610index b5a8905..d9cacac 100644
21611--- a/arch/x86/kernel/process_32.c
21612+++ b/arch/x86/kernel/process_32.c
21613@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
21614 unsigned long thread_saved_pc(struct task_struct *tsk)
21615 {
21616 return ((unsigned long *)tsk->thread.sp)[3];
21617+//XXX return tsk->thread.eip;
21618 }
21619
21620 void __show_regs(struct pt_regs *regs, int all)
21621@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
21622 unsigned long sp;
21623 unsigned short ss, gs;
21624
21625- if (user_mode_vm(regs)) {
21626+ if (user_mode(regs)) {
21627 sp = regs->sp;
21628 ss = regs->ss & 0xffff;
21629- gs = get_user_gs(regs);
21630 } else {
21631 sp = kernel_stack_pointer(regs);
21632 savesegment(ss, ss);
21633- savesegment(gs, gs);
21634 }
21635+ gs = get_user_gs(regs);
21636
21637 show_regs_common();
21638
21639 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
21640 (u16)regs->cs, regs->ip, regs->flags,
21641- smp_processor_id());
21642+ raw_smp_processor_id());
21643 print_symbol("EIP is at %s\n", regs->ip);
21644
21645 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
21646@@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
21647 int copy_thread(unsigned long clone_flags, unsigned long sp,
21648 unsigned long arg, struct task_struct *p)
21649 {
21650- struct pt_regs *childregs = task_pt_regs(p);
21651+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
21652 struct task_struct *tsk;
21653 int err;
21654
21655 p->thread.sp = (unsigned long) childregs;
21656 p->thread.sp0 = (unsigned long) (childregs+1);
21657+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
21658
21659 if (unlikely(p->flags & PF_KTHREAD)) {
21660 /* kernel thread */
21661 memset(childregs, 0, sizeof(struct pt_regs));
21662 p->thread.ip = (unsigned long) ret_from_kernel_thread;
21663- task_user_gs(p) = __KERNEL_STACK_CANARY;
21664- childregs->ds = __USER_DS;
21665- childregs->es = __USER_DS;
21666+ savesegment(gs, childregs->gs);
21667+ childregs->ds = __KERNEL_DS;
21668+ childregs->es = __KERNEL_DS;
21669 childregs->fs = __KERNEL_PERCPU;
21670 childregs->bx = sp; /* function */
21671 childregs->bp = arg;
21672@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21673 struct thread_struct *prev = &prev_p->thread,
21674 *next = &next_p->thread;
21675 int cpu = smp_processor_id();
21676- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21677+ struct tss_struct *tss = init_tss + cpu;
21678 fpu_switch_t fpu;
21679
21680 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
21681@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21682 */
21683 lazy_save_gs(prev->gs);
21684
21685+#ifdef CONFIG_PAX_MEMORY_UDEREF
21686+ __set_fs(task_thread_info(next_p)->addr_limit);
21687+#endif
21688+
21689 /*
21690 * Load the per-thread Thread-Local Storage descriptor.
21691 */
21692@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21693 */
21694 arch_end_context_switch(next_p);
21695
21696+ this_cpu_write(current_task, next_p);
21697+ this_cpu_write(current_tinfo, &next_p->tinfo);
21698+
21699 /*
21700 * Restore %gs if needed (which is common)
21701 */
21702@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21703
21704 switch_fpu_finish(next_p, fpu);
21705
21706- this_cpu_write(current_task, next_p);
21707-
21708 return prev_p;
21709 }
21710
21711@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
21712 } while (count++ < 16);
21713 return 0;
21714 }
21715-
21716diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
21717index 6e68a61..955a9a5 100644
21718--- a/arch/x86/kernel/process_64.c
21719+++ b/arch/x86/kernel/process_64.c
21720@@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
21721 struct pt_regs *childregs;
21722 struct task_struct *me = current;
21723
21724- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
21725+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
21726 childregs = task_pt_regs(p);
21727 p->thread.sp = (unsigned long) childregs;
21728 p->thread.usersp = me->thread.usersp;
21729+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
21730 set_tsk_thread_flag(p, TIF_FORK);
21731 p->fpu_counter = 0;
21732 p->thread.io_bitmap_ptr = NULL;
21733@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21734 struct thread_struct *prev = &prev_p->thread;
21735 struct thread_struct *next = &next_p->thread;
21736 int cpu = smp_processor_id();
21737- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21738+ struct tss_struct *tss = init_tss + cpu;
21739 unsigned fsindex, gsindex;
21740 fpu_switch_t fpu;
21741
21742@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21743 prev->usersp = this_cpu_read(old_rsp);
21744 this_cpu_write(old_rsp, next->usersp);
21745 this_cpu_write(current_task, next_p);
21746+ this_cpu_write(current_tinfo, &next_p->tinfo);
21747
21748- this_cpu_write(kernel_stack,
21749- (unsigned long)task_stack_page(next_p) +
21750- THREAD_SIZE - KERNEL_STACK_OFFSET);
21751+ this_cpu_write(kernel_stack, next->sp0);
21752
21753 /*
21754 * Now maybe reload the debug registers and handle I/O bitmaps
21755@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
21756 if (!p || p == current || p->state == TASK_RUNNING)
21757 return 0;
21758 stack = (unsigned long)task_stack_page(p);
21759- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
21760+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
21761 return 0;
21762 fp = *(u64 *)(p->thread.sp);
21763 do {
21764- if (fp < (unsigned long)stack ||
21765- fp >= (unsigned long)stack+THREAD_SIZE)
21766+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
21767 return 0;
21768 ip = *(u64 *)(fp+8);
21769 if (!in_sched_functions(ip))
21770diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
21771index b629bbe..0fa615a 100644
21772--- a/arch/x86/kernel/ptrace.c
21773+++ b/arch/x86/kernel/ptrace.c
21774@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
21775 {
21776 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
21777 unsigned long sp = (unsigned long)&regs->sp;
21778- struct thread_info *tinfo;
21779
21780- if (context == (sp & ~(THREAD_SIZE - 1)))
21781+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
21782 return sp;
21783
21784- tinfo = (struct thread_info *)context;
21785- if (tinfo->previous_esp)
21786- return tinfo->previous_esp;
21787+ sp = *(unsigned long *)context;
21788+ if (sp)
21789+ return sp;
21790
21791 return (unsigned long)regs;
21792 }
21793@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
21794 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
21795 {
21796 int i;
21797- int dr7 = 0;
21798+ unsigned long dr7 = 0;
21799 struct arch_hw_breakpoint *info;
21800
21801 for (i = 0; i < HBP_NUM; i++) {
21802@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
21803 unsigned long addr, unsigned long data)
21804 {
21805 int ret;
21806- unsigned long __user *datap = (unsigned long __user *)data;
21807+ unsigned long __user *datap = (__force unsigned long __user *)data;
21808
21809 switch (request) {
21810 /* read the word at location addr in the USER area. */
21811@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
21812 if ((int) addr < 0)
21813 return -EIO;
21814 ret = do_get_thread_area(child, addr,
21815- (struct user_desc __user *)data);
21816+ (__force struct user_desc __user *) data);
21817 break;
21818
21819 case PTRACE_SET_THREAD_AREA:
21820 if ((int) addr < 0)
21821 return -EIO;
21822 ret = do_set_thread_area(child, addr,
21823- (struct user_desc __user *)data, 0);
21824+ (__force struct user_desc __user *) data, 0);
21825 break;
21826 #endif
21827
21828@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
21829
21830 #ifdef CONFIG_X86_64
21831
21832-static struct user_regset x86_64_regsets[] __read_mostly = {
21833+static user_regset_no_const x86_64_regsets[] __read_only = {
21834 [REGSET_GENERAL] = {
21835 .core_note_type = NT_PRSTATUS,
21836 .n = sizeof(struct user_regs_struct) / sizeof(long),
21837@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
21838 #endif /* CONFIG_X86_64 */
21839
21840 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
21841-static struct user_regset x86_32_regsets[] __read_mostly = {
21842+static user_regset_no_const x86_32_regsets[] __read_only = {
21843 [REGSET_GENERAL] = {
21844 .core_note_type = NT_PRSTATUS,
21845 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
21846@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
21847 */
21848 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
21849
21850-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
21851+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
21852 {
21853 #ifdef CONFIG_X86_64
21854 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
21855@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
21856 memset(info, 0, sizeof(*info));
21857 info->si_signo = SIGTRAP;
21858 info->si_code = si_code;
21859- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
21860+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
21861 }
21862
21863 void user_single_step_siginfo(struct task_struct *tsk,
21864@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
21865 # define IS_IA32 0
21866 #endif
21867
21868+#ifdef CONFIG_GRKERNSEC_SETXID
21869+extern void gr_delayed_cred_worker(void);
21870+#endif
21871+
21872 /*
21873 * We must return the syscall number to actually look up in the table.
21874 * This can be -1L to skip running any syscall at all.
21875@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
21876
21877 user_exit();
21878
21879+#ifdef CONFIG_GRKERNSEC_SETXID
21880+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
21881+ gr_delayed_cred_worker();
21882+#endif
21883+
21884 /*
21885 * If we stepped into a sysenter/syscall insn, it trapped in
21886 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
21887@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
21888 */
21889 user_exit();
21890
21891+#ifdef CONFIG_GRKERNSEC_SETXID
21892+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
21893+ gr_delayed_cred_worker();
21894+#endif
21895+
21896 audit_syscall_exit(regs);
21897
21898 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
21899diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
21900index 85c3959..76b89f9 100644
21901--- a/arch/x86/kernel/pvclock.c
21902+++ b/arch/x86/kernel/pvclock.c
21903@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
21904 return pv_tsc_khz;
21905 }
21906
21907-static atomic64_t last_value = ATOMIC64_INIT(0);
21908+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
21909
21910 void pvclock_resume(void)
21911 {
21912- atomic64_set(&last_value, 0);
21913+ atomic64_set_unchecked(&last_value, 0);
21914 }
21915
21916 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
21917@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
21918 * updating at the same time, and one of them could be slightly behind,
21919 * making the assumption that last_value always go forward fail to hold.
21920 */
21921- last = atomic64_read(&last_value);
21922+ last = atomic64_read_unchecked(&last_value);
21923 do {
21924 if (ret < last)
21925 return last;
21926- last = atomic64_cmpxchg(&last_value, last, ret);
21927+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
21928 } while (unlikely(last != ret));
21929
21930 return ret;
21931diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
21932index 76fa1e9..abf09ea 100644
21933--- a/arch/x86/kernel/reboot.c
21934+++ b/arch/x86/kernel/reboot.c
21935@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
21936 EXPORT_SYMBOL(pm_power_off);
21937
21938 static const struct desc_ptr no_idt = {};
21939-static int reboot_mode;
21940+static unsigned short reboot_mode;
21941 enum reboot_type reboot_type = BOOT_ACPI;
21942 int reboot_force;
21943
21944@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
21945
21946 void __noreturn machine_real_restart(unsigned int type)
21947 {
21948+
21949+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21950+ struct desc_struct *gdt;
21951+#endif
21952+
21953 local_irq_disable();
21954
21955 /*
21956@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
21957
21958 /* Jump to the identity-mapped low memory code */
21959 #ifdef CONFIG_X86_32
21960- asm volatile("jmpl *%0" : :
21961+
21962+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21963+ gdt = get_cpu_gdt_table(smp_processor_id());
21964+ pax_open_kernel();
21965+#ifdef CONFIG_PAX_MEMORY_UDEREF
21966+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
21967+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
21968+ loadsegment(ds, __KERNEL_DS);
21969+ loadsegment(es, __KERNEL_DS);
21970+ loadsegment(ss, __KERNEL_DS);
21971+#endif
21972+#ifdef CONFIG_PAX_KERNEXEC
21973+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
21974+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
21975+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
21976+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
21977+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
21978+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
21979+#endif
21980+ pax_close_kernel();
21981+#endif
21982+
21983+ asm volatile("ljmpl *%0" : :
21984 "rm" (real_mode_header->machine_real_restart_asm),
21985 "a" (type));
21986 #else
21987@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
21988 * try to force a triple fault and then cycle between hitting the keyboard
21989 * controller and doing that
21990 */
21991-static void native_machine_emergency_restart(void)
21992+static void __noreturn native_machine_emergency_restart(void)
21993 {
21994 int i;
21995 int attempt = 0;
21996@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
21997 #endif
21998 }
21999
22000-static void __machine_emergency_restart(int emergency)
22001+static void __noreturn __machine_emergency_restart(int emergency)
22002 {
22003 reboot_emergency = emergency;
22004 machine_ops.emergency_restart();
22005 }
22006
22007-static void native_machine_restart(char *__unused)
22008+static void __noreturn native_machine_restart(char *__unused)
22009 {
22010 pr_notice("machine restart\n");
22011
22012@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
22013 __machine_emergency_restart(0);
22014 }
22015
22016-static void native_machine_halt(void)
22017+static void __noreturn native_machine_halt(void)
22018 {
22019 /* Stop other cpus and apics */
22020 machine_shutdown();
22021@@ -679,7 +706,7 @@ static void native_machine_halt(void)
22022 stop_this_cpu(NULL);
22023 }
22024
22025-static void native_machine_power_off(void)
22026+static void __noreturn native_machine_power_off(void)
22027 {
22028 if (pm_power_off) {
22029 if (!reboot_force)
22030@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
22031 }
22032 /* A fallback in case there is no PM info available */
22033 tboot_shutdown(TB_SHUTDOWN_HALT);
22034+ unreachable();
22035 }
22036
22037-struct machine_ops machine_ops = {
22038+struct machine_ops machine_ops __read_only = {
22039 .power_off = native_machine_power_off,
22040 .shutdown = native_machine_shutdown,
22041 .emergency_restart = native_machine_emergency_restart,
22042diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
22043index 7a6f3b3..bed145d7 100644
22044--- a/arch/x86/kernel/relocate_kernel_64.S
22045+++ b/arch/x86/kernel/relocate_kernel_64.S
22046@@ -11,6 +11,7 @@
22047 #include <asm/kexec.h>
22048 #include <asm/processor-flags.h>
22049 #include <asm/pgtable_types.h>
22050+#include <asm/alternative-asm.h>
22051
22052 /*
22053 * Must be relocatable PIC code callable as a C function
22054@@ -160,13 +161,14 @@ identity_mapped:
22055 xorq %rbp, %rbp
22056 xorq %r8, %r8
22057 xorq %r9, %r9
22058- xorq %r10, %r9
22059+ xorq %r10, %r10
22060 xorq %r11, %r11
22061 xorq %r12, %r12
22062 xorq %r13, %r13
22063 xorq %r14, %r14
22064 xorq %r15, %r15
22065
22066+ pax_force_retaddr 0, 1
22067 ret
22068
22069 1:
22070diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
22071index 8b24289..d37b58b 100644
22072--- a/arch/x86/kernel/setup.c
22073+++ b/arch/x86/kernel/setup.c
22074@@ -437,7 +437,7 @@ static void __init parse_setup_data(void)
22075
22076 switch (data->type) {
22077 case SETUP_E820_EXT:
22078- parse_e820_ext(data);
22079+ parse_e820_ext((struct setup_data __force_kernel *)data);
22080 break;
22081 case SETUP_DTB:
22082 add_dtb(pa_data);
22083@@ -706,7 +706,7 @@ static void __init trim_bios_range(void)
22084 * area (640->1Mb) as ram even though it is not.
22085 * take them out.
22086 */
22087- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
22088+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
22089
22090 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
22091 }
22092@@ -830,14 +830,14 @@ void __init setup_arch(char **cmdline_p)
22093
22094 if (!boot_params.hdr.root_flags)
22095 root_mountflags &= ~MS_RDONLY;
22096- init_mm.start_code = (unsigned long) _text;
22097- init_mm.end_code = (unsigned long) _etext;
22098+ init_mm.start_code = ktla_ktva((unsigned long) _text);
22099+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
22100 init_mm.end_data = (unsigned long) _edata;
22101 init_mm.brk = _brk_end;
22102
22103- code_resource.start = virt_to_phys(_text);
22104- code_resource.end = virt_to_phys(_etext)-1;
22105- data_resource.start = virt_to_phys(_etext);
22106+ code_resource.start = virt_to_phys(ktla_ktva(_text));
22107+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
22108+ data_resource.start = virt_to_phys(_sdata);
22109 data_resource.end = virt_to_phys(_edata)-1;
22110 bss_resource.start = virt_to_phys(&__bss_start);
22111 bss_resource.end = virt_to_phys(&__bss_stop)-1;
22112diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
22113index 5cdff03..5810740 100644
22114--- a/arch/x86/kernel/setup_percpu.c
22115+++ b/arch/x86/kernel/setup_percpu.c
22116@@ -21,19 +21,17 @@
22117 #include <asm/cpu.h>
22118 #include <asm/stackprotector.h>
22119
22120-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
22121+#ifdef CONFIG_SMP
22122+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
22123 EXPORT_PER_CPU_SYMBOL(cpu_number);
22124+#endif
22125
22126-#ifdef CONFIG_X86_64
22127 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
22128-#else
22129-#define BOOT_PERCPU_OFFSET 0
22130-#endif
22131
22132 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
22133 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
22134
22135-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
22136+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
22137 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
22138 };
22139 EXPORT_SYMBOL(__per_cpu_offset);
22140@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
22141 {
22142 #ifdef CONFIG_X86_32
22143 struct desc_struct gdt;
22144+ unsigned long base = per_cpu_offset(cpu);
22145
22146- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
22147- 0x2 | DESCTYPE_S, 0x8);
22148- gdt.s = 1;
22149+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
22150+ 0x83 | DESCTYPE_S, 0xC);
22151 write_gdt_entry(get_cpu_gdt_table(cpu),
22152 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
22153 #endif
22154@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
22155 /* alrighty, percpu areas up and running */
22156 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
22157 for_each_possible_cpu(cpu) {
22158+#ifdef CONFIG_CC_STACKPROTECTOR
22159+#ifdef CONFIG_X86_32
22160+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
22161+#endif
22162+#endif
22163 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
22164 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
22165 per_cpu(cpu_number, cpu) = cpu;
22166@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
22167 */
22168 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
22169 #endif
22170+#ifdef CONFIG_CC_STACKPROTECTOR
22171+#ifdef CONFIG_X86_32
22172+ if (!cpu)
22173+ per_cpu(stack_canary.canary, cpu) = canary;
22174+#endif
22175+#endif
22176 /*
22177 * Up to this point, the boot CPU has been using .init.data
22178 * area. Reload any changed state for the boot CPU.
22179diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
22180index d6bf1f3..3ffce5a 100644
22181--- a/arch/x86/kernel/signal.c
22182+++ b/arch/x86/kernel/signal.c
22183@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
22184 * Align the stack pointer according to the i386 ABI,
22185 * i.e. so that on function entry ((sp + 4) & 15) == 0.
22186 */
22187- sp = ((sp + 4) & -16ul) - 4;
22188+ sp = ((sp - 12) & -16ul) - 4;
22189 #else /* !CONFIG_X86_32 */
22190 sp = round_down(sp, 16) - 8;
22191 #endif
22192@@ -304,9 +304,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22193 }
22194
22195 if (current->mm->context.vdso)
22196- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22197+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22198 else
22199- restorer = &frame->retcode;
22200+ restorer = (void __user *)&frame->retcode;
22201 if (ka->sa.sa_flags & SA_RESTORER)
22202 restorer = ka->sa.sa_restorer;
22203
22204@@ -320,7 +320,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22205 * reasons and because gdb uses it as a signature to notice
22206 * signal handler stack frames.
22207 */
22208- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
22209+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
22210
22211 if (err)
22212 return -EFAULT;
22213@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22214 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
22215
22216 /* Set up to return from userspace. */
22217- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22218+ if (current->mm->context.vdso)
22219+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22220+ else
22221+ restorer = (void __user *)&frame->retcode;
22222 if (ka->sa.sa_flags & SA_RESTORER)
22223 restorer = ka->sa.sa_restorer;
22224 put_user_ex(restorer, &frame->pretcode);
22225@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22226 * reasons and because gdb uses it as a signature to notice
22227 * signal handler stack frames.
22228 */
22229- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
22230+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
22231 } put_user_catch(err);
22232
22233 err |= copy_siginfo_to_user(&frame->info, info);
22234diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
22235index 48d2b7d..90d328a 100644
22236--- a/arch/x86/kernel/smp.c
22237+++ b/arch/x86/kernel/smp.c
22238@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
22239
22240 __setup("nonmi_ipi", nonmi_ipi_setup);
22241
22242-struct smp_ops smp_ops = {
22243+struct smp_ops smp_ops __read_only = {
22244 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
22245 .smp_prepare_cpus = native_smp_prepare_cpus,
22246 .smp_cpus_done = native_smp_cpus_done,
22247diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
22248index ed0fe38..87fc692 100644
22249--- a/arch/x86/kernel/smpboot.c
22250+++ b/arch/x86/kernel/smpboot.c
22251@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22252 idle->thread.sp = (unsigned long) (((struct pt_regs *)
22253 (THREAD_SIZE + task_stack_page(idle))) - 1);
22254 per_cpu(current_task, cpu) = idle;
22255+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22256
22257 #ifdef CONFIG_X86_32
22258 /* Stack for startup_32 can be just as for start_secondary onwards */
22259@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22260 #else
22261 clear_tsk_thread_flag(idle, TIF_FORK);
22262 initial_gs = per_cpu_offset(cpu);
22263- per_cpu(kernel_stack, cpu) =
22264- (unsigned long)task_stack_page(idle) -
22265- KERNEL_STACK_OFFSET + THREAD_SIZE;
22266+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22267 #endif
22268+
22269+ pax_open_kernel();
22270 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22271+ pax_close_kernel();
22272+
22273 initial_code = (unsigned long)start_secondary;
22274 stack_start = idle->thread.sp;
22275
22276@@ -908,6 +911,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
22277 /* the FPU context is blank, nobody can own it */
22278 __cpu_disable_lazy_restore(cpu);
22279
22280+#ifdef CONFIG_PAX_PER_CPU_PGD
22281+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
22282+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22283+ KERNEL_PGD_PTRS);
22284+#endif
22285+
22286+ /* the FPU context is blank, nobody can own it */
22287+ __cpu_disable_lazy_restore(cpu);
22288+
22289 err = do_boot_cpu(apicid, cpu, tidle);
22290 if (err) {
22291 pr_debug("do_boot_cpu failed %d\n", err);
22292diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
22293index 9b4d51d..5d28b58 100644
22294--- a/arch/x86/kernel/step.c
22295+++ b/arch/x86/kernel/step.c
22296@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22297 struct desc_struct *desc;
22298 unsigned long base;
22299
22300- seg &= ~7UL;
22301+ seg >>= 3;
22302
22303 mutex_lock(&child->mm->context.lock);
22304- if (unlikely((seg >> 3) >= child->mm->context.size))
22305+ if (unlikely(seg >= child->mm->context.size))
22306 addr = -1L; /* bogus selector, access would fault */
22307 else {
22308 desc = child->mm->context.ldt + seg;
22309@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22310 addr += base;
22311 }
22312 mutex_unlock(&child->mm->context.lock);
22313- }
22314+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
22315+ addr = ktla_ktva(addr);
22316
22317 return addr;
22318 }
22319@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
22320 unsigned char opcode[15];
22321 unsigned long addr = convert_ip_to_linear(child, regs);
22322
22323+ if (addr == -EINVAL)
22324+ return 0;
22325+
22326 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
22327 for (i = 0; i < copied; i++) {
22328 switch (opcode[i]) {
22329diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
22330new file mode 100644
22331index 0000000..26bb1af
22332--- /dev/null
22333+++ b/arch/x86/kernel/sys_i386_32.c
22334@@ -0,0 +1,249 @@
22335+/*
22336+ * This file contains various random system calls that
22337+ * have a non-standard calling sequence on the Linux/i386
22338+ * platform.
22339+ */
22340+
22341+#include <linux/errno.h>
22342+#include <linux/sched.h>
22343+#include <linux/mm.h>
22344+#include <linux/fs.h>
22345+#include <linux/smp.h>
22346+#include <linux/sem.h>
22347+#include <linux/msg.h>
22348+#include <linux/shm.h>
22349+#include <linux/stat.h>
22350+#include <linux/syscalls.h>
22351+#include <linux/mman.h>
22352+#include <linux/file.h>
22353+#include <linux/utsname.h>
22354+#include <linux/ipc.h>
22355+
22356+#include <linux/uaccess.h>
22357+#include <linux/unistd.h>
22358+
22359+#include <asm/syscalls.h>
22360+
22361+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
22362+{
22363+ unsigned long pax_task_size = TASK_SIZE;
22364+
22365+#ifdef CONFIG_PAX_SEGMEXEC
22366+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
22367+ pax_task_size = SEGMEXEC_TASK_SIZE;
22368+#endif
22369+
22370+ if (len > pax_task_size || addr > pax_task_size - len)
22371+ return -EINVAL;
22372+
22373+ return 0;
22374+}
22375+
22376+unsigned long
22377+arch_get_unmapped_area(struct file *filp, unsigned long addr,
22378+ unsigned long len, unsigned long pgoff, unsigned long flags)
22379+{
22380+ struct mm_struct *mm = current->mm;
22381+ struct vm_area_struct *vma;
22382+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22383+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22384+
22385+#ifdef CONFIG_PAX_SEGMEXEC
22386+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22387+ pax_task_size = SEGMEXEC_TASK_SIZE;
22388+#endif
22389+
22390+ pax_task_size -= PAGE_SIZE;
22391+
22392+ if (len > pax_task_size)
22393+ return -ENOMEM;
22394+
22395+ if (flags & MAP_FIXED)
22396+ return addr;
22397+
22398+#ifdef CONFIG_PAX_RANDMMAP
22399+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22400+#endif
22401+
22402+ if (addr) {
22403+ addr = PAGE_ALIGN(addr);
22404+ if (pax_task_size - len >= addr) {
22405+ vma = find_vma(mm, addr);
22406+ if (check_heap_stack_gap(vma, addr, len, offset))
22407+ return addr;
22408+ }
22409+ }
22410+ if (len > mm->cached_hole_size) {
22411+ start_addr = addr = mm->free_area_cache;
22412+ } else {
22413+ start_addr = addr = mm->mmap_base;
22414+ mm->cached_hole_size = 0;
22415+ }
22416+
22417+#ifdef CONFIG_PAX_PAGEEXEC
22418+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
22419+ start_addr = 0x00110000UL;
22420+
22421+#ifdef CONFIG_PAX_RANDMMAP
22422+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22423+ start_addr += mm->delta_mmap & 0x03FFF000UL;
22424+#endif
22425+
22426+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
22427+ start_addr = addr = mm->mmap_base;
22428+ else
22429+ addr = start_addr;
22430+ }
22431+#endif
22432+
22433+full_search:
22434+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22435+ /* At this point: (!vma || addr < vma->vm_end). */
22436+ if (pax_task_size - len < addr) {
22437+ /*
22438+ * Start a new search - just in case we missed
22439+ * some holes.
22440+ */
22441+ if (start_addr != mm->mmap_base) {
22442+ start_addr = addr = mm->mmap_base;
22443+ mm->cached_hole_size = 0;
22444+ goto full_search;
22445+ }
22446+ return -ENOMEM;
22447+ }
22448+ if (check_heap_stack_gap(vma, addr, len, offset))
22449+ break;
22450+ if (addr + mm->cached_hole_size < vma->vm_start)
22451+ mm->cached_hole_size = vma->vm_start - addr;
22452+ addr = vma->vm_end;
22453+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
22454+ start_addr = addr = mm->mmap_base;
22455+ mm->cached_hole_size = 0;
22456+ goto full_search;
22457+ }
22458+ }
22459+
22460+ /*
22461+ * Remember the place where we stopped the search:
22462+ */
22463+ mm->free_area_cache = addr + len;
22464+ return addr;
22465+}
22466+
22467+unsigned long
22468+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
22469+ const unsigned long len, const unsigned long pgoff,
22470+ const unsigned long flags)
22471+{
22472+ struct vm_area_struct *vma;
22473+ struct mm_struct *mm = current->mm;
22474+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
22475+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22476+
22477+#ifdef CONFIG_PAX_SEGMEXEC
22478+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22479+ pax_task_size = SEGMEXEC_TASK_SIZE;
22480+#endif
22481+
22482+ pax_task_size -= PAGE_SIZE;
22483+
22484+ /* requested length too big for entire address space */
22485+ if (len > pax_task_size)
22486+ return -ENOMEM;
22487+
22488+ if (flags & MAP_FIXED)
22489+ return addr;
22490+
22491+#ifdef CONFIG_PAX_PAGEEXEC
22492+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
22493+ goto bottomup;
22494+#endif
22495+
22496+#ifdef CONFIG_PAX_RANDMMAP
22497+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22498+#endif
22499+
22500+ /* requesting a specific address */
22501+ if (addr) {
22502+ addr = PAGE_ALIGN(addr);
22503+ if (pax_task_size - len >= addr) {
22504+ vma = find_vma(mm, addr);
22505+ if (check_heap_stack_gap(vma, addr, len, offset))
22506+ return addr;
22507+ }
22508+ }
22509+
22510+ /* check if free_area_cache is useful for us */
22511+ if (len <= mm->cached_hole_size) {
22512+ mm->cached_hole_size = 0;
22513+ mm->free_area_cache = mm->mmap_base;
22514+ }
22515+
22516+ /* either no address requested or can't fit in requested address hole */
22517+ addr = mm->free_area_cache;
22518+
22519+ /* make sure it can fit in the remaining address space */
22520+ if (addr > len) {
22521+ vma = find_vma(mm, addr-len);
22522+ if (check_heap_stack_gap(vma, addr - len, len, offset))
22523+ /* remember the address as a hint for next time */
22524+ return (mm->free_area_cache = addr-len);
22525+ }
22526+
22527+ if (mm->mmap_base < len)
22528+ goto bottomup;
22529+
22530+ addr = mm->mmap_base-len;
22531+
22532+ do {
22533+ /*
22534+ * Lookup failure means no vma is above this address,
22535+ * else if new region fits below vma->vm_start,
22536+ * return with success:
22537+ */
22538+ vma = find_vma(mm, addr);
22539+ if (check_heap_stack_gap(vma, addr, len, offset))
22540+ /* remember the address as a hint for next time */
22541+ return (mm->free_area_cache = addr);
22542+
22543+ /* remember the largest hole we saw so far */
22544+ if (addr + mm->cached_hole_size < vma->vm_start)
22545+ mm->cached_hole_size = vma->vm_start - addr;
22546+
22547+ /* try just below the current vma->vm_start */
22548+ addr = skip_heap_stack_gap(vma, len, offset);
22549+ } while (!IS_ERR_VALUE(addr));
22550+
22551+bottomup:
22552+ /*
22553+ * A failed mmap() very likely causes application failure,
22554+ * so fall back to the bottom-up function here. This scenario
22555+ * can happen with large stack limits and large mmap()
22556+ * allocations.
22557+ */
22558+
22559+#ifdef CONFIG_PAX_SEGMEXEC
22560+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22561+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22562+ else
22563+#endif
22564+
22565+ mm->mmap_base = TASK_UNMAPPED_BASE;
22566+
22567+#ifdef CONFIG_PAX_RANDMMAP
22568+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22569+ mm->mmap_base += mm->delta_mmap;
22570+#endif
22571+
22572+ mm->free_area_cache = mm->mmap_base;
22573+ mm->cached_hole_size = ~0UL;
22574+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
22575+ /*
22576+ * Restore the topdown base:
22577+ */
22578+ mm->mmap_base = base;
22579+ mm->free_area_cache = base;
22580+ mm->cached_hole_size = ~0UL;
22581+
22582+ return addr;
22583+}
22584diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
22585index 97ef74b..57a1882 100644
22586--- a/arch/x86/kernel/sys_x86_64.c
22587+++ b/arch/x86/kernel/sys_x86_64.c
22588@@ -81,8 +81,8 @@ out:
22589 return error;
22590 }
22591
22592-static void find_start_end(unsigned long flags, unsigned long *begin,
22593- unsigned long *end)
22594+static void find_start_end(struct mm_struct *mm, unsigned long flags,
22595+ unsigned long *begin, unsigned long *end)
22596 {
22597 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
22598 unsigned long new_begin;
22599@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
22600 *begin = new_begin;
22601 }
22602 } else {
22603- *begin = TASK_UNMAPPED_BASE;
22604+ *begin = mm->mmap_base;
22605 *end = TASK_SIZE;
22606 }
22607 }
22608@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
22609 struct vm_area_struct *vma;
22610 struct vm_unmapped_area_info info;
22611 unsigned long begin, end;
22612+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22613
22614 if (flags & MAP_FIXED)
22615 return addr;
22616
22617- find_start_end(flags, &begin, &end);
22618+ find_start_end(mm, flags, &begin, &end);
22619
22620 if (len > end)
22621 return -ENOMEM;
22622
22623+#ifdef CONFIG_PAX_RANDMMAP
22624+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22625+#endif
22626+
22627 if (addr) {
22628 addr = PAGE_ALIGN(addr);
22629 vma = find_vma(mm, addr);
22630- if (end - len >= addr &&
22631- (!vma || addr + len <= vma->vm_start))
22632+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
22633 return addr;
22634 }
22635
22636@@ -161,6 +165,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
22637 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
22638 goto bottomup;
22639
22640+#ifdef CONFIG_PAX_RANDMMAP
22641+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22642+#endif
22643+
22644 /* requesting a specific address */
22645 if (addr) {
22646 addr = PAGE_ALIGN(addr);
22647diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
22648index f84fe00..f41d9f1 100644
22649--- a/arch/x86/kernel/tboot.c
22650+++ b/arch/x86/kernel/tboot.c
22651@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
22652
22653 void tboot_shutdown(u32 shutdown_type)
22654 {
22655- void (*shutdown)(void);
22656+ void (* __noreturn shutdown)(void);
22657
22658 if (!tboot_enabled())
22659 return;
22660@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
22661
22662 switch_to_tboot_pt();
22663
22664- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
22665+ shutdown = (void *)tboot->shutdown_entry;
22666 shutdown();
22667
22668 /* should not reach here */
22669@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
22670 return 0;
22671 }
22672
22673-static atomic_t ap_wfs_count;
22674+static atomic_unchecked_t ap_wfs_count;
22675
22676 static int tboot_wait_for_aps(int num_aps)
22677 {
22678@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
22679 {
22680 switch (action) {
22681 case CPU_DYING:
22682- atomic_inc(&ap_wfs_count);
22683+ atomic_inc_unchecked(&ap_wfs_count);
22684 if (num_online_cpus() == 1)
22685- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
22686+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
22687 return NOTIFY_BAD;
22688 break;
22689 }
22690 return NOTIFY_OK;
22691 }
22692
22693-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
22694+static struct notifier_block tboot_cpu_notifier =
22695 {
22696 .notifier_call = tboot_cpu_callback,
22697 };
22698@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
22699
22700 tboot_create_trampoline();
22701
22702- atomic_set(&ap_wfs_count, 0);
22703+ atomic_set_unchecked(&ap_wfs_count, 0);
22704 register_hotcpu_notifier(&tboot_cpu_notifier);
22705
22706 acpi_os_set_prepare_sleep(&tboot_sleep);
22707diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
22708index 24d3c91..d06b473 100644
22709--- a/arch/x86/kernel/time.c
22710+++ b/arch/x86/kernel/time.c
22711@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
22712 {
22713 unsigned long pc = instruction_pointer(regs);
22714
22715- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
22716+ if (!user_mode(regs) && in_lock_functions(pc)) {
22717 #ifdef CONFIG_FRAME_POINTER
22718- return *(unsigned long *)(regs->bp + sizeof(long));
22719+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
22720 #else
22721 unsigned long *sp =
22722 (unsigned long *)kernel_stack_pointer(regs);
22723@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
22724 * or above a saved flags. Eflags has bits 22-31 zero,
22725 * kernel addresses don't.
22726 */
22727+
22728+#ifdef CONFIG_PAX_KERNEXEC
22729+ return ktla_ktva(sp[0]);
22730+#else
22731 if (sp[0] >> 22)
22732 return sp[0];
22733 if (sp[1] >> 22)
22734 return sp[1];
22735 #endif
22736+
22737+#endif
22738 }
22739 return pc;
22740 }
22741diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
22742index 9d9d2f9..cad418a 100644
22743--- a/arch/x86/kernel/tls.c
22744+++ b/arch/x86/kernel/tls.c
22745@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
22746 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
22747 return -EINVAL;
22748
22749+#ifdef CONFIG_PAX_SEGMEXEC
22750+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
22751+ return -EINVAL;
22752+#endif
22753+
22754 set_tls_desc(p, idx, &info, 1);
22755
22756 return 0;
22757@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
22758
22759 if (kbuf)
22760 info = kbuf;
22761- else if (__copy_from_user(infobuf, ubuf, count))
22762+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
22763 return -EFAULT;
22764 else
22765 info = infobuf;
22766diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
22767index ecffca1..95c4d13 100644
22768--- a/arch/x86/kernel/traps.c
22769+++ b/arch/x86/kernel/traps.c
22770@@ -68,12 +68,6 @@
22771 #include <asm/setup.h>
22772
22773 asmlinkage int system_call(void);
22774-
22775-/*
22776- * The IDT has to be page-aligned to simplify the Pentium
22777- * F0 0F bug workaround.
22778- */
22779-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
22780 #endif
22781
22782 DECLARE_BITMAP(used_vectors, NR_VECTORS);
22783@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
22784 }
22785
22786 static int __kprobes
22787-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
22788+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
22789 struct pt_regs *regs, long error_code)
22790 {
22791 #ifdef CONFIG_X86_32
22792- if (regs->flags & X86_VM_MASK) {
22793+ if (v8086_mode(regs)) {
22794 /*
22795 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
22796 * On nmi (interrupt 2), do_trap should not be called.
22797@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
22798 return -1;
22799 }
22800 #endif
22801- if (!user_mode(regs)) {
22802+ if (!user_mode_novm(regs)) {
22803 if (!fixup_exception(regs)) {
22804 tsk->thread.error_code = error_code;
22805 tsk->thread.trap_nr = trapnr;
22806+
22807+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22808+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
22809+ str = "PAX: suspicious stack segment fault";
22810+#endif
22811+
22812 die(str, regs, error_code);
22813 }
22814+
22815+#ifdef CONFIG_PAX_REFCOUNT
22816+ if (trapnr == 4)
22817+ pax_report_refcount_overflow(regs);
22818+#endif
22819+
22820 return 0;
22821 }
22822
22823@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
22824 }
22825
22826 static void __kprobes
22827-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
22828+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
22829 long error_code, siginfo_t *info)
22830 {
22831 struct task_struct *tsk = current;
22832@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
22833 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
22834 printk_ratelimit()) {
22835 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
22836- tsk->comm, tsk->pid, str,
22837+ tsk->comm, task_pid_nr(tsk), str,
22838 regs->ip, regs->sp, error_code);
22839 print_vma_addr(" in ", regs->ip);
22840 pr_cont("\n");
22841@@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
22842 conditional_sti(regs);
22843
22844 #ifdef CONFIG_X86_32
22845- if (regs->flags & X86_VM_MASK) {
22846+ if (v8086_mode(regs)) {
22847 local_irq_enable();
22848 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
22849 goto exit;
22850@@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
22851 #endif
22852
22853 tsk = current;
22854- if (!user_mode(regs)) {
22855+ if (!user_mode_novm(regs)) {
22856 if (fixup_exception(regs))
22857 goto exit;
22858
22859 tsk->thread.error_code = error_code;
22860 tsk->thread.trap_nr = X86_TRAP_GP;
22861 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
22862- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
22863+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
22864+
22865+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22866+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
22867+ die("PAX: suspicious general protection fault", regs, error_code);
22868+ else
22869+#endif
22870+
22871 die("general protection fault", regs, error_code);
22872+ }
22873 goto exit;
22874 }
22875
22876+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
22877+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
22878+ struct mm_struct *mm = tsk->mm;
22879+ unsigned long limit;
22880+
22881+ down_write(&mm->mmap_sem);
22882+ limit = mm->context.user_cs_limit;
22883+ if (limit < TASK_SIZE) {
22884+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
22885+ up_write(&mm->mmap_sem);
22886+ return;
22887+ }
22888+ up_write(&mm->mmap_sem);
22889+ }
22890+#endif
22891+
22892 tsk->thread.error_code = error_code;
22893 tsk->thread.trap_nr = X86_TRAP_GP;
22894
22895@@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
22896 /* It's safe to allow irq's after DR6 has been saved */
22897 preempt_conditional_sti(regs);
22898
22899- if (regs->flags & X86_VM_MASK) {
22900+ if (v8086_mode(regs)) {
22901 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
22902 X86_TRAP_DB);
22903 preempt_conditional_cli(regs);
22904@@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
22905 * We already checked v86 mode above, so we can check for kernel mode
22906 * by just checking the CPL of CS.
22907 */
22908- if ((dr6 & DR_STEP) && !user_mode(regs)) {
22909+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
22910 tsk->thread.debugreg6 &= ~DR_STEP;
22911 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
22912 regs->flags &= ~X86_EFLAGS_TF;
22913@@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
22914 return;
22915 conditional_sti(regs);
22916
22917- if (!user_mode_vm(regs))
22918+ if (!user_mode(regs))
22919 {
22920 if (!fixup_exception(regs)) {
22921 task->thread.error_code = error_code;
22922diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
22923index c71025b..b117501 100644
22924--- a/arch/x86/kernel/uprobes.c
22925+++ b/arch/x86/kernel/uprobes.c
22926@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
22927 int ret = NOTIFY_DONE;
22928
22929 /* We are only interested in userspace traps */
22930- if (regs && !user_mode_vm(regs))
22931+ if (regs && !user_mode(regs))
22932 return NOTIFY_DONE;
22933
22934 switch (val) {
22935diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
22936index b9242ba..50c5edd 100644
22937--- a/arch/x86/kernel/verify_cpu.S
22938+++ b/arch/x86/kernel/verify_cpu.S
22939@@ -20,6 +20,7 @@
22940 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
22941 * arch/x86/kernel/trampoline_64.S: secondary processor verification
22942 * arch/x86/kernel/head_32.S: processor startup
22943+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
22944 *
22945 * verify_cpu, returns the status of longmode and SSE in register %eax.
22946 * 0: Success 1: Failure
22947diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
22948index 1dfe69c..a3df6f6 100644
22949--- a/arch/x86/kernel/vm86_32.c
22950+++ b/arch/x86/kernel/vm86_32.c
22951@@ -43,6 +43,7 @@
22952 #include <linux/ptrace.h>
22953 #include <linux/audit.h>
22954 #include <linux/stddef.h>
22955+#include <linux/grsecurity.h>
22956
22957 #include <asm/uaccess.h>
22958 #include <asm/io.h>
22959@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
22960 do_exit(SIGSEGV);
22961 }
22962
22963- tss = &per_cpu(init_tss, get_cpu());
22964+ tss = init_tss + get_cpu();
22965 current->thread.sp0 = current->thread.saved_sp0;
22966 current->thread.sysenter_cs = __KERNEL_CS;
22967 load_sp0(tss, &current->thread);
22968@@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
22969 struct task_struct *tsk;
22970 int tmp, ret = -EPERM;
22971
22972+#ifdef CONFIG_GRKERNSEC_VM86
22973+ if (!capable(CAP_SYS_RAWIO)) {
22974+ gr_handle_vm86();
22975+ goto out;
22976+ }
22977+#endif
22978+
22979 tsk = current;
22980 if (tsk->thread.saved_sp0)
22981 goto out;
22982@@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
22983 int tmp, ret;
22984 struct vm86plus_struct __user *v86;
22985
22986+#ifdef CONFIG_GRKERNSEC_VM86
22987+ if (!capable(CAP_SYS_RAWIO)) {
22988+ gr_handle_vm86();
22989+ ret = -EPERM;
22990+ goto out;
22991+ }
22992+#endif
22993+
22994 tsk = current;
22995 switch (cmd) {
22996 case VM86_REQUEST_IRQ:
22997@@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
22998 tsk->thread.saved_fs = info->regs32->fs;
22999 tsk->thread.saved_gs = get_user_gs(info->regs32);
23000
23001- tss = &per_cpu(init_tss, get_cpu());
23002+ tss = init_tss + get_cpu();
23003 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
23004 if (cpu_has_sep)
23005 tsk->thread.sysenter_cs = 0;
23006@@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
23007 goto cannot_handle;
23008 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
23009 goto cannot_handle;
23010- intr_ptr = (unsigned long __user *) (i << 2);
23011+ intr_ptr = (__force unsigned long __user *) (i << 2);
23012 if (get_user(segoffs, intr_ptr))
23013 goto cannot_handle;
23014 if ((segoffs >> 16) == BIOSSEG)
23015diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
23016index 22a1530..8fbaaad 100644
23017--- a/arch/x86/kernel/vmlinux.lds.S
23018+++ b/arch/x86/kernel/vmlinux.lds.S
23019@@ -26,6 +26,13 @@
23020 #include <asm/page_types.h>
23021 #include <asm/cache.h>
23022 #include <asm/boot.h>
23023+#include <asm/segment.h>
23024+
23025+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23026+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
23027+#else
23028+#define __KERNEL_TEXT_OFFSET 0
23029+#endif
23030
23031 #undef i386 /* in case the preprocessor is a 32bit one */
23032
23033@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
23034
23035 PHDRS {
23036 text PT_LOAD FLAGS(5); /* R_E */
23037+#ifdef CONFIG_X86_32
23038+ module PT_LOAD FLAGS(5); /* R_E */
23039+#endif
23040+#ifdef CONFIG_XEN
23041+ rodata PT_LOAD FLAGS(5); /* R_E */
23042+#else
23043+ rodata PT_LOAD FLAGS(4); /* R__ */
23044+#endif
23045 data PT_LOAD FLAGS(6); /* RW_ */
23046-#ifdef CONFIG_X86_64
23047+ init.begin PT_LOAD FLAGS(6); /* RW_ */
23048 #ifdef CONFIG_SMP
23049 percpu PT_LOAD FLAGS(6); /* RW_ */
23050 #endif
23051+ text.init PT_LOAD FLAGS(5); /* R_E */
23052+ text.exit PT_LOAD FLAGS(5); /* R_E */
23053 init PT_LOAD FLAGS(7); /* RWE */
23054-#endif
23055 note PT_NOTE FLAGS(0); /* ___ */
23056 }
23057
23058 SECTIONS
23059 {
23060 #ifdef CONFIG_X86_32
23061- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
23062- phys_startup_32 = startup_32 - LOAD_OFFSET;
23063+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
23064 #else
23065- . = __START_KERNEL;
23066- phys_startup_64 = startup_64 - LOAD_OFFSET;
23067+ . = __START_KERNEL;
23068 #endif
23069
23070 /* Text and read-only data */
23071- .text : AT(ADDR(.text) - LOAD_OFFSET) {
23072- _text = .;
23073+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23074 /* bootstrapping code */
23075+#ifdef CONFIG_X86_32
23076+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23077+#else
23078+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23079+#endif
23080+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23081+ _text = .;
23082 HEAD_TEXT
23083 #ifdef CONFIG_X86_32
23084 . = ALIGN(PAGE_SIZE);
23085@@ -108,13 +128,48 @@ SECTIONS
23086 IRQENTRY_TEXT
23087 *(.fixup)
23088 *(.gnu.warning)
23089- /* End of text section */
23090- _etext = .;
23091 } :text = 0x9090
23092
23093- NOTES :text :note
23094+ . += __KERNEL_TEXT_OFFSET;
23095
23096- EXCEPTION_TABLE(16) :text = 0x9090
23097+#ifdef CONFIG_X86_32
23098+ . = ALIGN(PAGE_SIZE);
23099+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
23100+
23101+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
23102+ MODULES_EXEC_VADDR = .;
23103+ BYTE(0)
23104+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
23105+ . = ALIGN(HPAGE_SIZE) - 1;
23106+ MODULES_EXEC_END = .;
23107+#endif
23108+
23109+ } :module
23110+#endif
23111+
23112+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
23113+ /* End of text section */
23114+ BYTE(0)
23115+ _etext = . - __KERNEL_TEXT_OFFSET;
23116+ }
23117+
23118+#ifdef CONFIG_X86_32
23119+ . = ALIGN(PAGE_SIZE);
23120+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
23121+ *(.idt)
23122+ . = ALIGN(PAGE_SIZE);
23123+ *(.empty_zero_page)
23124+ *(.initial_pg_fixmap)
23125+ *(.initial_pg_pmd)
23126+ *(.initial_page_table)
23127+ *(.swapper_pg_dir)
23128+ } :rodata
23129+#endif
23130+
23131+ . = ALIGN(PAGE_SIZE);
23132+ NOTES :rodata :note
23133+
23134+ EXCEPTION_TABLE(16) :rodata
23135
23136 #if defined(CONFIG_DEBUG_RODATA)
23137 /* .text should occupy whole number of pages */
23138@@ -126,16 +181,20 @@ SECTIONS
23139
23140 /* Data */
23141 .data : AT(ADDR(.data) - LOAD_OFFSET) {
23142+
23143+#ifdef CONFIG_PAX_KERNEXEC
23144+ . = ALIGN(HPAGE_SIZE);
23145+#else
23146+ . = ALIGN(PAGE_SIZE);
23147+#endif
23148+
23149 /* Start of data section */
23150 _sdata = .;
23151
23152 /* init_task */
23153 INIT_TASK_DATA(THREAD_SIZE)
23154
23155-#ifdef CONFIG_X86_32
23156- /* 32 bit has nosave before _edata */
23157 NOSAVE_DATA
23158-#endif
23159
23160 PAGE_ALIGNED_DATA(PAGE_SIZE)
23161
23162@@ -176,12 +235,19 @@ SECTIONS
23163 #endif /* CONFIG_X86_64 */
23164
23165 /* Init code and data - will be freed after init */
23166- . = ALIGN(PAGE_SIZE);
23167 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
23168+ BYTE(0)
23169+
23170+#ifdef CONFIG_PAX_KERNEXEC
23171+ . = ALIGN(HPAGE_SIZE);
23172+#else
23173+ . = ALIGN(PAGE_SIZE);
23174+#endif
23175+
23176 __init_begin = .; /* paired with __init_end */
23177- }
23178+ } :init.begin
23179
23180-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
23181+#ifdef CONFIG_SMP
23182 /*
23183 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
23184 * output PHDR, so the next output section - .init.text - should
23185@@ -190,12 +256,27 @@ SECTIONS
23186 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
23187 #endif
23188
23189- INIT_TEXT_SECTION(PAGE_SIZE)
23190-#ifdef CONFIG_X86_64
23191- :init
23192-#endif
23193+ . = ALIGN(PAGE_SIZE);
23194+ init_begin = .;
23195+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
23196+ VMLINUX_SYMBOL(_sinittext) = .;
23197+ INIT_TEXT
23198+ VMLINUX_SYMBOL(_einittext) = .;
23199+ . = ALIGN(PAGE_SIZE);
23200+ } :text.init
23201
23202- INIT_DATA_SECTION(16)
23203+ /*
23204+ * .exit.text is discard at runtime, not link time, to deal with
23205+ * references from .altinstructions and .eh_frame
23206+ */
23207+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23208+ EXIT_TEXT
23209+ . = ALIGN(16);
23210+ } :text.exit
23211+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
23212+
23213+ . = ALIGN(PAGE_SIZE);
23214+ INIT_DATA_SECTION(16) :init
23215
23216 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
23217 __x86_cpu_dev_start = .;
23218@@ -257,19 +338,12 @@ SECTIONS
23219 }
23220
23221 . = ALIGN(8);
23222- /*
23223- * .exit.text is discard at runtime, not link time, to deal with
23224- * references from .altinstructions and .eh_frame
23225- */
23226- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
23227- EXIT_TEXT
23228- }
23229
23230 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
23231 EXIT_DATA
23232 }
23233
23234-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
23235+#ifndef CONFIG_SMP
23236 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
23237 #endif
23238
23239@@ -288,16 +362,10 @@ SECTIONS
23240 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
23241 __smp_locks = .;
23242 *(.smp_locks)
23243- . = ALIGN(PAGE_SIZE);
23244 __smp_locks_end = .;
23245+ . = ALIGN(PAGE_SIZE);
23246 }
23247
23248-#ifdef CONFIG_X86_64
23249- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
23250- NOSAVE_DATA
23251- }
23252-#endif
23253-
23254 /* BSS */
23255 . = ALIGN(PAGE_SIZE);
23256 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
23257@@ -313,6 +381,7 @@ SECTIONS
23258 __brk_base = .;
23259 . += 64 * 1024; /* 64k alignment slop space */
23260 *(.brk_reservation) /* areas brk users have reserved */
23261+ . = ALIGN(HPAGE_SIZE);
23262 __brk_limit = .;
23263 }
23264
23265@@ -339,13 +408,12 @@ SECTIONS
23266 * for the boot processor.
23267 */
23268 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
23269-INIT_PER_CPU(gdt_page);
23270 INIT_PER_CPU(irq_stack_union);
23271
23272 /*
23273 * Build-time check on the image size:
23274 */
23275-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
23276+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
23277 "kernel image bigger than KERNEL_IMAGE_SIZE");
23278
23279 #ifdef CONFIG_SMP
23280diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
23281index 9a907a6..f83f921 100644
23282--- a/arch/x86/kernel/vsyscall_64.c
23283+++ b/arch/x86/kernel/vsyscall_64.c
23284@@ -56,15 +56,13 @@
23285 DEFINE_VVAR(int, vgetcpu_mode);
23286 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
23287
23288-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
23289+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
23290
23291 static int __init vsyscall_setup(char *str)
23292 {
23293 if (str) {
23294 if (!strcmp("emulate", str))
23295 vsyscall_mode = EMULATE;
23296- else if (!strcmp("native", str))
23297- vsyscall_mode = NATIVE;
23298 else if (!strcmp("none", str))
23299 vsyscall_mode = NONE;
23300 else
23301@@ -323,8 +321,7 @@ do_ret:
23302 return true;
23303
23304 sigsegv:
23305- force_sig(SIGSEGV, current);
23306- return true;
23307+ do_group_exit(SIGKILL);
23308 }
23309
23310 /*
23311@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
23312 extern char __vvar_page;
23313 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
23314
23315- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
23316- vsyscall_mode == NATIVE
23317- ? PAGE_KERNEL_VSYSCALL
23318- : PAGE_KERNEL_VVAR);
23319+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
23320 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
23321 (unsigned long)VSYSCALL_START);
23322
23323diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
23324index 1330dd1..d220b99 100644
23325--- a/arch/x86/kernel/x8664_ksyms_64.c
23326+++ b/arch/x86/kernel/x8664_ksyms_64.c
23327@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
23328 EXPORT_SYMBOL(copy_user_generic_unrolled);
23329 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
23330 EXPORT_SYMBOL(__copy_user_nocache);
23331-EXPORT_SYMBOL(_copy_from_user);
23332-EXPORT_SYMBOL(_copy_to_user);
23333
23334 EXPORT_SYMBOL(copy_page);
23335 EXPORT_SYMBOL(clear_page);
23336diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
23337index 7a3d075..6cb373d 100644
23338--- a/arch/x86/kernel/x86_init.c
23339+++ b/arch/x86/kernel/x86_init.c
23340@@ -88,7 +88,7 @@ struct x86_init_ops x86_init __initdata = {
23341 },
23342 };
23343
23344-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23345+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
23346 .early_percpu_clock_init = x86_init_noop,
23347 .setup_percpu_clockev = setup_secondary_APIC_clock,
23348 };
23349@@ -96,7 +96,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23350 static void default_nmi_init(void) { };
23351 static int default_i8042_detect(void) { return 1; };
23352
23353-struct x86_platform_ops x86_platform = {
23354+struct x86_platform_ops x86_platform __read_only = {
23355 .calibrate_tsc = native_calibrate_tsc,
23356 .get_wallclock = mach_get_cmos_time,
23357 .set_wallclock = mach_set_rtc_mmss,
23358@@ -110,14 +110,14 @@ struct x86_platform_ops x86_platform = {
23359 };
23360
23361 EXPORT_SYMBOL_GPL(x86_platform);
23362-struct x86_msi_ops x86_msi = {
23363+struct x86_msi_ops x86_msi __read_only = {
23364 .setup_msi_irqs = native_setup_msi_irqs,
23365 .teardown_msi_irq = native_teardown_msi_irq,
23366 .teardown_msi_irqs = default_teardown_msi_irqs,
23367 .restore_msi_irqs = default_restore_msi_irqs,
23368 };
23369
23370-struct x86_io_apic_ops x86_io_apic_ops = {
23371+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
23372 .init = native_io_apic_init_mappings,
23373 .read = native_io_apic_read,
23374 .write = native_io_apic_write,
23375diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
23376index ada87a3..afea76d 100644
23377--- a/arch/x86/kernel/xsave.c
23378+++ b/arch/x86/kernel/xsave.c
23379@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
23380 {
23381 int err;
23382
23383+ buf = (struct xsave_struct __user *)____m(buf);
23384 if (use_xsave())
23385 err = xsave_user(buf);
23386 else if (use_fxsr())
23387@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
23388 */
23389 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
23390 {
23391+ buf = (void __user *)____m(buf);
23392 if (use_xsave()) {
23393 if ((unsigned long)buf % 64 || fx_only) {
23394 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
23395diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
23396index a20ecb5..d0e2194 100644
23397--- a/arch/x86/kvm/cpuid.c
23398+++ b/arch/x86/kvm/cpuid.c
23399@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
23400 struct kvm_cpuid2 *cpuid,
23401 struct kvm_cpuid_entry2 __user *entries)
23402 {
23403- int r;
23404+ int r, i;
23405
23406 r = -E2BIG;
23407 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
23408 goto out;
23409 r = -EFAULT;
23410- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
23411- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23412+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23413 goto out;
23414+ for (i = 0; i < cpuid->nent; ++i) {
23415+ struct kvm_cpuid_entry2 cpuid_entry;
23416+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
23417+ goto out;
23418+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
23419+ }
23420 vcpu->arch.cpuid_nent = cpuid->nent;
23421 kvm_apic_set_version(vcpu);
23422 kvm_x86_ops->cpuid_update(vcpu);
23423@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
23424 struct kvm_cpuid2 *cpuid,
23425 struct kvm_cpuid_entry2 __user *entries)
23426 {
23427- int r;
23428+ int r, i;
23429
23430 r = -E2BIG;
23431 if (cpuid->nent < vcpu->arch.cpuid_nent)
23432 goto out;
23433 r = -EFAULT;
23434- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
23435- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
23436+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
23437 goto out;
23438+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
23439+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
23440+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
23441+ goto out;
23442+ }
23443 return 0;
23444
23445 out:
23446diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
23447index a27e763..54bfe43 100644
23448--- a/arch/x86/kvm/emulate.c
23449+++ b/arch/x86/kvm/emulate.c
23450@@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23451
23452 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
23453 do { \
23454+ unsigned long _tmp; \
23455 __asm__ __volatile__ ( \
23456 _PRE_EFLAGS("0", "4", "2") \
23457 _op _suffix " %"_x"3,%1; " \
23458@@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23459 /* Raw emulation: instruction has two explicit operands. */
23460 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
23461 do { \
23462- unsigned long _tmp; \
23463- \
23464 switch ((ctxt)->dst.bytes) { \
23465 case 2: \
23466 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
23467@@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23468
23469 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
23470 do { \
23471- unsigned long _tmp; \
23472 switch ((ctxt)->dst.bytes) { \
23473 case 1: \
23474 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
23475diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
23476index 9392f52..0e56d77 100644
23477--- a/arch/x86/kvm/lapic.c
23478+++ b/arch/x86/kvm/lapic.c
23479@@ -55,7 +55,7 @@
23480 #define APIC_BUS_CYCLE_NS 1
23481
23482 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
23483-#define apic_debug(fmt, arg...)
23484+#define apic_debug(fmt, arg...) do {} while (0)
23485
23486 #define APIC_LVT_NUM 6
23487 /* 14 is the version for Xeon and Pentium 8.4.8*/
23488diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
23489index 891eb6d..e027900 100644
23490--- a/arch/x86/kvm/paging_tmpl.h
23491+++ b/arch/x86/kvm/paging_tmpl.h
23492@@ -208,7 +208,7 @@ retry_walk:
23493 if (unlikely(kvm_is_error_hva(host_addr)))
23494 goto error;
23495
23496- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
23497+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
23498 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
23499 goto error;
23500 walker->ptep_user[walker->level - 1] = ptep_user;
23501diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
23502index d29d3cd..ec9d522 100644
23503--- a/arch/x86/kvm/svm.c
23504+++ b/arch/x86/kvm/svm.c
23505@@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
23506 int cpu = raw_smp_processor_id();
23507
23508 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
23509+
23510+ pax_open_kernel();
23511 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
23512+ pax_close_kernel();
23513+
23514 load_TR_desc();
23515 }
23516
23517@@ -3881,6 +3885,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
23518 #endif
23519 #endif
23520
23521+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23522+ __set_fs(current_thread_info()->addr_limit);
23523+#endif
23524+
23525 reload_tss(vcpu);
23526
23527 local_irq_disable();
23528diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
23529index 9120ae1..238abc0 100644
23530--- a/arch/x86/kvm/vmx.c
23531+++ b/arch/x86/kvm/vmx.c
23532@@ -1370,7 +1370,11 @@ static void reload_tss(void)
23533 struct desc_struct *descs;
23534
23535 descs = (void *)gdt->address;
23536+
23537+ pax_open_kernel();
23538 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
23539+ pax_close_kernel();
23540+
23541 load_TR_desc();
23542 }
23543
23544@@ -1594,6 +1598,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
23545 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
23546 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
23547
23548+#ifdef CONFIG_PAX_PER_CPU_PGD
23549+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
23550+#endif
23551+
23552 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
23553 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
23554 vmx->loaded_vmcs->cpu = cpu;
23555@@ -2738,8 +2746,11 @@ static __init int hardware_setup(void)
23556 if (!cpu_has_vmx_flexpriority())
23557 flexpriority_enabled = 0;
23558
23559- if (!cpu_has_vmx_tpr_shadow())
23560- kvm_x86_ops->update_cr8_intercept = NULL;
23561+ if (!cpu_has_vmx_tpr_shadow()) {
23562+ pax_open_kernel();
23563+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
23564+ pax_close_kernel();
23565+ }
23566
23567 if (enable_ept && !cpu_has_vmx_ept_2m_page())
23568 kvm_disable_largepages();
23569@@ -3782,7 +3793,10 @@ static void vmx_set_constant_host_state(void)
23570
23571 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
23572 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
23573+
23574+#ifndef CONFIG_PAX_PER_CPU_PGD
23575 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
23576+#endif
23577
23578 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
23579 #ifdef CONFIG_X86_64
23580@@ -3803,7 +3817,7 @@ static void vmx_set_constant_host_state(void)
23581 native_store_idt(&dt);
23582 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
23583
23584- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
23585+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
23586
23587 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
23588 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
23589@@ -6355,6 +6369,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23590 "jmp 2f \n\t"
23591 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
23592 "2: "
23593+
23594+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23595+ "ljmp %[cs],$3f\n\t"
23596+ "3: "
23597+#endif
23598+
23599 /* Save guest registers, load host registers, keep flags */
23600 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
23601 "pop %0 \n\t"
23602@@ -6407,6 +6427,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23603 #endif
23604 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
23605 [wordsize]"i"(sizeof(ulong))
23606+
23607+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23608+ ,[cs]"i"(__KERNEL_CS)
23609+#endif
23610+
23611 : "cc", "memory"
23612 #ifdef CONFIG_X86_64
23613 , "rax", "rbx", "rdi", "rsi"
23614@@ -6420,7 +6445,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23615 if (debugctlmsr)
23616 update_debugctlmsr(debugctlmsr);
23617
23618-#ifndef CONFIG_X86_64
23619+#ifdef CONFIG_X86_32
23620 /*
23621 * The sysexit path does not restore ds/es, so we must set them to
23622 * a reasonable value ourselves.
23623@@ -6429,8 +6454,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23624 * may be executed in interrupt context, which saves and restore segments
23625 * around it, nullifying its effect.
23626 */
23627- loadsegment(ds, __USER_DS);
23628- loadsegment(es, __USER_DS);
23629+ loadsegment(ds, __KERNEL_DS);
23630+ loadsegment(es, __KERNEL_DS);
23631+ loadsegment(ss, __KERNEL_DS);
23632+
23633+#ifdef CONFIG_PAX_KERNEXEC
23634+ loadsegment(fs, __KERNEL_PERCPU);
23635+#endif
23636+
23637+#ifdef CONFIG_PAX_MEMORY_UDEREF
23638+ __set_fs(current_thread_info()->addr_limit);
23639+#endif
23640+
23641 #endif
23642
23643 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
23644diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
23645index c243b81..9eb193f 100644
23646--- a/arch/x86/kvm/x86.c
23647+++ b/arch/x86/kvm/x86.c
23648@@ -1692,8 +1692,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
23649 {
23650 struct kvm *kvm = vcpu->kvm;
23651 int lm = is_long_mode(vcpu);
23652- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
23653- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
23654+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
23655+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
23656 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
23657 : kvm->arch.xen_hvm_config.blob_size_32;
23658 u32 page_num = data & ~PAGE_MASK;
23659@@ -2571,6 +2571,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
23660 if (n < msr_list.nmsrs)
23661 goto out;
23662 r = -EFAULT;
23663+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
23664+ goto out;
23665 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
23666 num_msrs_to_save * sizeof(u32)))
23667 goto out;
23668@@ -2700,7 +2702,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
23669 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
23670 struct kvm_interrupt *irq)
23671 {
23672- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
23673+ if (irq->irq >= KVM_NR_INTERRUPTS)
23674 return -EINVAL;
23675 if (irqchip_in_kernel(vcpu->kvm))
23676 return -ENXIO;
23677@@ -5213,7 +5215,7 @@ static struct notifier_block pvclock_gtod_notifier = {
23678 };
23679 #endif
23680
23681-int kvm_arch_init(void *opaque)
23682+int kvm_arch_init(const void *opaque)
23683 {
23684 int r;
23685 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
23686diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
23687index df4176c..23ce092 100644
23688--- a/arch/x86/lguest/boot.c
23689+++ b/arch/x86/lguest/boot.c
23690@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
23691 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
23692 * Launcher to reboot us.
23693 */
23694-static void lguest_restart(char *reason)
23695+static __noreturn void lguest_restart(char *reason)
23696 {
23697 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
23698+ BUG();
23699 }
23700
23701 /*G:050
23702diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
23703index 00933d5..3a64af9 100644
23704--- a/arch/x86/lib/atomic64_386_32.S
23705+++ b/arch/x86/lib/atomic64_386_32.S
23706@@ -48,6 +48,10 @@ BEGIN(read)
23707 movl (v), %eax
23708 movl 4(v), %edx
23709 RET_ENDP
23710+BEGIN(read_unchecked)
23711+ movl (v), %eax
23712+ movl 4(v), %edx
23713+RET_ENDP
23714 #undef v
23715
23716 #define v %esi
23717@@ -55,6 +59,10 @@ BEGIN(set)
23718 movl %ebx, (v)
23719 movl %ecx, 4(v)
23720 RET_ENDP
23721+BEGIN(set_unchecked)
23722+ movl %ebx, (v)
23723+ movl %ecx, 4(v)
23724+RET_ENDP
23725 #undef v
23726
23727 #define v %esi
23728@@ -70,6 +78,20 @@ RET_ENDP
23729 BEGIN(add)
23730 addl %eax, (v)
23731 adcl %edx, 4(v)
23732+
23733+#ifdef CONFIG_PAX_REFCOUNT
23734+ jno 0f
23735+ subl %eax, (v)
23736+ sbbl %edx, 4(v)
23737+ int $4
23738+0:
23739+ _ASM_EXTABLE(0b, 0b)
23740+#endif
23741+
23742+RET_ENDP
23743+BEGIN(add_unchecked)
23744+ addl %eax, (v)
23745+ adcl %edx, 4(v)
23746 RET_ENDP
23747 #undef v
23748
23749@@ -77,6 +99,24 @@ RET_ENDP
23750 BEGIN(add_return)
23751 addl (v), %eax
23752 adcl 4(v), %edx
23753+
23754+#ifdef CONFIG_PAX_REFCOUNT
23755+ into
23756+1234:
23757+ _ASM_EXTABLE(1234b, 2f)
23758+#endif
23759+
23760+ movl %eax, (v)
23761+ movl %edx, 4(v)
23762+
23763+#ifdef CONFIG_PAX_REFCOUNT
23764+2:
23765+#endif
23766+
23767+RET_ENDP
23768+BEGIN(add_return_unchecked)
23769+ addl (v), %eax
23770+ adcl 4(v), %edx
23771 movl %eax, (v)
23772 movl %edx, 4(v)
23773 RET_ENDP
23774@@ -86,6 +126,20 @@ RET_ENDP
23775 BEGIN(sub)
23776 subl %eax, (v)
23777 sbbl %edx, 4(v)
23778+
23779+#ifdef CONFIG_PAX_REFCOUNT
23780+ jno 0f
23781+ addl %eax, (v)
23782+ adcl %edx, 4(v)
23783+ int $4
23784+0:
23785+ _ASM_EXTABLE(0b, 0b)
23786+#endif
23787+
23788+RET_ENDP
23789+BEGIN(sub_unchecked)
23790+ subl %eax, (v)
23791+ sbbl %edx, 4(v)
23792 RET_ENDP
23793 #undef v
23794
23795@@ -96,6 +150,27 @@ BEGIN(sub_return)
23796 sbbl $0, %edx
23797 addl (v), %eax
23798 adcl 4(v), %edx
23799+
23800+#ifdef CONFIG_PAX_REFCOUNT
23801+ into
23802+1234:
23803+ _ASM_EXTABLE(1234b, 2f)
23804+#endif
23805+
23806+ movl %eax, (v)
23807+ movl %edx, 4(v)
23808+
23809+#ifdef CONFIG_PAX_REFCOUNT
23810+2:
23811+#endif
23812+
23813+RET_ENDP
23814+BEGIN(sub_return_unchecked)
23815+ negl %edx
23816+ negl %eax
23817+ sbbl $0, %edx
23818+ addl (v), %eax
23819+ adcl 4(v), %edx
23820 movl %eax, (v)
23821 movl %edx, 4(v)
23822 RET_ENDP
23823@@ -105,6 +180,20 @@ RET_ENDP
23824 BEGIN(inc)
23825 addl $1, (v)
23826 adcl $0, 4(v)
23827+
23828+#ifdef CONFIG_PAX_REFCOUNT
23829+ jno 0f
23830+ subl $1, (v)
23831+ sbbl $0, 4(v)
23832+ int $4
23833+0:
23834+ _ASM_EXTABLE(0b, 0b)
23835+#endif
23836+
23837+RET_ENDP
23838+BEGIN(inc_unchecked)
23839+ addl $1, (v)
23840+ adcl $0, 4(v)
23841 RET_ENDP
23842 #undef v
23843
23844@@ -114,6 +203,26 @@ BEGIN(inc_return)
23845 movl 4(v), %edx
23846 addl $1, %eax
23847 adcl $0, %edx
23848+
23849+#ifdef CONFIG_PAX_REFCOUNT
23850+ into
23851+1234:
23852+ _ASM_EXTABLE(1234b, 2f)
23853+#endif
23854+
23855+ movl %eax, (v)
23856+ movl %edx, 4(v)
23857+
23858+#ifdef CONFIG_PAX_REFCOUNT
23859+2:
23860+#endif
23861+
23862+RET_ENDP
23863+BEGIN(inc_return_unchecked)
23864+ movl (v), %eax
23865+ movl 4(v), %edx
23866+ addl $1, %eax
23867+ adcl $0, %edx
23868 movl %eax, (v)
23869 movl %edx, 4(v)
23870 RET_ENDP
23871@@ -123,6 +232,20 @@ RET_ENDP
23872 BEGIN(dec)
23873 subl $1, (v)
23874 sbbl $0, 4(v)
23875+
23876+#ifdef CONFIG_PAX_REFCOUNT
23877+ jno 0f
23878+ addl $1, (v)
23879+ adcl $0, 4(v)
23880+ int $4
23881+0:
23882+ _ASM_EXTABLE(0b, 0b)
23883+#endif
23884+
23885+RET_ENDP
23886+BEGIN(dec_unchecked)
23887+ subl $1, (v)
23888+ sbbl $0, 4(v)
23889 RET_ENDP
23890 #undef v
23891
23892@@ -132,6 +255,26 @@ BEGIN(dec_return)
23893 movl 4(v), %edx
23894 subl $1, %eax
23895 sbbl $0, %edx
23896+
23897+#ifdef CONFIG_PAX_REFCOUNT
23898+ into
23899+1234:
23900+ _ASM_EXTABLE(1234b, 2f)
23901+#endif
23902+
23903+ movl %eax, (v)
23904+ movl %edx, 4(v)
23905+
23906+#ifdef CONFIG_PAX_REFCOUNT
23907+2:
23908+#endif
23909+
23910+RET_ENDP
23911+BEGIN(dec_return_unchecked)
23912+ movl (v), %eax
23913+ movl 4(v), %edx
23914+ subl $1, %eax
23915+ sbbl $0, %edx
23916 movl %eax, (v)
23917 movl %edx, 4(v)
23918 RET_ENDP
23919@@ -143,6 +286,13 @@ BEGIN(add_unless)
23920 adcl %edx, %edi
23921 addl (v), %eax
23922 adcl 4(v), %edx
23923+
23924+#ifdef CONFIG_PAX_REFCOUNT
23925+ into
23926+1234:
23927+ _ASM_EXTABLE(1234b, 2f)
23928+#endif
23929+
23930 cmpl %eax, %ecx
23931 je 3f
23932 1:
23933@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
23934 1:
23935 addl $1, %eax
23936 adcl $0, %edx
23937+
23938+#ifdef CONFIG_PAX_REFCOUNT
23939+ into
23940+1234:
23941+ _ASM_EXTABLE(1234b, 2f)
23942+#endif
23943+
23944 movl %eax, (v)
23945 movl %edx, 4(v)
23946 movl $1, %eax
23947@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
23948 movl 4(v), %edx
23949 subl $1, %eax
23950 sbbl $0, %edx
23951+
23952+#ifdef CONFIG_PAX_REFCOUNT
23953+ into
23954+1234:
23955+ _ASM_EXTABLE(1234b, 1f)
23956+#endif
23957+
23958 js 1f
23959 movl %eax, (v)
23960 movl %edx, 4(v)
23961diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
23962index f5cc9eb..51fa319 100644
23963--- a/arch/x86/lib/atomic64_cx8_32.S
23964+++ b/arch/x86/lib/atomic64_cx8_32.S
23965@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
23966 CFI_STARTPROC
23967
23968 read64 %ecx
23969+ pax_force_retaddr
23970 ret
23971 CFI_ENDPROC
23972 ENDPROC(atomic64_read_cx8)
23973
23974+ENTRY(atomic64_read_unchecked_cx8)
23975+ CFI_STARTPROC
23976+
23977+ read64 %ecx
23978+ pax_force_retaddr
23979+ ret
23980+ CFI_ENDPROC
23981+ENDPROC(atomic64_read_unchecked_cx8)
23982+
23983 ENTRY(atomic64_set_cx8)
23984 CFI_STARTPROC
23985
23986@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
23987 cmpxchg8b (%esi)
23988 jne 1b
23989
23990+ pax_force_retaddr
23991 ret
23992 CFI_ENDPROC
23993 ENDPROC(atomic64_set_cx8)
23994
23995+ENTRY(atomic64_set_unchecked_cx8)
23996+ CFI_STARTPROC
23997+
23998+1:
23999+/* we don't need LOCK_PREFIX since aligned 64-bit writes
24000+ * are atomic on 586 and newer */
24001+ cmpxchg8b (%esi)
24002+ jne 1b
24003+
24004+ pax_force_retaddr
24005+ ret
24006+ CFI_ENDPROC
24007+ENDPROC(atomic64_set_unchecked_cx8)
24008+
24009 ENTRY(atomic64_xchg_cx8)
24010 CFI_STARTPROC
24011
24012@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
24013 cmpxchg8b (%esi)
24014 jne 1b
24015
24016+ pax_force_retaddr
24017 ret
24018 CFI_ENDPROC
24019 ENDPROC(atomic64_xchg_cx8)
24020
24021-.macro addsub_return func ins insc
24022-ENTRY(atomic64_\func\()_return_cx8)
24023+.macro addsub_return func ins insc unchecked=""
24024+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24025 CFI_STARTPROC
24026 SAVE ebp
24027 SAVE ebx
24028@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
24029 movl %edx, %ecx
24030 \ins\()l %esi, %ebx
24031 \insc\()l %edi, %ecx
24032+
24033+.ifb \unchecked
24034+#ifdef CONFIG_PAX_REFCOUNT
24035+ into
24036+2:
24037+ _ASM_EXTABLE(2b, 3f)
24038+#endif
24039+.endif
24040+
24041 LOCK_PREFIX
24042 cmpxchg8b (%ebp)
24043 jne 1b
24044-
24045-10:
24046 movl %ebx, %eax
24047 movl %ecx, %edx
24048+
24049+.ifb \unchecked
24050+#ifdef CONFIG_PAX_REFCOUNT
24051+3:
24052+#endif
24053+.endif
24054+
24055 RESTORE edi
24056 RESTORE esi
24057 RESTORE ebx
24058 RESTORE ebp
24059+ pax_force_retaddr
24060 ret
24061 CFI_ENDPROC
24062-ENDPROC(atomic64_\func\()_return_cx8)
24063+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24064 .endm
24065
24066 addsub_return add add adc
24067 addsub_return sub sub sbb
24068+addsub_return add add adc _unchecked
24069+addsub_return sub sub sbb _unchecked
24070
24071-.macro incdec_return func ins insc
24072-ENTRY(atomic64_\func\()_return_cx8)
24073+.macro incdec_return func ins insc unchecked=""
24074+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24075 CFI_STARTPROC
24076 SAVE ebx
24077
24078@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
24079 movl %edx, %ecx
24080 \ins\()l $1, %ebx
24081 \insc\()l $0, %ecx
24082+
24083+.ifb \unchecked
24084+#ifdef CONFIG_PAX_REFCOUNT
24085+ into
24086+2:
24087+ _ASM_EXTABLE(2b, 3f)
24088+#endif
24089+.endif
24090+
24091 LOCK_PREFIX
24092 cmpxchg8b (%esi)
24093 jne 1b
24094
24095-10:
24096 movl %ebx, %eax
24097 movl %ecx, %edx
24098+
24099+.ifb \unchecked
24100+#ifdef CONFIG_PAX_REFCOUNT
24101+3:
24102+#endif
24103+.endif
24104+
24105 RESTORE ebx
24106+ pax_force_retaddr
24107 ret
24108 CFI_ENDPROC
24109-ENDPROC(atomic64_\func\()_return_cx8)
24110+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24111 .endm
24112
24113 incdec_return inc add adc
24114 incdec_return dec sub sbb
24115+incdec_return inc add adc _unchecked
24116+incdec_return dec sub sbb _unchecked
24117
24118 ENTRY(atomic64_dec_if_positive_cx8)
24119 CFI_STARTPROC
24120@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
24121 movl %edx, %ecx
24122 subl $1, %ebx
24123 sbb $0, %ecx
24124+
24125+#ifdef CONFIG_PAX_REFCOUNT
24126+ into
24127+1234:
24128+ _ASM_EXTABLE(1234b, 2f)
24129+#endif
24130+
24131 js 2f
24132 LOCK_PREFIX
24133 cmpxchg8b (%esi)
24134@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
24135 movl %ebx, %eax
24136 movl %ecx, %edx
24137 RESTORE ebx
24138+ pax_force_retaddr
24139 ret
24140 CFI_ENDPROC
24141 ENDPROC(atomic64_dec_if_positive_cx8)
24142@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
24143 movl %edx, %ecx
24144 addl %ebp, %ebx
24145 adcl %edi, %ecx
24146+
24147+#ifdef CONFIG_PAX_REFCOUNT
24148+ into
24149+1234:
24150+ _ASM_EXTABLE(1234b, 3f)
24151+#endif
24152+
24153 LOCK_PREFIX
24154 cmpxchg8b (%esi)
24155 jne 1b
24156@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
24157 CFI_ADJUST_CFA_OFFSET -8
24158 RESTORE ebx
24159 RESTORE ebp
24160+ pax_force_retaddr
24161 ret
24162 4:
24163 cmpl %edx, 4(%esp)
24164@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
24165 xorl %ecx, %ecx
24166 addl $1, %ebx
24167 adcl %edx, %ecx
24168+
24169+#ifdef CONFIG_PAX_REFCOUNT
24170+ into
24171+1234:
24172+ _ASM_EXTABLE(1234b, 3f)
24173+#endif
24174+
24175 LOCK_PREFIX
24176 cmpxchg8b (%esi)
24177 jne 1b
24178@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
24179 movl $1, %eax
24180 3:
24181 RESTORE ebx
24182+ pax_force_retaddr
24183 ret
24184 CFI_ENDPROC
24185 ENDPROC(atomic64_inc_not_zero_cx8)
24186diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
24187index 2af5df3..62b1a5a 100644
24188--- a/arch/x86/lib/checksum_32.S
24189+++ b/arch/x86/lib/checksum_32.S
24190@@ -29,7 +29,8 @@
24191 #include <asm/dwarf2.h>
24192 #include <asm/errno.h>
24193 #include <asm/asm.h>
24194-
24195+#include <asm/segment.h>
24196+
24197 /*
24198 * computes a partial checksum, e.g. for TCP/UDP fragments
24199 */
24200@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
24201
24202 #define ARGBASE 16
24203 #define FP 12
24204-
24205-ENTRY(csum_partial_copy_generic)
24206+
24207+ENTRY(csum_partial_copy_generic_to_user)
24208 CFI_STARTPROC
24209+
24210+#ifdef CONFIG_PAX_MEMORY_UDEREF
24211+ pushl_cfi %gs
24212+ popl_cfi %es
24213+ jmp csum_partial_copy_generic
24214+#endif
24215+
24216+ENTRY(csum_partial_copy_generic_from_user)
24217+
24218+#ifdef CONFIG_PAX_MEMORY_UDEREF
24219+ pushl_cfi %gs
24220+ popl_cfi %ds
24221+#endif
24222+
24223+ENTRY(csum_partial_copy_generic)
24224 subl $4,%esp
24225 CFI_ADJUST_CFA_OFFSET 4
24226 pushl_cfi %edi
24227@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
24228 jmp 4f
24229 SRC(1: movw (%esi), %bx )
24230 addl $2, %esi
24231-DST( movw %bx, (%edi) )
24232+DST( movw %bx, %es:(%edi) )
24233 addl $2, %edi
24234 addw %bx, %ax
24235 adcl $0, %eax
24236@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
24237 SRC(1: movl (%esi), %ebx )
24238 SRC( movl 4(%esi), %edx )
24239 adcl %ebx, %eax
24240-DST( movl %ebx, (%edi) )
24241+DST( movl %ebx, %es:(%edi) )
24242 adcl %edx, %eax
24243-DST( movl %edx, 4(%edi) )
24244+DST( movl %edx, %es:4(%edi) )
24245
24246 SRC( movl 8(%esi), %ebx )
24247 SRC( movl 12(%esi), %edx )
24248 adcl %ebx, %eax
24249-DST( movl %ebx, 8(%edi) )
24250+DST( movl %ebx, %es:8(%edi) )
24251 adcl %edx, %eax
24252-DST( movl %edx, 12(%edi) )
24253+DST( movl %edx, %es:12(%edi) )
24254
24255 SRC( movl 16(%esi), %ebx )
24256 SRC( movl 20(%esi), %edx )
24257 adcl %ebx, %eax
24258-DST( movl %ebx, 16(%edi) )
24259+DST( movl %ebx, %es:16(%edi) )
24260 adcl %edx, %eax
24261-DST( movl %edx, 20(%edi) )
24262+DST( movl %edx, %es:20(%edi) )
24263
24264 SRC( movl 24(%esi), %ebx )
24265 SRC( movl 28(%esi), %edx )
24266 adcl %ebx, %eax
24267-DST( movl %ebx, 24(%edi) )
24268+DST( movl %ebx, %es:24(%edi) )
24269 adcl %edx, %eax
24270-DST( movl %edx, 28(%edi) )
24271+DST( movl %edx, %es:28(%edi) )
24272
24273 lea 32(%esi), %esi
24274 lea 32(%edi), %edi
24275@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
24276 shrl $2, %edx # This clears CF
24277 SRC(3: movl (%esi), %ebx )
24278 adcl %ebx, %eax
24279-DST( movl %ebx, (%edi) )
24280+DST( movl %ebx, %es:(%edi) )
24281 lea 4(%esi), %esi
24282 lea 4(%edi), %edi
24283 dec %edx
24284@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
24285 jb 5f
24286 SRC( movw (%esi), %cx )
24287 leal 2(%esi), %esi
24288-DST( movw %cx, (%edi) )
24289+DST( movw %cx, %es:(%edi) )
24290 leal 2(%edi), %edi
24291 je 6f
24292 shll $16,%ecx
24293 SRC(5: movb (%esi), %cl )
24294-DST( movb %cl, (%edi) )
24295+DST( movb %cl, %es:(%edi) )
24296 6: addl %ecx, %eax
24297 adcl $0, %eax
24298 7:
24299@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
24300
24301 6001:
24302 movl ARGBASE+20(%esp), %ebx # src_err_ptr
24303- movl $-EFAULT, (%ebx)
24304+ movl $-EFAULT, %ss:(%ebx)
24305
24306 # zero the complete destination - computing the rest
24307 # is too much work
24308@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
24309
24310 6002:
24311 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24312- movl $-EFAULT,(%ebx)
24313+ movl $-EFAULT,%ss:(%ebx)
24314 jmp 5000b
24315
24316 .previous
24317
24318+ pushl_cfi %ss
24319+ popl_cfi %ds
24320+ pushl_cfi %ss
24321+ popl_cfi %es
24322 popl_cfi %ebx
24323 CFI_RESTORE ebx
24324 popl_cfi %esi
24325@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
24326 popl_cfi %ecx # equivalent to addl $4,%esp
24327 ret
24328 CFI_ENDPROC
24329-ENDPROC(csum_partial_copy_generic)
24330+ENDPROC(csum_partial_copy_generic_to_user)
24331
24332 #else
24333
24334 /* Version for PentiumII/PPro */
24335
24336 #define ROUND1(x) \
24337+ nop; nop; nop; \
24338 SRC(movl x(%esi), %ebx ) ; \
24339 addl %ebx, %eax ; \
24340- DST(movl %ebx, x(%edi) ) ;
24341+ DST(movl %ebx, %es:x(%edi)) ;
24342
24343 #define ROUND(x) \
24344+ nop; nop; nop; \
24345 SRC(movl x(%esi), %ebx ) ; \
24346 adcl %ebx, %eax ; \
24347- DST(movl %ebx, x(%edi) ) ;
24348+ DST(movl %ebx, %es:x(%edi)) ;
24349
24350 #define ARGBASE 12
24351-
24352-ENTRY(csum_partial_copy_generic)
24353+
24354+ENTRY(csum_partial_copy_generic_to_user)
24355 CFI_STARTPROC
24356+
24357+#ifdef CONFIG_PAX_MEMORY_UDEREF
24358+ pushl_cfi %gs
24359+ popl_cfi %es
24360+ jmp csum_partial_copy_generic
24361+#endif
24362+
24363+ENTRY(csum_partial_copy_generic_from_user)
24364+
24365+#ifdef CONFIG_PAX_MEMORY_UDEREF
24366+ pushl_cfi %gs
24367+ popl_cfi %ds
24368+#endif
24369+
24370+ENTRY(csum_partial_copy_generic)
24371 pushl_cfi %ebx
24372 CFI_REL_OFFSET ebx, 0
24373 pushl_cfi %edi
24374@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
24375 subl %ebx, %edi
24376 lea -1(%esi),%edx
24377 andl $-32,%edx
24378- lea 3f(%ebx,%ebx), %ebx
24379+ lea 3f(%ebx,%ebx,2), %ebx
24380 testl %esi, %esi
24381 jmp *%ebx
24382 1: addl $64,%esi
24383@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
24384 jb 5f
24385 SRC( movw (%esi), %dx )
24386 leal 2(%esi), %esi
24387-DST( movw %dx, (%edi) )
24388+DST( movw %dx, %es:(%edi) )
24389 leal 2(%edi), %edi
24390 je 6f
24391 shll $16,%edx
24392 5:
24393 SRC( movb (%esi), %dl )
24394-DST( movb %dl, (%edi) )
24395+DST( movb %dl, %es:(%edi) )
24396 6: addl %edx, %eax
24397 adcl $0, %eax
24398 7:
24399 .section .fixup, "ax"
24400 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
24401- movl $-EFAULT, (%ebx)
24402+ movl $-EFAULT, %ss:(%ebx)
24403 # zero the complete destination (computing the rest is too much work)
24404 movl ARGBASE+8(%esp),%edi # dst
24405 movl ARGBASE+12(%esp),%ecx # len
24406@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
24407 rep; stosb
24408 jmp 7b
24409 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24410- movl $-EFAULT, (%ebx)
24411+ movl $-EFAULT, %ss:(%ebx)
24412 jmp 7b
24413 .previous
24414
24415+#ifdef CONFIG_PAX_MEMORY_UDEREF
24416+ pushl_cfi %ss
24417+ popl_cfi %ds
24418+ pushl_cfi %ss
24419+ popl_cfi %es
24420+#endif
24421+
24422 popl_cfi %esi
24423 CFI_RESTORE esi
24424 popl_cfi %edi
24425@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
24426 CFI_RESTORE ebx
24427 ret
24428 CFI_ENDPROC
24429-ENDPROC(csum_partial_copy_generic)
24430+ENDPROC(csum_partial_copy_generic_to_user)
24431
24432 #undef ROUND
24433 #undef ROUND1
24434diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
24435index f2145cf..cea889d 100644
24436--- a/arch/x86/lib/clear_page_64.S
24437+++ b/arch/x86/lib/clear_page_64.S
24438@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
24439 movl $4096/8,%ecx
24440 xorl %eax,%eax
24441 rep stosq
24442+ pax_force_retaddr
24443 ret
24444 CFI_ENDPROC
24445 ENDPROC(clear_page_c)
24446@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
24447 movl $4096,%ecx
24448 xorl %eax,%eax
24449 rep stosb
24450+ pax_force_retaddr
24451 ret
24452 CFI_ENDPROC
24453 ENDPROC(clear_page_c_e)
24454@@ -43,6 +45,7 @@ ENTRY(clear_page)
24455 leaq 64(%rdi),%rdi
24456 jnz .Lloop
24457 nop
24458+ pax_force_retaddr
24459 ret
24460 CFI_ENDPROC
24461 .Lclear_page_end:
24462@@ -58,7 +61,7 @@ ENDPROC(clear_page)
24463
24464 #include <asm/cpufeature.h>
24465
24466- .section .altinstr_replacement,"ax"
24467+ .section .altinstr_replacement,"a"
24468 1: .byte 0xeb /* jmp <disp8> */
24469 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
24470 2: .byte 0xeb /* jmp <disp8> */
24471diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
24472index 1e572c5..2a162cd 100644
24473--- a/arch/x86/lib/cmpxchg16b_emu.S
24474+++ b/arch/x86/lib/cmpxchg16b_emu.S
24475@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
24476
24477 popf
24478 mov $1, %al
24479+ pax_force_retaddr
24480 ret
24481
24482 not_same:
24483 popf
24484 xor %al,%al
24485+ pax_force_retaddr
24486 ret
24487
24488 CFI_ENDPROC
24489diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
24490index 176cca6..1166c50 100644
24491--- a/arch/x86/lib/copy_page_64.S
24492+++ b/arch/x86/lib/copy_page_64.S
24493@@ -9,6 +9,7 @@ copy_page_rep:
24494 CFI_STARTPROC
24495 movl $4096/8, %ecx
24496 rep movsq
24497+ pax_force_retaddr
24498 ret
24499 CFI_ENDPROC
24500 ENDPROC(copy_page_rep)
24501@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
24502
24503 ENTRY(copy_page)
24504 CFI_STARTPROC
24505- subq $2*8, %rsp
24506- CFI_ADJUST_CFA_OFFSET 2*8
24507+ subq $3*8, %rsp
24508+ CFI_ADJUST_CFA_OFFSET 3*8
24509 movq %rbx, (%rsp)
24510 CFI_REL_OFFSET rbx, 0
24511 movq %r12, 1*8(%rsp)
24512 CFI_REL_OFFSET r12, 1*8
24513+ movq %r13, 2*8(%rsp)
24514+ CFI_REL_OFFSET r13, 2*8
24515
24516 movl $(4096/64)-5, %ecx
24517 .p2align 4
24518@@ -36,7 +39,7 @@ ENTRY(copy_page)
24519 movq 0x8*2(%rsi), %rdx
24520 movq 0x8*3(%rsi), %r8
24521 movq 0x8*4(%rsi), %r9
24522- movq 0x8*5(%rsi), %r10
24523+ movq 0x8*5(%rsi), %r13
24524 movq 0x8*6(%rsi), %r11
24525 movq 0x8*7(%rsi), %r12
24526
24527@@ -47,7 +50,7 @@ ENTRY(copy_page)
24528 movq %rdx, 0x8*2(%rdi)
24529 movq %r8, 0x8*3(%rdi)
24530 movq %r9, 0x8*4(%rdi)
24531- movq %r10, 0x8*5(%rdi)
24532+ movq %r13, 0x8*5(%rdi)
24533 movq %r11, 0x8*6(%rdi)
24534 movq %r12, 0x8*7(%rdi)
24535
24536@@ -66,7 +69,7 @@ ENTRY(copy_page)
24537 movq 0x8*2(%rsi), %rdx
24538 movq 0x8*3(%rsi), %r8
24539 movq 0x8*4(%rsi), %r9
24540- movq 0x8*5(%rsi), %r10
24541+ movq 0x8*5(%rsi), %r13
24542 movq 0x8*6(%rsi), %r11
24543 movq 0x8*7(%rsi), %r12
24544
24545@@ -75,7 +78,7 @@ ENTRY(copy_page)
24546 movq %rdx, 0x8*2(%rdi)
24547 movq %r8, 0x8*3(%rdi)
24548 movq %r9, 0x8*4(%rdi)
24549- movq %r10, 0x8*5(%rdi)
24550+ movq %r13, 0x8*5(%rdi)
24551 movq %r11, 0x8*6(%rdi)
24552 movq %r12, 0x8*7(%rdi)
24553
24554@@ -87,8 +90,11 @@ ENTRY(copy_page)
24555 CFI_RESTORE rbx
24556 movq 1*8(%rsp), %r12
24557 CFI_RESTORE r12
24558- addq $2*8, %rsp
24559- CFI_ADJUST_CFA_OFFSET -2*8
24560+ movq 2*8(%rsp), %r13
24561+ CFI_RESTORE r13
24562+ addq $3*8, %rsp
24563+ CFI_ADJUST_CFA_OFFSET -3*8
24564+ pax_force_retaddr
24565 ret
24566 .Lcopy_page_end:
24567 CFI_ENDPROC
24568@@ -99,7 +105,7 @@ ENDPROC(copy_page)
24569
24570 #include <asm/cpufeature.h>
24571
24572- .section .altinstr_replacement,"ax"
24573+ .section .altinstr_replacement,"a"
24574 1: .byte 0xeb /* jmp <disp8> */
24575 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
24576 2:
24577diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
24578index a30ca15..d25fab6 100644
24579--- a/arch/x86/lib/copy_user_64.S
24580+++ b/arch/x86/lib/copy_user_64.S
24581@@ -18,6 +18,7 @@
24582 #include <asm/alternative-asm.h>
24583 #include <asm/asm.h>
24584 #include <asm/smap.h>
24585+#include <asm/pgtable.h>
24586
24587 /*
24588 * By placing feature2 after feature1 in altinstructions section, we logically
24589@@ -31,7 +32,7 @@
24590 .byte 0xe9 /* 32bit jump */
24591 .long \orig-1f /* by default jump to orig */
24592 1:
24593- .section .altinstr_replacement,"ax"
24594+ .section .altinstr_replacement,"a"
24595 2: .byte 0xe9 /* near jump with 32bit immediate */
24596 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
24597 3: .byte 0xe9 /* near jump with 32bit immediate */
24598@@ -70,47 +71,20 @@
24599 #endif
24600 .endm
24601
24602-/* Standard copy_to_user with segment limit checking */
24603-ENTRY(_copy_to_user)
24604- CFI_STARTPROC
24605- GET_THREAD_INFO(%rax)
24606- movq %rdi,%rcx
24607- addq %rdx,%rcx
24608- jc bad_to_user
24609- cmpq TI_addr_limit(%rax),%rcx
24610- ja bad_to_user
24611- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
24612- copy_user_generic_unrolled,copy_user_generic_string, \
24613- copy_user_enhanced_fast_string
24614- CFI_ENDPROC
24615-ENDPROC(_copy_to_user)
24616-
24617-/* Standard copy_from_user with segment limit checking */
24618-ENTRY(_copy_from_user)
24619- CFI_STARTPROC
24620- GET_THREAD_INFO(%rax)
24621- movq %rsi,%rcx
24622- addq %rdx,%rcx
24623- jc bad_from_user
24624- cmpq TI_addr_limit(%rax),%rcx
24625- ja bad_from_user
24626- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
24627- copy_user_generic_unrolled,copy_user_generic_string, \
24628- copy_user_enhanced_fast_string
24629- CFI_ENDPROC
24630-ENDPROC(_copy_from_user)
24631-
24632 .section .fixup,"ax"
24633 /* must zero dest */
24634 ENTRY(bad_from_user)
24635 bad_from_user:
24636 CFI_STARTPROC
24637+ testl %edx,%edx
24638+ js bad_to_user
24639 movl %edx,%ecx
24640 xorl %eax,%eax
24641 rep
24642 stosb
24643 bad_to_user:
24644 movl %edx,%eax
24645+ pax_force_retaddr
24646 ret
24647 CFI_ENDPROC
24648 ENDPROC(bad_from_user)
24649@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
24650 jz 17f
24651 1: movq (%rsi),%r8
24652 2: movq 1*8(%rsi),%r9
24653-3: movq 2*8(%rsi),%r10
24654+3: movq 2*8(%rsi),%rax
24655 4: movq 3*8(%rsi),%r11
24656 5: movq %r8,(%rdi)
24657 6: movq %r9,1*8(%rdi)
24658-7: movq %r10,2*8(%rdi)
24659+7: movq %rax,2*8(%rdi)
24660 8: movq %r11,3*8(%rdi)
24661 9: movq 4*8(%rsi),%r8
24662 10: movq 5*8(%rsi),%r9
24663-11: movq 6*8(%rsi),%r10
24664+11: movq 6*8(%rsi),%rax
24665 12: movq 7*8(%rsi),%r11
24666 13: movq %r8,4*8(%rdi)
24667 14: movq %r9,5*8(%rdi)
24668-15: movq %r10,6*8(%rdi)
24669+15: movq %rax,6*8(%rdi)
24670 16: movq %r11,7*8(%rdi)
24671 leaq 64(%rsi),%rsi
24672 leaq 64(%rdi),%rdi
24673@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
24674 jnz 21b
24675 23: xor %eax,%eax
24676 ASM_CLAC
24677+ pax_force_retaddr
24678 ret
24679
24680 .section .fixup,"ax"
24681@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
24682 movsb
24683 4: xorl %eax,%eax
24684 ASM_CLAC
24685+ pax_force_retaddr
24686 ret
24687
24688 .section .fixup,"ax"
24689@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
24690 movsb
24691 2: xorl %eax,%eax
24692 ASM_CLAC
24693+ pax_force_retaddr
24694 ret
24695
24696 .section .fixup,"ax"
24697diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
24698index 6a4f43c..f5f9e26 100644
24699--- a/arch/x86/lib/copy_user_nocache_64.S
24700+++ b/arch/x86/lib/copy_user_nocache_64.S
24701@@ -8,6 +8,7 @@
24702
24703 #include <linux/linkage.h>
24704 #include <asm/dwarf2.h>
24705+#include <asm/alternative-asm.h>
24706
24707 #define FIX_ALIGNMENT 1
24708
24709@@ -16,6 +17,7 @@
24710 #include <asm/thread_info.h>
24711 #include <asm/asm.h>
24712 #include <asm/smap.h>
24713+#include <asm/pgtable.h>
24714
24715 .macro ALIGN_DESTINATION
24716 #ifdef FIX_ALIGNMENT
24717@@ -49,6 +51,15 @@
24718 */
24719 ENTRY(__copy_user_nocache)
24720 CFI_STARTPROC
24721+
24722+#ifdef CONFIG_PAX_MEMORY_UDEREF
24723+ mov $PAX_USER_SHADOW_BASE,%rcx
24724+ cmp %rcx,%rsi
24725+ jae 1f
24726+ add %rcx,%rsi
24727+1:
24728+#endif
24729+
24730 ASM_STAC
24731 cmpl $8,%edx
24732 jb 20f /* less then 8 bytes, go to byte copy loop */
24733@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
24734 jz 17f
24735 1: movq (%rsi),%r8
24736 2: movq 1*8(%rsi),%r9
24737-3: movq 2*8(%rsi),%r10
24738+3: movq 2*8(%rsi),%rax
24739 4: movq 3*8(%rsi),%r11
24740 5: movnti %r8,(%rdi)
24741 6: movnti %r9,1*8(%rdi)
24742-7: movnti %r10,2*8(%rdi)
24743+7: movnti %rax,2*8(%rdi)
24744 8: movnti %r11,3*8(%rdi)
24745 9: movq 4*8(%rsi),%r8
24746 10: movq 5*8(%rsi),%r9
24747-11: movq 6*8(%rsi),%r10
24748+11: movq 6*8(%rsi),%rax
24749 12: movq 7*8(%rsi),%r11
24750 13: movnti %r8,4*8(%rdi)
24751 14: movnti %r9,5*8(%rdi)
24752-15: movnti %r10,6*8(%rdi)
24753+15: movnti %rax,6*8(%rdi)
24754 16: movnti %r11,7*8(%rdi)
24755 leaq 64(%rsi),%rsi
24756 leaq 64(%rdi),%rdi
24757@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
24758 23: xorl %eax,%eax
24759 ASM_CLAC
24760 sfence
24761+ pax_force_retaddr
24762 ret
24763
24764 .section .fixup,"ax"
24765diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
24766index 2419d5f..953ee51 100644
24767--- a/arch/x86/lib/csum-copy_64.S
24768+++ b/arch/x86/lib/csum-copy_64.S
24769@@ -9,6 +9,7 @@
24770 #include <asm/dwarf2.h>
24771 #include <asm/errno.h>
24772 #include <asm/asm.h>
24773+#include <asm/alternative-asm.h>
24774
24775 /*
24776 * Checksum copy with exception handling.
24777@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
24778 CFI_RESTORE rbp
24779 addq $7*8, %rsp
24780 CFI_ADJUST_CFA_OFFSET -7*8
24781+ pax_force_retaddr 0, 1
24782 ret
24783 CFI_RESTORE_STATE
24784
24785diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
24786index 25b7ae8..169fafc 100644
24787--- a/arch/x86/lib/csum-wrappers_64.c
24788+++ b/arch/x86/lib/csum-wrappers_64.c
24789@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
24790 len -= 2;
24791 }
24792 }
24793- isum = csum_partial_copy_generic((__force const void *)src,
24794+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
24795 dst, len, isum, errp, NULL);
24796 if (unlikely(*errp))
24797 goto out_err;
24798@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
24799 }
24800
24801 *errp = 0;
24802- return csum_partial_copy_generic(src, (void __force *)dst,
24803+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
24804 len, isum, NULL, errp);
24805 }
24806 EXPORT_SYMBOL(csum_partial_copy_to_user);
24807diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
24808index 156b9c8..b144132 100644
24809--- a/arch/x86/lib/getuser.S
24810+++ b/arch/x86/lib/getuser.S
24811@@ -34,17 +34,40 @@
24812 #include <asm/thread_info.h>
24813 #include <asm/asm.h>
24814 #include <asm/smap.h>
24815+#include <asm/segment.h>
24816+#include <asm/pgtable.h>
24817+#include <asm/alternative-asm.h>
24818+
24819+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24820+#define __copyuser_seg gs;
24821+#else
24822+#define __copyuser_seg
24823+#endif
24824
24825 .text
24826 ENTRY(__get_user_1)
24827 CFI_STARTPROC
24828+
24829+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24830 GET_THREAD_INFO(%_ASM_DX)
24831 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24832 jae bad_get_user
24833 ASM_STAC
24834-1: movzb (%_ASM_AX),%edx
24835+
24836+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24837+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24838+ cmp %_ASM_DX,%_ASM_AX
24839+ jae 1234f
24840+ add %_ASM_DX,%_ASM_AX
24841+1234:
24842+#endif
24843+
24844+#endif
24845+
24846+1: __copyuser_seg movzb (%_ASM_AX),%edx
24847 xor %eax,%eax
24848 ASM_CLAC
24849+ pax_force_retaddr
24850 ret
24851 CFI_ENDPROC
24852 ENDPROC(__get_user_1)
24853@@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
24854 ENTRY(__get_user_2)
24855 CFI_STARTPROC
24856 add $1,%_ASM_AX
24857+
24858+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24859 jc bad_get_user
24860 GET_THREAD_INFO(%_ASM_DX)
24861 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24862 jae bad_get_user
24863 ASM_STAC
24864-2: movzwl -1(%_ASM_AX),%edx
24865+
24866+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24867+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24868+ cmp %_ASM_DX,%_ASM_AX
24869+ jae 1234f
24870+ add %_ASM_DX,%_ASM_AX
24871+1234:
24872+#endif
24873+
24874+#endif
24875+
24876+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
24877 xor %eax,%eax
24878 ASM_CLAC
24879+ pax_force_retaddr
24880 ret
24881 CFI_ENDPROC
24882 ENDPROC(__get_user_2)
24883@@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
24884 ENTRY(__get_user_4)
24885 CFI_STARTPROC
24886 add $3,%_ASM_AX
24887+
24888+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24889 jc bad_get_user
24890 GET_THREAD_INFO(%_ASM_DX)
24891 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24892 jae bad_get_user
24893 ASM_STAC
24894-3: mov -3(%_ASM_AX),%edx
24895+
24896+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24897+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24898+ cmp %_ASM_DX,%_ASM_AX
24899+ jae 1234f
24900+ add %_ASM_DX,%_ASM_AX
24901+1234:
24902+#endif
24903+
24904+#endif
24905+
24906+3: __copyuser_seg mov -3(%_ASM_AX),%edx
24907 xor %eax,%eax
24908 ASM_CLAC
24909+ pax_force_retaddr
24910 ret
24911 CFI_ENDPROC
24912 ENDPROC(__get_user_4)
24913@@ -87,10 +138,20 @@ ENTRY(__get_user_8)
24914 GET_THREAD_INFO(%_ASM_DX)
24915 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24916 jae bad_get_user
24917+
24918+#ifdef CONFIG_PAX_MEMORY_UDEREF
24919+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24920+ cmp %_ASM_DX,%_ASM_AX
24921+ jae 1234f
24922+ add %_ASM_DX,%_ASM_AX
24923+1234:
24924+#endif
24925+
24926 ASM_STAC
24927 4: movq -7(%_ASM_AX),%_ASM_DX
24928 xor %eax,%eax
24929 ASM_CLAC
24930+ pax_force_retaddr
24931 ret
24932 CFI_ENDPROC
24933 ENDPROC(__get_user_8)
24934@@ -101,6 +162,7 @@ bad_get_user:
24935 xor %edx,%edx
24936 mov $(-EFAULT),%_ASM_AX
24937 ASM_CLAC
24938+ pax_force_retaddr
24939 ret
24940 CFI_ENDPROC
24941 END(bad_get_user)
24942diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
24943index 54fcffe..7be149e 100644
24944--- a/arch/x86/lib/insn.c
24945+++ b/arch/x86/lib/insn.c
24946@@ -20,8 +20,10 @@
24947
24948 #ifdef __KERNEL__
24949 #include <linux/string.h>
24950+#include <asm/pgtable_types.h>
24951 #else
24952 #include <string.h>
24953+#define ktla_ktva(addr) addr
24954 #endif
24955 #include <asm/inat.h>
24956 #include <asm/insn.h>
24957@@ -53,8 +55,8 @@
24958 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
24959 {
24960 memset(insn, 0, sizeof(*insn));
24961- insn->kaddr = kaddr;
24962- insn->next_byte = kaddr;
24963+ insn->kaddr = ktla_ktva(kaddr);
24964+ insn->next_byte = ktla_ktva(kaddr);
24965 insn->x86_64 = x86_64 ? 1 : 0;
24966 insn->opnd_bytes = 4;
24967 if (x86_64)
24968diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
24969index 05a95e7..326f2fa 100644
24970--- a/arch/x86/lib/iomap_copy_64.S
24971+++ b/arch/x86/lib/iomap_copy_64.S
24972@@ -17,6 +17,7 @@
24973
24974 #include <linux/linkage.h>
24975 #include <asm/dwarf2.h>
24976+#include <asm/alternative-asm.h>
24977
24978 /*
24979 * override generic version in lib/iomap_copy.c
24980@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
24981 CFI_STARTPROC
24982 movl %edx,%ecx
24983 rep movsd
24984+ pax_force_retaddr
24985 ret
24986 CFI_ENDPROC
24987 ENDPROC(__iowrite32_copy)
24988diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
24989index 1c273be..da9cc0e 100644
24990--- a/arch/x86/lib/memcpy_64.S
24991+++ b/arch/x86/lib/memcpy_64.S
24992@@ -33,6 +33,7 @@
24993 rep movsq
24994 movl %edx, %ecx
24995 rep movsb
24996+ pax_force_retaddr
24997 ret
24998 .Lmemcpy_e:
24999 .previous
25000@@ -49,6 +50,7 @@
25001 movq %rdi, %rax
25002 movq %rdx, %rcx
25003 rep movsb
25004+ pax_force_retaddr
25005 ret
25006 .Lmemcpy_e_e:
25007 .previous
25008@@ -76,13 +78,13 @@ ENTRY(memcpy)
25009 */
25010 movq 0*8(%rsi), %r8
25011 movq 1*8(%rsi), %r9
25012- movq 2*8(%rsi), %r10
25013+ movq 2*8(%rsi), %rcx
25014 movq 3*8(%rsi), %r11
25015 leaq 4*8(%rsi), %rsi
25016
25017 movq %r8, 0*8(%rdi)
25018 movq %r9, 1*8(%rdi)
25019- movq %r10, 2*8(%rdi)
25020+ movq %rcx, 2*8(%rdi)
25021 movq %r11, 3*8(%rdi)
25022 leaq 4*8(%rdi), %rdi
25023 jae .Lcopy_forward_loop
25024@@ -105,12 +107,12 @@ ENTRY(memcpy)
25025 subq $0x20, %rdx
25026 movq -1*8(%rsi), %r8
25027 movq -2*8(%rsi), %r9
25028- movq -3*8(%rsi), %r10
25029+ movq -3*8(%rsi), %rcx
25030 movq -4*8(%rsi), %r11
25031 leaq -4*8(%rsi), %rsi
25032 movq %r8, -1*8(%rdi)
25033 movq %r9, -2*8(%rdi)
25034- movq %r10, -3*8(%rdi)
25035+ movq %rcx, -3*8(%rdi)
25036 movq %r11, -4*8(%rdi)
25037 leaq -4*8(%rdi), %rdi
25038 jae .Lcopy_backward_loop
25039@@ -130,12 +132,13 @@ ENTRY(memcpy)
25040 */
25041 movq 0*8(%rsi), %r8
25042 movq 1*8(%rsi), %r9
25043- movq -2*8(%rsi, %rdx), %r10
25044+ movq -2*8(%rsi, %rdx), %rcx
25045 movq -1*8(%rsi, %rdx), %r11
25046 movq %r8, 0*8(%rdi)
25047 movq %r9, 1*8(%rdi)
25048- movq %r10, -2*8(%rdi, %rdx)
25049+ movq %rcx, -2*8(%rdi, %rdx)
25050 movq %r11, -1*8(%rdi, %rdx)
25051+ pax_force_retaddr
25052 retq
25053 .p2align 4
25054 .Lless_16bytes:
25055@@ -148,6 +151,7 @@ ENTRY(memcpy)
25056 movq -1*8(%rsi, %rdx), %r9
25057 movq %r8, 0*8(%rdi)
25058 movq %r9, -1*8(%rdi, %rdx)
25059+ pax_force_retaddr
25060 retq
25061 .p2align 4
25062 .Lless_8bytes:
25063@@ -161,6 +165,7 @@ ENTRY(memcpy)
25064 movl -4(%rsi, %rdx), %r8d
25065 movl %ecx, (%rdi)
25066 movl %r8d, -4(%rdi, %rdx)
25067+ pax_force_retaddr
25068 retq
25069 .p2align 4
25070 .Lless_3bytes:
25071@@ -179,6 +184,7 @@ ENTRY(memcpy)
25072 movb %cl, (%rdi)
25073
25074 .Lend:
25075+ pax_force_retaddr
25076 retq
25077 CFI_ENDPROC
25078 ENDPROC(memcpy)
25079diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
25080index ee16461..c39c199 100644
25081--- a/arch/x86/lib/memmove_64.S
25082+++ b/arch/x86/lib/memmove_64.S
25083@@ -61,13 +61,13 @@ ENTRY(memmove)
25084 5:
25085 sub $0x20, %rdx
25086 movq 0*8(%rsi), %r11
25087- movq 1*8(%rsi), %r10
25088+ movq 1*8(%rsi), %rcx
25089 movq 2*8(%rsi), %r9
25090 movq 3*8(%rsi), %r8
25091 leaq 4*8(%rsi), %rsi
25092
25093 movq %r11, 0*8(%rdi)
25094- movq %r10, 1*8(%rdi)
25095+ movq %rcx, 1*8(%rdi)
25096 movq %r9, 2*8(%rdi)
25097 movq %r8, 3*8(%rdi)
25098 leaq 4*8(%rdi), %rdi
25099@@ -81,10 +81,10 @@ ENTRY(memmove)
25100 4:
25101 movq %rdx, %rcx
25102 movq -8(%rsi, %rdx), %r11
25103- lea -8(%rdi, %rdx), %r10
25104+ lea -8(%rdi, %rdx), %r9
25105 shrq $3, %rcx
25106 rep movsq
25107- movq %r11, (%r10)
25108+ movq %r11, (%r9)
25109 jmp 13f
25110 .Lmemmove_end_forward:
25111
25112@@ -95,14 +95,14 @@ ENTRY(memmove)
25113 7:
25114 movq %rdx, %rcx
25115 movq (%rsi), %r11
25116- movq %rdi, %r10
25117+ movq %rdi, %r9
25118 leaq -8(%rsi, %rdx), %rsi
25119 leaq -8(%rdi, %rdx), %rdi
25120 shrq $3, %rcx
25121 std
25122 rep movsq
25123 cld
25124- movq %r11, (%r10)
25125+ movq %r11, (%r9)
25126 jmp 13f
25127
25128 /*
25129@@ -127,13 +127,13 @@ ENTRY(memmove)
25130 8:
25131 subq $0x20, %rdx
25132 movq -1*8(%rsi), %r11
25133- movq -2*8(%rsi), %r10
25134+ movq -2*8(%rsi), %rcx
25135 movq -3*8(%rsi), %r9
25136 movq -4*8(%rsi), %r8
25137 leaq -4*8(%rsi), %rsi
25138
25139 movq %r11, -1*8(%rdi)
25140- movq %r10, -2*8(%rdi)
25141+ movq %rcx, -2*8(%rdi)
25142 movq %r9, -3*8(%rdi)
25143 movq %r8, -4*8(%rdi)
25144 leaq -4*8(%rdi), %rdi
25145@@ -151,11 +151,11 @@ ENTRY(memmove)
25146 * Move data from 16 bytes to 31 bytes.
25147 */
25148 movq 0*8(%rsi), %r11
25149- movq 1*8(%rsi), %r10
25150+ movq 1*8(%rsi), %rcx
25151 movq -2*8(%rsi, %rdx), %r9
25152 movq -1*8(%rsi, %rdx), %r8
25153 movq %r11, 0*8(%rdi)
25154- movq %r10, 1*8(%rdi)
25155+ movq %rcx, 1*8(%rdi)
25156 movq %r9, -2*8(%rdi, %rdx)
25157 movq %r8, -1*8(%rdi, %rdx)
25158 jmp 13f
25159@@ -167,9 +167,9 @@ ENTRY(memmove)
25160 * Move data from 8 bytes to 15 bytes.
25161 */
25162 movq 0*8(%rsi), %r11
25163- movq -1*8(%rsi, %rdx), %r10
25164+ movq -1*8(%rsi, %rdx), %r9
25165 movq %r11, 0*8(%rdi)
25166- movq %r10, -1*8(%rdi, %rdx)
25167+ movq %r9, -1*8(%rdi, %rdx)
25168 jmp 13f
25169 10:
25170 cmpq $4, %rdx
25171@@ -178,9 +178,9 @@ ENTRY(memmove)
25172 * Move data from 4 bytes to 7 bytes.
25173 */
25174 movl (%rsi), %r11d
25175- movl -4(%rsi, %rdx), %r10d
25176+ movl -4(%rsi, %rdx), %r9d
25177 movl %r11d, (%rdi)
25178- movl %r10d, -4(%rdi, %rdx)
25179+ movl %r9d, -4(%rdi, %rdx)
25180 jmp 13f
25181 11:
25182 cmp $2, %rdx
25183@@ -189,9 +189,9 @@ ENTRY(memmove)
25184 * Move data from 2 bytes to 3 bytes.
25185 */
25186 movw (%rsi), %r11w
25187- movw -2(%rsi, %rdx), %r10w
25188+ movw -2(%rsi, %rdx), %r9w
25189 movw %r11w, (%rdi)
25190- movw %r10w, -2(%rdi, %rdx)
25191+ movw %r9w, -2(%rdi, %rdx)
25192 jmp 13f
25193 12:
25194 cmp $1, %rdx
25195@@ -202,6 +202,7 @@ ENTRY(memmove)
25196 movb (%rsi), %r11b
25197 movb %r11b, (%rdi)
25198 13:
25199+ pax_force_retaddr
25200 retq
25201 CFI_ENDPROC
25202
25203@@ -210,6 +211,7 @@ ENTRY(memmove)
25204 /* Forward moving data. */
25205 movq %rdx, %rcx
25206 rep movsb
25207+ pax_force_retaddr
25208 retq
25209 .Lmemmove_end_forward_efs:
25210 .previous
25211diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
25212index 2dcb380..963660a 100644
25213--- a/arch/x86/lib/memset_64.S
25214+++ b/arch/x86/lib/memset_64.S
25215@@ -30,6 +30,7 @@
25216 movl %edx,%ecx
25217 rep stosb
25218 movq %r9,%rax
25219+ pax_force_retaddr
25220 ret
25221 .Lmemset_e:
25222 .previous
25223@@ -52,6 +53,7 @@
25224 movq %rdx,%rcx
25225 rep stosb
25226 movq %r9,%rax
25227+ pax_force_retaddr
25228 ret
25229 .Lmemset_e_e:
25230 .previous
25231@@ -59,7 +61,7 @@
25232 ENTRY(memset)
25233 ENTRY(__memset)
25234 CFI_STARTPROC
25235- movq %rdi,%r10
25236+ movq %rdi,%r11
25237
25238 /* expand byte value */
25239 movzbl %sil,%ecx
25240@@ -117,7 +119,8 @@ ENTRY(__memset)
25241 jnz .Lloop_1
25242
25243 .Lende:
25244- movq %r10,%rax
25245+ movq %r11,%rax
25246+ pax_force_retaddr
25247 ret
25248
25249 CFI_RESTORE_STATE
25250diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
25251index c9f2d9b..e7fd2c0 100644
25252--- a/arch/x86/lib/mmx_32.c
25253+++ b/arch/x86/lib/mmx_32.c
25254@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25255 {
25256 void *p;
25257 int i;
25258+ unsigned long cr0;
25259
25260 if (unlikely(in_interrupt()))
25261 return __memcpy(to, from, len);
25262@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25263 kernel_fpu_begin();
25264
25265 __asm__ __volatile__ (
25266- "1: prefetch (%0)\n" /* This set is 28 bytes */
25267- " prefetch 64(%0)\n"
25268- " prefetch 128(%0)\n"
25269- " prefetch 192(%0)\n"
25270- " prefetch 256(%0)\n"
25271+ "1: prefetch (%1)\n" /* This set is 28 bytes */
25272+ " prefetch 64(%1)\n"
25273+ " prefetch 128(%1)\n"
25274+ " prefetch 192(%1)\n"
25275+ " prefetch 256(%1)\n"
25276 "2: \n"
25277 ".section .fixup, \"ax\"\n"
25278- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25279+ "3: \n"
25280+
25281+#ifdef CONFIG_PAX_KERNEXEC
25282+ " movl %%cr0, %0\n"
25283+ " movl %0, %%eax\n"
25284+ " andl $0xFFFEFFFF, %%eax\n"
25285+ " movl %%eax, %%cr0\n"
25286+#endif
25287+
25288+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25289+
25290+#ifdef CONFIG_PAX_KERNEXEC
25291+ " movl %0, %%cr0\n"
25292+#endif
25293+
25294 " jmp 2b\n"
25295 ".previous\n"
25296 _ASM_EXTABLE(1b, 3b)
25297- : : "r" (from));
25298+ : "=&r" (cr0) : "r" (from) : "ax");
25299
25300 for ( ; i > 5; i--) {
25301 __asm__ __volatile__ (
25302- "1: prefetch 320(%0)\n"
25303- "2: movq (%0), %%mm0\n"
25304- " movq 8(%0), %%mm1\n"
25305- " movq 16(%0), %%mm2\n"
25306- " movq 24(%0), %%mm3\n"
25307- " movq %%mm0, (%1)\n"
25308- " movq %%mm1, 8(%1)\n"
25309- " movq %%mm2, 16(%1)\n"
25310- " movq %%mm3, 24(%1)\n"
25311- " movq 32(%0), %%mm0\n"
25312- " movq 40(%0), %%mm1\n"
25313- " movq 48(%0), %%mm2\n"
25314- " movq 56(%0), %%mm3\n"
25315- " movq %%mm0, 32(%1)\n"
25316- " movq %%mm1, 40(%1)\n"
25317- " movq %%mm2, 48(%1)\n"
25318- " movq %%mm3, 56(%1)\n"
25319+ "1: prefetch 320(%1)\n"
25320+ "2: movq (%1), %%mm0\n"
25321+ " movq 8(%1), %%mm1\n"
25322+ " movq 16(%1), %%mm2\n"
25323+ " movq 24(%1), %%mm3\n"
25324+ " movq %%mm0, (%2)\n"
25325+ " movq %%mm1, 8(%2)\n"
25326+ " movq %%mm2, 16(%2)\n"
25327+ " movq %%mm3, 24(%2)\n"
25328+ " movq 32(%1), %%mm0\n"
25329+ " movq 40(%1), %%mm1\n"
25330+ " movq 48(%1), %%mm2\n"
25331+ " movq 56(%1), %%mm3\n"
25332+ " movq %%mm0, 32(%2)\n"
25333+ " movq %%mm1, 40(%2)\n"
25334+ " movq %%mm2, 48(%2)\n"
25335+ " movq %%mm3, 56(%2)\n"
25336 ".section .fixup, \"ax\"\n"
25337- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25338+ "3:\n"
25339+
25340+#ifdef CONFIG_PAX_KERNEXEC
25341+ " movl %%cr0, %0\n"
25342+ " movl %0, %%eax\n"
25343+ " andl $0xFFFEFFFF, %%eax\n"
25344+ " movl %%eax, %%cr0\n"
25345+#endif
25346+
25347+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25348+
25349+#ifdef CONFIG_PAX_KERNEXEC
25350+ " movl %0, %%cr0\n"
25351+#endif
25352+
25353 " jmp 2b\n"
25354 ".previous\n"
25355 _ASM_EXTABLE(1b, 3b)
25356- : : "r" (from), "r" (to) : "memory");
25357+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25358
25359 from += 64;
25360 to += 64;
25361@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
25362 static void fast_copy_page(void *to, void *from)
25363 {
25364 int i;
25365+ unsigned long cr0;
25366
25367 kernel_fpu_begin();
25368
25369@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
25370 * but that is for later. -AV
25371 */
25372 __asm__ __volatile__(
25373- "1: prefetch (%0)\n"
25374- " prefetch 64(%0)\n"
25375- " prefetch 128(%0)\n"
25376- " prefetch 192(%0)\n"
25377- " prefetch 256(%0)\n"
25378+ "1: prefetch (%1)\n"
25379+ " prefetch 64(%1)\n"
25380+ " prefetch 128(%1)\n"
25381+ " prefetch 192(%1)\n"
25382+ " prefetch 256(%1)\n"
25383 "2: \n"
25384 ".section .fixup, \"ax\"\n"
25385- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25386+ "3: \n"
25387+
25388+#ifdef CONFIG_PAX_KERNEXEC
25389+ " movl %%cr0, %0\n"
25390+ " movl %0, %%eax\n"
25391+ " andl $0xFFFEFFFF, %%eax\n"
25392+ " movl %%eax, %%cr0\n"
25393+#endif
25394+
25395+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25396+
25397+#ifdef CONFIG_PAX_KERNEXEC
25398+ " movl %0, %%cr0\n"
25399+#endif
25400+
25401 " jmp 2b\n"
25402 ".previous\n"
25403- _ASM_EXTABLE(1b, 3b) : : "r" (from));
25404+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
25405
25406 for (i = 0; i < (4096-320)/64; i++) {
25407 __asm__ __volatile__ (
25408- "1: prefetch 320(%0)\n"
25409- "2: movq (%0), %%mm0\n"
25410- " movntq %%mm0, (%1)\n"
25411- " movq 8(%0), %%mm1\n"
25412- " movntq %%mm1, 8(%1)\n"
25413- " movq 16(%0), %%mm2\n"
25414- " movntq %%mm2, 16(%1)\n"
25415- " movq 24(%0), %%mm3\n"
25416- " movntq %%mm3, 24(%1)\n"
25417- " movq 32(%0), %%mm4\n"
25418- " movntq %%mm4, 32(%1)\n"
25419- " movq 40(%0), %%mm5\n"
25420- " movntq %%mm5, 40(%1)\n"
25421- " movq 48(%0), %%mm6\n"
25422- " movntq %%mm6, 48(%1)\n"
25423- " movq 56(%0), %%mm7\n"
25424- " movntq %%mm7, 56(%1)\n"
25425+ "1: prefetch 320(%1)\n"
25426+ "2: movq (%1), %%mm0\n"
25427+ " movntq %%mm0, (%2)\n"
25428+ " movq 8(%1), %%mm1\n"
25429+ " movntq %%mm1, 8(%2)\n"
25430+ " movq 16(%1), %%mm2\n"
25431+ " movntq %%mm2, 16(%2)\n"
25432+ " movq 24(%1), %%mm3\n"
25433+ " movntq %%mm3, 24(%2)\n"
25434+ " movq 32(%1), %%mm4\n"
25435+ " movntq %%mm4, 32(%2)\n"
25436+ " movq 40(%1), %%mm5\n"
25437+ " movntq %%mm5, 40(%2)\n"
25438+ " movq 48(%1), %%mm6\n"
25439+ " movntq %%mm6, 48(%2)\n"
25440+ " movq 56(%1), %%mm7\n"
25441+ " movntq %%mm7, 56(%2)\n"
25442 ".section .fixup, \"ax\"\n"
25443- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25444+ "3:\n"
25445+
25446+#ifdef CONFIG_PAX_KERNEXEC
25447+ " movl %%cr0, %0\n"
25448+ " movl %0, %%eax\n"
25449+ " andl $0xFFFEFFFF, %%eax\n"
25450+ " movl %%eax, %%cr0\n"
25451+#endif
25452+
25453+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25454+
25455+#ifdef CONFIG_PAX_KERNEXEC
25456+ " movl %0, %%cr0\n"
25457+#endif
25458+
25459 " jmp 2b\n"
25460 ".previous\n"
25461- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
25462+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25463
25464 from += 64;
25465 to += 64;
25466@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
25467 static void fast_copy_page(void *to, void *from)
25468 {
25469 int i;
25470+ unsigned long cr0;
25471
25472 kernel_fpu_begin();
25473
25474 __asm__ __volatile__ (
25475- "1: prefetch (%0)\n"
25476- " prefetch 64(%0)\n"
25477- " prefetch 128(%0)\n"
25478- " prefetch 192(%0)\n"
25479- " prefetch 256(%0)\n"
25480+ "1: prefetch (%1)\n"
25481+ " prefetch 64(%1)\n"
25482+ " prefetch 128(%1)\n"
25483+ " prefetch 192(%1)\n"
25484+ " prefetch 256(%1)\n"
25485 "2: \n"
25486 ".section .fixup, \"ax\"\n"
25487- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25488+ "3: \n"
25489+
25490+#ifdef CONFIG_PAX_KERNEXEC
25491+ " movl %%cr0, %0\n"
25492+ " movl %0, %%eax\n"
25493+ " andl $0xFFFEFFFF, %%eax\n"
25494+ " movl %%eax, %%cr0\n"
25495+#endif
25496+
25497+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25498+
25499+#ifdef CONFIG_PAX_KERNEXEC
25500+ " movl %0, %%cr0\n"
25501+#endif
25502+
25503 " jmp 2b\n"
25504 ".previous\n"
25505- _ASM_EXTABLE(1b, 3b) : : "r" (from));
25506+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
25507
25508 for (i = 0; i < 4096/64; i++) {
25509 __asm__ __volatile__ (
25510- "1: prefetch 320(%0)\n"
25511- "2: movq (%0), %%mm0\n"
25512- " movq 8(%0), %%mm1\n"
25513- " movq 16(%0), %%mm2\n"
25514- " movq 24(%0), %%mm3\n"
25515- " movq %%mm0, (%1)\n"
25516- " movq %%mm1, 8(%1)\n"
25517- " movq %%mm2, 16(%1)\n"
25518- " movq %%mm3, 24(%1)\n"
25519- " movq 32(%0), %%mm0\n"
25520- " movq 40(%0), %%mm1\n"
25521- " movq 48(%0), %%mm2\n"
25522- " movq 56(%0), %%mm3\n"
25523- " movq %%mm0, 32(%1)\n"
25524- " movq %%mm1, 40(%1)\n"
25525- " movq %%mm2, 48(%1)\n"
25526- " movq %%mm3, 56(%1)\n"
25527+ "1: prefetch 320(%1)\n"
25528+ "2: movq (%1), %%mm0\n"
25529+ " movq 8(%1), %%mm1\n"
25530+ " movq 16(%1), %%mm2\n"
25531+ " movq 24(%1), %%mm3\n"
25532+ " movq %%mm0, (%2)\n"
25533+ " movq %%mm1, 8(%2)\n"
25534+ " movq %%mm2, 16(%2)\n"
25535+ " movq %%mm3, 24(%2)\n"
25536+ " movq 32(%1), %%mm0\n"
25537+ " movq 40(%1), %%mm1\n"
25538+ " movq 48(%1), %%mm2\n"
25539+ " movq 56(%1), %%mm3\n"
25540+ " movq %%mm0, 32(%2)\n"
25541+ " movq %%mm1, 40(%2)\n"
25542+ " movq %%mm2, 48(%2)\n"
25543+ " movq %%mm3, 56(%2)\n"
25544 ".section .fixup, \"ax\"\n"
25545- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25546+ "3:\n"
25547+
25548+#ifdef CONFIG_PAX_KERNEXEC
25549+ " movl %%cr0, %0\n"
25550+ " movl %0, %%eax\n"
25551+ " andl $0xFFFEFFFF, %%eax\n"
25552+ " movl %%eax, %%cr0\n"
25553+#endif
25554+
25555+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25556+
25557+#ifdef CONFIG_PAX_KERNEXEC
25558+ " movl %0, %%cr0\n"
25559+#endif
25560+
25561 " jmp 2b\n"
25562 ".previous\n"
25563 _ASM_EXTABLE(1b, 3b)
25564- : : "r" (from), "r" (to) : "memory");
25565+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25566
25567 from += 64;
25568 to += 64;
25569diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
25570index f6d13ee..aca5f0b 100644
25571--- a/arch/x86/lib/msr-reg.S
25572+++ b/arch/x86/lib/msr-reg.S
25573@@ -3,6 +3,7 @@
25574 #include <asm/dwarf2.h>
25575 #include <asm/asm.h>
25576 #include <asm/msr.h>
25577+#include <asm/alternative-asm.h>
25578
25579 #ifdef CONFIG_X86_64
25580 /*
25581@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
25582 CFI_STARTPROC
25583 pushq_cfi %rbx
25584 pushq_cfi %rbp
25585- movq %rdi, %r10 /* Save pointer */
25586+ movq %rdi, %r9 /* Save pointer */
25587 xorl %r11d, %r11d /* Return value */
25588 movl (%rdi), %eax
25589 movl 4(%rdi), %ecx
25590@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
25591 movl 28(%rdi), %edi
25592 CFI_REMEMBER_STATE
25593 1: \op
25594-2: movl %eax, (%r10)
25595+2: movl %eax, (%r9)
25596 movl %r11d, %eax /* Return value */
25597- movl %ecx, 4(%r10)
25598- movl %edx, 8(%r10)
25599- movl %ebx, 12(%r10)
25600- movl %ebp, 20(%r10)
25601- movl %esi, 24(%r10)
25602- movl %edi, 28(%r10)
25603+ movl %ecx, 4(%r9)
25604+ movl %edx, 8(%r9)
25605+ movl %ebx, 12(%r9)
25606+ movl %ebp, 20(%r9)
25607+ movl %esi, 24(%r9)
25608+ movl %edi, 28(%r9)
25609 popq_cfi %rbp
25610 popq_cfi %rbx
25611+ pax_force_retaddr
25612 ret
25613 3:
25614 CFI_RESTORE_STATE
25615diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
25616index fc6ba17..04471c5 100644
25617--- a/arch/x86/lib/putuser.S
25618+++ b/arch/x86/lib/putuser.S
25619@@ -16,7 +16,9 @@
25620 #include <asm/errno.h>
25621 #include <asm/asm.h>
25622 #include <asm/smap.h>
25623-
25624+#include <asm/segment.h>
25625+#include <asm/pgtable.h>
25626+#include <asm/alternative-asm.h>
25627
25628 /*
25629 * __put_user_X
25630@@ -30,57 +32,125 @@
25631 * as they get called from within inline assembly.
25632 */
25633
25634-#define ENTER CFI_STARTPROC ; \
25635- GET_THREAD_INFO(%_ASM_BX)
25636-#define EXIT ASM_CLAC ; \
25637- ret ; \
25638+#define ENTER CFI_STARTPROC
25639+#define EXIT ASM_CLAC ; \
25640+ pax_force_retaddr ; \
25641+ ret ; \
25642 CFI_ENDPROC
25643
25644+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25645+#define _DEST %_ASM_CX,%_ASM_BX
25646+#else
25647+#define _DEST %_ASM_CX
25648+#endif
25649+
25650+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25651+#define __copyuser_seg gs;
25652+#else
25653+#define __copyuser_seg
25654+#endif
25655+
25656 .text
25657 ENTRY(__put_user_1)
25658 ENTER
25659+
25660+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25661+ GET_THREAD_INFO(%_ASM_BX)
25662 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
25663 jae bad_put_user
25664 ASM_STAC
25665-1: movb %al,(%_ASM_CX)
25666+
25667+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25668+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25669+ cmp %_ASM_BX,%_ASM_CX
25670+ jb 1234f
25671+ xor %ebx,%ebx
25672+1234:
25673+#endif
25674+
25675+#endif
25676+
25677+1: __copyuser_seg movb %al,(_DEST)
25678 xor %eax,%eax
25679 EXIT
25680 ENDPROC(__put_user_1)
25681
25682 ENTRY(__put_user_2)
25683 ENTER
25684+
25685+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25686+ GET_THREAD_INFO(%_ASM_BX)
25687 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
25688 sub $1,%_ASM_BX
25689 cmp %_ASM_BX,%_ASM_CX
25690 jae bad_put_user
25691 ASM_STAC
25692-2: movw %ax,(%_ASM_CX)
25693+
25694+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25695+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25696+ cmp %_ASM_BX,%_ASM_CX
25697+ jb 1234f
25698+ xor %ebx,%ebx
25699+1234:
25700+#endif
25701+
25702+#endif
25703+
25704+2: __copyuser_seg movw %ax,(_DEST)
25705 xor %eax,%eax
25706 EXIT
25707 ENDPROC(__put_user_2)
25708
25709 ENTRY(__put_user_4)
25710 ENTER
25711+
25712+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25713+ GET_THREAD_INFO(%_ASM_BX)
25714 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
25715 sub $3,%_ASM_BX
25716 cmp %_ASM_BX,%_ASM_CX
25717 jae bad_put_user
25718 ASM_STAC
25719-3: movl %eax,(%_ASM_CX)
25720+
25721+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25722+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25723+ cmp %_ASM_BX,%_ASM_CX
25724+ jb 1234f
25725+ xor %ebx,%ebx
25726+1234:
25727+#endif
25728+
25729+#endif
25730+
25731+3: __copyuser_seg movl %eax,(_DEST)
25732 xor %eax,%eax
25733 EXIT
25734 ENDPROC(__put_user_4)
25735
25736 ENTRY(__put_user_8)
25737 ENTER
25738+
25739+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25740+ GET_THREAD_INFO(%_ASM_BX)
25741 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
25742 sub $7,%_ASM_BX
25743 cmp %_ASM_BX,%_ASM_CX
25744 jae bad_put_user
25745 ASM_STAC
25746-4: mov %_ASM_AX,(%_ASM_CX)
25747+
25748+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25749+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25750+ cmp %_ASM_BX,%_ASM_CX
25751+ jb 1234f
25752+ xor %ebx,%ebx
25753+1234:
25754+#endif
25755+
25756+#endif
25757+
25758+4: __copyuser_seg mov %_ASM_AX,(_DEST)
25759 #ifdef CONFIG_X86_32
25760-5: movl %edx,4(%_ASM_CX)
25761+5: __copyuser_seg movl %edx,4(_DEST)
25762 #endif
25763 xor %eax,%eax
25764 EXIT
25765diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
25766index 1cad221..de671ee 100644
25767--- a/arch/x86/lib/rwlock.S
25768+++ b/arch/x86/lib/rwlock.S
25769@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
25770 FRAME
25771 0: LOCK_PREFIX
25772 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
25773+
25774+#ifdef CONFIG_PAX_REFCOUNT
25775+ jno 1234f
25776+ LOCK_PREFIX
25777+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
25778+ int $4
25779+1234:
25780+ _ASM_EXTABLE(1234b, 1234b)
25781+#endif
25782+
25783 1: rep; nop
25784 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
25785 jne 1b
25786 LOCK_PREFIX
25787 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
25788+
25789+#ifdef CONFIG_PAX_REFCOUNT
25790+ jno 1234f
25791+ LOCK_PREFIX
25792+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
25793+ int $4
25794+1234:
25795+ _ASM_EXTABLE(1234b, 1234b)
25796+#endif
25797+
25798 jnz 0b
25799 ENDFRAME
25800+ pax_force_retaddr
25801 ret
25802 CFI_ENDPROC
25803 END(__write_lock_failed)
25804@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
25805 FRAME
25806 0: LOCK_PREFIX
25807 READ_LOCK_SIZE(inc) (%__lock_ptr)
25808+
25809+#ifdef CONFIG_PAX_REFCOUNT
25810+ jno 1234f
25811+ LOCK_PREFIX
25812+ READ_LOCK_SIZE(dec) (%__lock_ptr)
25813+ int $4
25814+1234:
25815+ _ASM_EXTABLE(1234b, 1234b)
25816+#endif
25817+
25818 1: rep; nop
25819 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
25820 js 1b
25821 LOCK_PREFIX
25822 READ_LOCK_SIZE(dec) (%__lock_ptr)
25823+
25824+#ifdef CONFIG_PAX_REFCOUNT
25825+ jno 1234f
25826+ LOCK_PREFIX
25827+ READ_LOCK_SIZE(inc) (%__lock_ptr)
25828+ int $4
25829+1234:
25830+ _ASM_EXTABLE(1234b, 1234b)
25831+#endif
25832+
25833 js 0b
25834 ENDFRAME
25835+ pax_force_retaddr
25836 ret
25837 CFI_ENDPROC
25838 END(__read_lock_failed)
25839diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
25840index 5dff5f0..cadebf4 100644
25841--- a/arch/x86/lib/rwsem.S
25842+++ b/arch/x86/lib/rwsem.S
25843@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
25844 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
25845 CFI_RESTORE __ASM_REG(dx)
25846 restore_common_regs
25847+ pax_force_retaddr
25848 ret
25849 CFI_ENDPROC
25850 ENDPROC(call_rwsem_down_read_failed)
25851@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
25852 movq %rax,%rdi
25853 call rwsem_down_write_failed
25854 restore_common_regs
25855+ pax_force_retaddr
25856 ret
25857 CFI_ENDPROC
25858 ENDPROC(call_rwsem_down_write_failed)
25859@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
25860 movq %rax,%rdi
25861 call rwsem_wake
25862 restore_common_regs
25863-1: ret
25864+1: pax_force_retaddr
25865+ ret
25866 CFI_ENDPROC
25867 ENDPROC(call_rwsem_wake)
25868
25869@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
25870 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
25871 CFI_RESTORE __ASM_REG(dx)
25872 restore_common_regs
25873+ pax_force_retaddr
25874 ret
25875 CFI_ENDPROC
25876 ENDPROC(call_rwsem_downgrade_wake)
25877diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
25878index a63efd6..ccecad8 100644
25879--- a/arch/x86/lib/thunk_64.S
25880+++ b/arch/x86/lib/thunk_64.S
25881@@ -8,6 +8,7 @@
25882 #include <linux/linkage.h>
25883 #include <asm/dwarf2.h>
25884 #include <asm/calling.h>
25885+#include <asm/alternative-asm.h>
25886
25887 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
25888 .macro THUNK name, func, put_ret_addr_in_rdi=0
25889@@ -41,5 +42,6 @@
25890 SAVE_ARGS
25891 restore:
25892 RESTORE_ARGS
25893+ pax_force_retaddr
25894 ret
25895 CFI_ENDPROC
25896diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
25897index f0312d7..9c39d63 100644
25898--- a/arch/x86/lib/usercopy_32.c
25899+++ b/arch/x86/lib/usercopy_32.c
25900@@ -42,11 +42,13 @@ do { \
25901 int __d0; \
25902 might_fault(); \
25903 __asm__ __volatile__( \
25904+ __COPYUSER_SET_ES \
25905 ASM_STAC "\n" \
25906 "0: rep; stosl\n" \
25907 " movl %2,%0\n" \
25908 "1: rep; stosb\n" \
25909 "2: " ASM_CLAC "\n" \
25910+ __COPYUSER_RESTORE_ES \
25911 ".section .fixup,\"ax\"\n" \
25912 "3: lea 0(%2,%0,4),%0\n" \
25913 " jmp 2b\n" \
25914@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
25915
25916 #ifdef CONFIG_X86_INTEL_USERCOPY
25917 static unsigned long
25918-__copy_user_intel(void __user *to, const void *from, unsigned long size)
25919+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
25920 {
25921 int d0, d1;
25922 __asm__ __volatile__(
25923@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
25924 " .align 2,0x90\n"
25925 "3: movl 0(%4), %%eax\n"
25926 "4: movl 4(%4), %%edx\n"
25927- "5: movl %%eax, 0(%3)\n"
25928- "6: movl %%edx, 4(%3)\n"
25929+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
25930+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
25931 "7: movl 8(%4), %%eax\n"
25932 "8: movl 12(%4),%%edx\n"
25933- "9: movl %%eax, 8(%3)\n"
25934- "10: movl %%edx, 12(%3)\n"
25935+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
25936+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
25937 "11: movl 16(%4), %%eax\n"
25938 "12: movl 20(%4), %%edx\n"
25939- "13: movl %%eax, 16(%3)\n"
25940- "14: movl %%edx, 20(%3)\n"
25941+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
25942+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
25943 "15: movl 24(%4), %%eax\n"
25944 "16: movl 28(%4), %%edx\n"
25945- "17: movl %%eax, 24(%3)\n"
25946- "18: movl %%edx, 28(%3)\n"
25947+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
25948+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
25949 "19: movl 32(%4), %%eax\n"
25950 "20: movl 36(%4), %%edx\n"
25951- "21: movl %%eax, 32(%3)\n"
25952- "22: movl %%edx, 36(%3)\n"
25953+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
25954+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
25955 "23: movl 40(%4), %%eax\n"
25956 "24: movl 44(%4), %%edx\n"
25957- "25: movl %%eax, 40(%3)\n"
25958- "26: movl %%edx, 44(%3)\n"
25959+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
25960+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
25961 "27: movl 48(%4), %%eax\n"
25962 "28: movl 52(%4), %%edx\n"
25963- "29: movl %%eax, 48(%3)\n"
25964- "30: movl %%edx, 52(%3)\n"
25965+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
25966+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
25967 "31: movl 56(%4), %%eax\n"
25968 "32: movl 60(%4), %%edx\n"
25969- "33: movl %%eax, 56(%3)\n"
25970- "34: movl %%edx, 60(%3)\n"
25971+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
25972+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
25973 " addl $-64, %0\n"
25974 " addl $64, %4\n"
25975 " addl $64, %3\n"
25976@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
25977 " shrl $2, %0\n"
25978 " andl $3, %%eax\n"
25979 " cld\n"
25980+ __COPYUSER_SET_ES
25981 "99: rep; movsl\n"
25982 "36: movl %%eax, %0\n"
25983 "37: rep; movsb\n"
25984 "100:\n"
25985+ __COPYUSER_RESTORE_ES
25986 ".section .fixup,\"ax\"\n"
25987 "101: lea 0(%%eax,%0,4),%0\n"
25988 " jmp 100b\n"
25989@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
25990 }
25991
25992 static unsigned long
25993+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
25994+{
25995+ int d0, d1;
25996+ __asm__ __volatile__(
25997+ " .align 2,0x90\n"
25998+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
25999+ " cmpl $67, %0\n"
26000+ " jbe 3f\n"
26001+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
26002+ " .align 2,0x90\n"
26003+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
26004+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
26005+ "5: movl %%eax, 0(%3)\n"
26006+ "6: movl %%edx, 4(%3)\n"
26007+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
26008+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
26009+ "9: movl %%eax, 8(%3)\n"
26010+ "10: movl %%edx, 12(%3)\n"
26011+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
26012+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
26013+ "13: movl %%eax, 16(%3)\n"
26014+ "14: movl %%edx, 20(%3)\n"
26015+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
26016+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
26017+ "17: movl %%eax, 24(%3)\n"
26018+ "18: movl %%edx, 28(%3)\n"
26019+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
26020+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
26021+ "21: movl %%eax, 32(%3)\n"
26022+ "22: movl %%edx, 36(%3)\n"
26023+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
26024+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
26025+ "25: movl %%eax, 40(%3)\n"
26026+ "26: movl %%edx, 44(%3)\n"
26027+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
26028+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
26029+ "29: movl %%eax, 48(%3)\n"
26030+ "30: movl %%edx, 52(%3)\n"
26031+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
26032+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
26033+ "33: movl %%eax, 56(%3)\n"
26034+ "34: movl %%edx, 60(%3)\n"
26035+ " addl $-64, %0\n"
26036+ " addl $64, %4\n"
26037+ " addl $64, %3\n"
26038+ " cmpl $63, %0\n"
26039+ " ja 1b\n"
26040+ "35: movl %0, %%eax\n"
26041+ " shrl $2, %0\n"
26042+ " andl $3, %%eax\n"
26043+ " cld\n"
26044+ "99: rep; "__copyuser_seg" movsl\n"
26045+ "36: movl %%eax, %0\n"
26046+ "37: rep; "__copyuser_seg" movsb\n"
26047+ "100:\n"
26048+ ".section .fixup,\"ax\"\n"
26049+ "101: lea 0(%%eax,%0,4),%0\n"
26050+ " jmp 100b\n"
26051+ ".previous\n"
26052+ _ASM_EXTABLE(1b,100b)
26053+ _ASM_EXTABLE(2b,100b)
26054+ _ASM_EXTABLE(3b,100b)
26055+ _ASM_EXTABLE(4b,100b)
26056+ _ASM_EXTABLE(5b,100b)
26057+ _ASM_EXTABLE(6b,100b)
26058+ _ASM_EXTABLE(7b,100b)
26059+ _ASM_EXTABLE(8b,100b)
26060+ _ASM_EXTABLE(9b,100b)
26061+ _ASM_EXTABLE(10b,100b)
26062+ _ASM_EXTABLE(11b,100b)
26063+ _ASM_EXTABLE(12b,100b)
26064+ _ASM_EXTABLE(13b,100b)
26065+ _ASM_EXTABLE(14b,100b)
26066+ _ASM_EXTABLE(15b,100b)
26067+ _ASM_EXTABLE(16b,100b)
26068+ _ASM_EXTABLE(17b,100b)
26069+ _ASM_EXTABLE(18b,100b)
26070+ _ASM_EXTABLE(19b,100b)
26071+ _ASM_EXTABLE(20b,100b)
26072+ _ASM_EXTABLE(21b,100b)
26073+ _ASM_EXTABLE(22b,100b)
26074+ _ASM_EXTABLE(23b,100b)
26075+ _ASM_EXTABLE(24b,100b)
26076+ _ASM_EXTABLE(25b,100b)
26077+ _ASM_EXTABLE(26b,100b)
26078+ _ASM_EXTABLE(27b,100b)
26079+ _ASM_EXTABLE(28b,100b)
26080+ _ASM_EXTABLE(29b,100b)
26081+ _ASM_EXTABLE(30b,100b)
26082+ _ASM_EXTABLE(31b,100b)
26083+ _ASM_EXTABLE(32b,100b)
26084+ _ASM_EXTABLE(33b,100b)
26085+ _ASM_EXTABLE(34b,100b)
26086+ _ASM_EXTABLE(35b,100b)
26087+ _ASM_EXTABLE(36b,100b)
26088+ _ASM_EXTABLE(37b,100b)
26089+ _ASM_EXTABLE(99b,101b)
26090+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
26091+ : "1"(to), "2"(from), "0"(size)
26092+ : "eax", "edx", "memory");
26093+ return size;
26094+}
26095+
26096+static unsigned long __size_overflow(3)
26097 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26098 {
26099 int d0, d1;
26100 __asm__ __volatile__(
26101 " .align 2,0x90\n"
26102- "0: movl 32(%4), %%eax\n"
26103+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26104 " cmpl $67, %0\n"
26105 " jbe 2f\n"
26106- "1: movl 64(%4), %%eax\n"
26107+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26108 " .align 2,0x90\n"
26109- "2: movl 0(%4), %%eax\n"
26110- "21: movl 4(%4), %%edx\n"
26111+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26112+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26113 " movl %%eax, 0(%3)\n"
26114 " movl %%edx, 4(%3)\n"
26115- "3: movl 8(%4), %%eax\n"
26116- "31: movl 12(%4),%%edx\n"
26117+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26118+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26119 " movl %%eax, 8(%3)\n"
26120 " movl %%edx, 12(%3)\n"
26121- "4: movl 16(%4), %%eax\n"
26122- "41: movl 20(%4), %%edx\n"
26123+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26124+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26125 " movl %%eax, 16(%3)\n"
26126 " movl %%edx, 20(%3)\n"
26127- "10: movl 24(%4), %%eax\n"
26128- "51: movl 28(%4), %%edx\n"
26129+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26130+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26131 " movl %%eax, 24(%3)\n"
26132 " movl %%edx, 28(%3)\n"
26133- "11: movl 32(%4), %%eax\n"
26134- "61: movl 36(%4), %%edx\n"
26135+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26136+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26137 " movl %%eax, 32(%3)\n"
26138 " movl %%edx, 36(%3)\n"
26139- "12: movl 40(%4), %%eax\n"
26140- "71: movl 44(%4), %%edx\n"
26141+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26142+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26143 " movl %%eax, 40(%3)\n"
26144 " movl %%edx, 44(%3)\n"
26145- "13: movl 48(%4), %%eax\n"
26146- "81: movl 52(%4), %%edx\n"
26147+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26148+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26149 " movl %%eax, 48(%3)\n"
26150 " movl %%edx, 52(%3)\n"
26151- "14: movl 56(%4), %%eax\n"
26152- "91: movl 60(%4), %%edx\n"
26153+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26154+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26155 " movl %%eax, 56(%3)\n"
26156 " movl %%edx, 60(%3)\n"
26157 " addl $-64, %0\n"
26158@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26159 " shrl $2, %0\n"
26160 " andl $3, %%eax\n"
26161 " cld\n"
26162- "6: rep; movsl\n"
26163+ "6: rep; "__copyuser_seg" movsl\n"
26164 " movl %%eax,%0\n"
26165- "7: rep; movsb\n"
26166+ "7: rep; "__copyuser_seg" movsb\n"
26167 "8:\n"
26168 ".section .fixup,\"ax\"\n"
26169 "9: lea 0(%%eax,%0,4),%0\n"
26170@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26171 * hyoshiok@miraclelinux.com
26172 */
26173
26174-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26175+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
26176 const void __user *from, unsigned long size)
26177 {
26178 int d0, d1;
26179
26180 __asm__ __volatile__(
26181 " .align 2,0x90\n"
26182- "0: movl 32(%4), %%eax\n"
26183+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26184 " cmpl $67, %0\n"
26185 " jbe 2f\n"
26186- "1: movl 64(%4), %%eax\n"
26187+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26188 " .align 2,0x90\n"
26189- "2: movl 0(%4), %%eax\n"
26190- "21: movl 4(%4), %%edx\n"
26191+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26192+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26193 " movnti %%eax, 0(%3)\n"
26194 " movnti %%edx, 4(%3)\n"
26195- "3: movl 8(%4), %%eax\n"
26196- "31: movl 12(%4),%%edx\n"
26197+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26198+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26199 " movnti %%eax, 8(%3)\n"
26200 " movnti %%edx, 12(%3)\n"
26201- "4: movl 16(%4), %%eax\n"
26202- "41: movl 20(%4), %%edx\n"
26203+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26204+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26205 " movnti %%eax, 16(%3)\n"
26206 " movnti %%edx, 20(%3)\n"
26207- "10: movl 24(%4), %%eax\n"
26208- "51: movl 28(%4), %%edx\n"
26209+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26210+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26211 " movnti %%eax, 24(%3)\n"
26212 " movnti %%edx, 28(%3)\n"
26213- "11: movl 32(%4), %%eax\n"
26214- "61: movl 36(%4), %%edx\n"
26215+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26216+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26217 " movnti %%eax, 32(%3)\n"
26218 " movnti %%edx, 36(%3)\n"
26219- "12: movl 40(%4), %%eax\n"
26220- "71: movl 44(%4), %%edx\n"
26221+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26222+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26223 " movnti %%eax, 40(%3)\n"
26224 " movnti %%edx, 44(%3)\n"
26225- "13: movl 48(%4), %%eax\n"
26226- "81: movl 52(%4), %%edx\n"
26227+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26228+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26229 " movnti %%eax, 48(%3)\n"
26230 " movnti %%edx, 52(%3)\n"
26231- "14: movl 56(%4), %%eax\n"
26232- "91: movl 60(%4), %%edx\n"
26233+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26234+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26235 " movnti %%eax, 56(%3)\n"
26236 " movnti %%edx, 60(%3)\n"
26237 " addl $-64, %0\n"
26238@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26239 " shrl $2, %0\n"
26240 " andl $3, %%eax\n"
26241 " cld\n"
26242- "6: rep; movsl\n"
26243+ "6: rep; "__copyuser_seg" movsl\n"
26244 " movl %%eax,%0\n"
26245- "7: rep; movsb\n"
26246+ "7: rep; "__copyuser_seg" movsb\n"
26247 "8:\n"
26248 ".section .fixup,\"ax\"\n"
26249 "9: lea 0(%%eax,%0,4),%0\n"
26250@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26251 return size;
26252 }
26253
26254-static unsigned long __copy_user_intel_nocache(void *to,
26255+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
26256 const void __user *from, unsigned long size)
26257 {
26258 int d0, d1;
26259
26260 __asm__ __volatile__(
26261 " .align 2,0x90\n"
26262- "0: movl 32(%4), %%eax\n"
26263+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26264 " cmpl $67, %0\n"
26265 " jbe 2f\n"
26266- "1: movl 64(%4), %%eax\n"
26267+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26268 " .align 2,0x90\n"
26269- "2: movl 0(%4), %%eax\n"
26270- "21: movl 4(%4), %%edx\n"
26271+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26272+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26273 " movnti %%eax, 0(%3)\n"
26274 " movnti %%edx, 4(%3)\n"
26275- "3: movl 8(%4), %%eax\n"
26276- "31: movl 12(%4),%%edx\n"
26277+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26278+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26279 " movnti %%eax, 8(%3)\n"
26280 " movnti %%edx, 12(%3)\n"
26281- "4: movl 16(%4), %%eax\n"
26282- "41: movl 20(%4), %%edx\n"
26283+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26284+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26285 " movnti %%eax, 16(%3)\n"
26286 " movnti %%edx, 20(%3)\n"
26287- "10: movl 24(%4), %%eax\n"
26288- "51: movl 28(%4), %%edx\n"
26289+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26290+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26291 " movnti %%eax, 24(%3)\n"
26292 " movnti %%edx, 28(%3)\n"
26293- "11: movl 32(%4), %%eax\n"
26294- "61: movl 36(%4), %%edx\n"
26295+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26296+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26297 " movnti %%eax, 32(%3)\n"
26298 " movnti %%edx, 36(%3)\n"
26299- "12: movl 40(%4), %%eax\n"
26300- "71: movl 44(%4), %%edx\n"
26301+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26302+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26303 " movnti %%eax, 40(%3)\n"
26304 " movnti %%edx, 44(%3)\n"
26305- "13: movl 48(%4), %%eax\n"
26306- "81: movl 52(%4), %%edx\n"
26307+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26308+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26309 " movnti %%eax, 48(%3)\n"
26310 " movnti %%edx, 52(%3)\n"
26311- "14: movl 56(%4), %%eax\n"
26312- "91: movl 60(%4), %%edx\n"
26313+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26314+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26315 " movnti %%eax, 56(%3)\n"
26316 " movnti %%edx, 60(%3)\n"
26317 " addl $-64, %0\n"
26318@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
26319 " shrl $2, %0\n"
26320 " andl $3, %%eax\n"
26321 " cld\n"
26322- "6: rep; movsl\n"
26323+ "6: rep; "__copyuser_seg" movsl\n"
26324 " movl %%eax,%0\n"
26325- "7: rep; movsb\n"
26326+ "7: rep; "__copyuser_seg" movsb\n"
26327 "8:\n"
26328 ".section .fixup,\"ax\"\n"
26329 "9: lea 0(%%eax,%0,4),%0\n"
26330@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
26331 */
26332 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
26333 unsigned long size);
26334-unsigned long __copy_user_intel(void __user *to, const void *from,
26335+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
26336+ unsigned long size);
26337+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
26338 unsigned long size);
26339 unsigned long __copy_user_zeroing_intel_nocache(void *to,
26340 const void __user *from, unsigned long size);
26341 #endif /* CONFIG_X86_INTEL_USERCOPY */
26342
26343 /* Generic arbitrary sized copy. */
26344-#define __copy_user(to, from, size) \
26345+#define __copy_user(to, from, size, prefix, set, restore) \
26346 do { \
26347 int __d0, __d1, __d2; \
26348 __asm__ __volatile__( \
26349+ set \
26350 " cmp $7,%0\n" \
26351 " jbe 1f\n" \
26352 " movl %1,%0\n" \
26353 " negl %0\n" \
26354 " andl $7,%0\n" \
26355 " subl %0,%3\n" \
26356- "4: rep; movsb\n" \
26357+ "4: rep; "prefix"movsb\n" \
26358 " movl %3,%0\n" \
26359 " shrl $2,%0\n" \
26360 " andl $3,%3\n" \
26361 " .align 2,0x90\n" \
26362- "0: rep; movsl\n" \
26363+ "0: rep; "prefix"movsl\n" \
26364 " movl %3,%0\n" \
26365- "1: rep; movsb\n" \
26366+ "1: rep; "prefix"movsb\n" \
26367 "2:\n" \
26368+ restore \
26369 ".section .fixup,\"ax\"\n" \
26370 "5: addl %3,%0\n" \
26371 " jmp 2b\n" \
26372@@ -538,14 +650,14 @@ do { \
26373 " negl %0\n" \
26374 " andl $7,%0\n" \
26375 " subl %0,%3\n" \
26376- "4: rep; movsb\n" \
26377+ "4: rep; "__copyuser_seg"movsb\n" \
26378 " movl %3,%0\n" \
26379 " shrl $2,%0\n" \
26380 " andl $3,%3\n" \
26381 " .align 2,0x90\n" \
26382- "0: rep; movsl\n" \
26383+ "0: rep; "__copyuser_seg"movsl\n" \
26384 " movl %3,%0\n" \
26385- "1: rep; movsb\n" \
26386+ "1: rep; "__copyuser_seg"movsb\n" \
26387 "2:\n" \
26388 ".section .fixup,\"ax\"\n" \
26389 "5: addl %3,%0\n" \
26390@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
26391 {
26392 stac();
26393 if (movsl_is_ok(to, from, n))
26394- __copy_user(to, from, n);
26395+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
26396 else
26397- n = __copy_user_intel(to, from, n);
26398+ n = __generic_copy_to_user_intel(to, from, n);
26399 clac();
26400 return n;
26401 }
26402@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
26403 {
26404 stac();
26405 if (movsl_is_ok(to, from, n))
26406- __copy_user(to, from, n);
26407+ __copy_user(to, from, n, __copyuser_seg, "", "");
26408 else
26409- n = __copy_user_intel((void __user *)to,
26410- (const void *)from, n);
26411+ n = __generic_copy_from_user_intel(to, from, n);
26412 clac();
26413 return n;
26414 }
26415@@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
26416 if (n > 64 && cpu_has_xmm2)
26417 n = __copy_user_intel_nocache(to, from, n);
26418 else
26419- __copy_user(to, from, n);
26420+ __copy_user(to, from, n, __copyuser_seg, "", "");
26421 #else
26422- __copy_user(to, from, n);
26423+ __copy_user(to, from, n, __copyuser_seg, "", "");
26424 #endif
26425 clac();
26426 return n;
26427 }
26428 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
26429
26430-/**
26431- * copy_to_user: - Copy a block of data into user space.
26432- * @to: Destination address, in user space.
26433- * @from: Source address, in kernel space.
26434- * @n: Number of bytes to copy.
26435- *
26436- * Context: User context only. This function may sleep.
26437- *
26438- * Copy data from kernel space to user space.
26439- *
26440- * Returns number of bytes that could not be copied.
26441- * On success, this will be zero.
26442- */
26443-unsigned long
26444-copy_to_user(void __user *to, const void *from, unsigned long n)
26445-{
26446- if (access_ok(VERIFY_WRITE, to, n))
26447- n = __copy_to_user(to, from, n);
26448- return n;
26449-}
26450-EXPORT_SYMBOL(copy_to_user);
26451-
26452-/**
26453- * copy_from_user: - Copy a block of data from user space.
26454- * @to: Destination address, in kernel space.
26455- * @from: Source address, in user space.
26456- * @n: Number of bytes to copy.
26457- *
26458- * Context: User context only. This function may sleep.
26459- *
26460- * Copy data from user space to kernel space.
26461- *
26462- * Returns number of bytes that could not be copied.
26463- * On success, this will be zero.
26464- *
26465- * If some data could not be copied, this function will pad the copied
26466- * data to the requested size using zero bytes.
26467- */
26468-unsigned long
26469-_copy_from_user(void *to, const void __user *from, unsigned long n)
26470-{
26471- if (access_ok(VERIFY_READ, from, n))
26472- n = __copy_from_user(to, from, n);
26473- else
26474- memset(to, 0, n);
26475- return n;
26476-}
26477-EXPORT_SYMBOL(_copy_from_user);
26478-
26479 void copy_from_user_overflow(void)
26480 {
26481 WARN(1, "Buffer overflow detected!\n");
26482 }
26483 EXPORT_SYMBOL(copy_from_user_overflow);
26484+
26485+void copy_to_user_overflow(void)
26486+{
26487+ WARN(1, "Buffer overflow detected!\n");
26488+}
26489+EXPORT_SYMBOL(copy_to_user_overflow);
26490+
26491+#ifdef CONFIG_PAX_MEMORY_UDEREF
26492+void __set_fs(mm_segment_t x)
26493+{
26494+ switch (x.seg) {
26495+ case 0:
26496+ loadsegment(gs, 0);
26497+ break;
26498+ case TASK_SIZE_MAX:
26499+ loadsegment(gs, __USER_DS);
26500+ break;
26501+ case -1UL:
26502+ loadsegment(gs, __KERNEL_DS);
26503+ break;
26504+ default:
26505+ BUG();
26506+ }
26507+ return;
26508+}
26509+EXPORT_SYMBOL(__set_fs);
26510+
26511+void set_fs(mm_segment_t x)
26512+{
26513+ current_thread_info()->addr_limit = x;
26514+ __set_fs(x);
26515+}
26516+EXPORT_SYMBOL(set_fs);
26517+#endif
26518diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
26519index 05928aa..b33dea1 100644
26520--- a/arch/x86/lib/usercopy_64.c
26521+++ b/arch/x86/lib/usercopy_64.c
26522@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
26523 _ASM_EXTABLE(0b,3b)
26524 _ASM_EXTABLE(1b,2b)
26525 : [size8] "=&c"(size), [dst] "=&D" (__d0)
26526- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
26527+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
26528 [zero] "r" (0UL), [eight] "r" (8UL));
26529 clac();
26530 return size;
26531@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
26532 }
26533 EXPORT_SYMBOL(clear_user);
26534
26535-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
26536+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
26537 {
26538- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
26539- return copy_user_generic((__force void *)to, (__force void *)from, len);
26540- }
26541- return len;
26542+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
26543+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
26544+ return len;
26545 }
26546 EXPORT_SYMBOL(copy_in_user);
26547
26548@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
26549 * it is not necessary to optimize tail handling.
26550 */
26551 unsigned long
26552-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
26553+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
26554 {
26555 char c;
26556 unsigned zero_len;
26557@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
26558 clac();
26559 return len;
26560 }
26561+
26562+void copy_from_user_overflow(void)
26563+{
26564+ WARN(1, "Buffer overflow detected!\n");
26565+}
26566+EXPORT_SYMBOL(copy_from_user_overflow);
26567+
26568+void copy_to_user_overflow(void)
26569+{
26570+ WARN(1, "Buffer overflow detected!\n");
26571+}
26572+EXPORT_SYMBOL(copy_to_user_overflow);
26573diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
26574index 903ec1e..c4166b2 100644
26575--- a/arch/x86/mm/extable.c
26576+++ b/arch/x86/mm/extable.c
26577@@ -6,12 +6,24 @@
26578 static inline unsigned long
26579 ex_insn_addr(const struct exception_table_entry *x)
26580 {
26581- return (unsigned long)&x->insn + x->insn;
26582+ unsigned long reloc = 0;
26583+
26584+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26585+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26586+#endif
26587+
26588+ return (unsigned long)&x->insn + x->insn + reloc;
26589 }
26590 static inline unsigned long
26591 ex_fixup_addr(const struct exception_table_entry *x)
26592 {
26593- return (unsigned long)&x->fixup + x->fixup;
26594+ unsigned long reloc = 0;
26595+
26596+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26597+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26598+#endif
26599+
26600+ return (unsigned long)&x->fixup + x->fixup + reloc;
26601 }
26602
26603 int fixup_exception(struct pt_regs *regs)
26604@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
26605 unsigned long new_ip;
26606
26607 #ifdef CONFIG_PNPBIOS
26608- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
26609+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
26610 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
26611 extern u32 pnp_bios_is_utter_crap;
26612 pnp_bios_is_utter_crap = 1;
26613@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
26614 i += 4;
26615 p->fixup -= i;
26616 i += 4;
26617+
26618+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26619+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
26620+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26621+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26622+#endif
26623+
26624 }
26625 }
26626
26627diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
26628index fb674fd..272f369 100644
26629--- a/arch/x86/mm/fault.c
26630+++ b/arch/x86/mm/fault.c
26631@@ -13,12 +13,19 @@
26632 #include <linux/perf_event.h> /* perf_sw_event */
26633 #include <linux/hugetlb.h> /* hstate_index_to_shift */
26634 #include <linux/prefetch.h> /* prefetchw */
26635+#include <linux/unistd.h>
26636+#include <linux/compiler.h>
26637
26638 #include <asm/traps.h> /* dotraplinkage, ... */
26639 #include <asm/pgalloc.h> /* pgd_*(), ... */
26640 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
26641 #include <asm/fixmap.h> /* VSYSCALL_START */
26642 #include <asm/context_tracking.h> /* exception_enter(), ... */
26643+#include <asm/tlbflush.h>
26644+
26645+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26646+#include <asm/stacktrace.h>
26647+#endif
26648
26649 /*
26650 * Page fault error code bits:
26651@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
26652 int ret = 0;
26653
26654 /* kprobe_running() needs smp_processor_id() */
26655- if (kprobes_built_in() && !user_mode_vm(regs)) {
26656+ if (kprobes_built_in() && !user_mode(regs)) {
26657 preempt_disable();
26658 if (kprobe_running() && kprobe_fault_handler(regs, 14))
26659 ret = 1;
26660@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
26661 return !instr_lo || (instr_lo>>1) == 1;
26662 case 0x00:
26663 /* Prefetch instruction is 0x0F0D or 0x0F18 */
26664- if (probe_kernel_address(instr, opcode))
26665+ if (user_mode(regs)) {
26666+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
26667+ return 0;
26668+ } else if (probe_kernel_address(instr, opcode))
26669 return 0;
26670
26671 *prefetch = (instr_lo == 0xF) &&
26672@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
26673 while (instr < max_instr) {
26674 unsigned char opcode;
26675
26676- if (probe_kernel_address(instr, opcode))
26677+ if (user_mode(regs)) {
26678+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
26679+ break;
26680+ } else if (probe_kernel_address(instr, opcode))
26681 break;
26682
26683 instr++;
26684@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
26685 force_sig_info(si_signo, &info, tsk);
26686 }
26687
26688+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26689+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
26690+#endif
26691+
26692+#ifdef CONFIG_PAX_EMUTRAMP
26693+static int pax_handle_fetch_fault(struct pt_regs *regs);
26694+#endif
26695+
26696+#ifdef CONFIG_PAX_PAGEEXEC
26697+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
26698+{
26699+ pgd_t *pgd;
26700+ pud_t *pud;
26701+ pmd_t *pmd;
26702+
26703+ pgd = pgd_offset(mm, address);
26704+ if (!pgd_present(*pgd))
26705+ return NULL;
26706+ pud = pud_offset(pgd, address);
26707+ if (!pud_present(*pud))
26708+ return NULL;
26709+ pmd = pmd_offset(pud, address);
26710+ if (!pmd_present(*pmd))
26711+ return NULL;
26712+ return pmd;
26713+}
26714+#endif
26715+
26716 DEFINE_SPINLOCK(pgd_lock);
26717 LIST_HEAD(pgd_list);
26718
26719@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
26720 for (address = VMALLOC_START & PMD_MASK;
26721 address >= TASK_SIZE && address < FIXADDR_TOP;
26722 address += PMD_SIZE) {
26723+
26724+#ifdef CONFIG_PAX_PER_CPU_PGD
26725+ unsigned long cpu;
26726+#else
26727 struct page *page;
26728+#endif
26729
26730 spin_lock(&pgd_lock);
26731+
26732+#ifdef CONFIG_PAX_PER_CPU_PGD
26733+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26734+ pgd_t *pgd = get_cpu_pgd(cpu);
26735+ pmd_t *ret;
26736+#else
26737 list_for_each_entry(page, &pgd_list, lru) {
26738+ pgd_t *pgd = page_address(page);
26739 spinlock_t *pgt_lock;
26740 pmd_t *ret;
26741
26742@@ -243,8 +296,13 @@ void vmalloc_sync_all(void)
26743 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
26744
26745 spin_lock(pgt_lock);
26746- ret = vmalloc_sync_one(page_address(page), address);
26747+#endif
26748+
26749+ ret = vmalloc_sync_one(pgd, address);
26750+
26751+#ifndef CONFIG_PAX_PER_CPU_PGD
26752 spin_unlock(pgt_lock);
26753+#endif
26754
26755 if (!ret)
26756 break;
26757@@ -278,6 +336,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
26758 * an interrupt in the middle of a task switch..
26759 */
26760 pgd_paddr = read_cr3();
26761+
26762+#ifdef CONFIG_PAX_PER_CPU_PGD
26763+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
26764+#endif
26765+
26766 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
26767 if (!pmd_k)
26768 return -1;
26769@@ -373,7 +436,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
26770 * happen within a race in page table update. In the later
26771 * case just flush:
26772 */
26773+
26774+#ifdef CONFIG_PAX_PER_CPU_PGD
26775+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
26776+ pgd = pgd_offset_cpu(smp_processor_id(), address);
26777+#else
26778 pgd = pgd_offset(current->active_mm, address);
26779+#endif
26780+
26781 pgd_ref = pgd_offset_k(address);
26782 if (pgd_none(*pgd_ref))
26783 return -1;
26784@@ -541,7 +611,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
26785 static int is_errata100(struct pt_regs *regs, unsigned long address)
26786 {
26787 #ifdef CONFIG_X86_64
26788- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
26789+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
26790 return 1;
26791 #endif
26792 return 0;
26793@@ -568,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
26794 }
26795
26796 static const char nx_warning[] = KERN_CRIT
26797-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
26798+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
26799
26800 static void
26801 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
26802@@ -577,15 +647,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
26803 if (!oops_may_print())
26804 return;
26805
26806- if (error_code & PF_INSTR) {
26807+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
26808 unsigned int level;
26809
26810 pte_t *pte = lookup_address(address, &level);
26811
26812 if (pte && pte_present(*pte) && !pte_exec(*pte))
26813- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
26814+ printk(nx_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
26815 }
26816
26817+#ifdef CONFIG_PAX_KERNEXEC
26818+ if (init_mm.start_code <= address && address < init_mm.end_code) {
26819+ if (current->signal->curr_ip)
26820+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
26821+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
26822+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
26823+ else
26824+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
26825+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
26826+ }
26827+#endif
26828+
26829 printk(KERN_ALERT "BUG: unable to handle kernel ");
26830 if (address < PAGE_SIZE)
26831 printk(KERN_CONT "NULL pointer dereference");
26832@@ -748,6 +830,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
26833 return;
26834 }
26835 #endif
26836+
26837+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26838+ if (pax_is_fetch_fault(regs, error_code, address)) {
26839+
26840+#ifdef CONFIG_PAX_EMUTRAMP
26841+ switch (pax_handle_fetch_fault(regs)) {
26842+ case 2:
26843+ return;
26844+ }
26845+#endif
26846+
26847+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
26848+ do_group_exit(SIGKILL);
26849+ }
26850+#endif
26851+
26852 /* Kernel addresses are always protection faults: */
26853 if (address >= TASK_SIZE)
26854 error_code |= PF_PROT;
26855@@ -833,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
26856 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
26857 printk(KERN_ERR
26858 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
26859- tsk->comm, tsk->pid, address);
26860+ tsk->comm, task_pid_nr(tsk), address);
26861 code = BUS_MCEERR_AR;
26862 }
26863 #endif
26864@@ -896,6 +994,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
26865 return 1;
26866 }
26867
26868+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
26869+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
26870+{
26871+ pte_t *pte;
26872+ pmd_t *pmd;
26873+ spinlock_t *ptl;
26874+ unsigned char pte_mask;
26875+
26876+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
26877+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
26878+ return 0;
26879+
26880+ /* PaX: it's our fault, let's handle it if we can */
26881+
26882+ /* PaX: take a look at read faults before acquiring any locks */
26883+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
26884+ /* instruction fetch attempt from a protected page in user mode */
26885+ up_read(&mm->mmap_sem);
26886+
26887+#ifdef CONFIG_PAX_EMUTRAMP
26888+ switch (pax_handle_fetch_fault(regs)) {
26889+ case 2:
26890+ return 1;
26891+ }
26892+#endif
26893+
26894+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
26895+ do_group_exit(SIGKILL);
26896+ }
26897+
26898+ pmd = pax_get_pmd(mm, address);
26899+ if (unlikely(!pmd))
26900+ return 0;
26901+
26902+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
26903+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
26904+ pte_unmap_unlock(pte, ptl);
26905+ return 0;
26906+ }
26907+
26908+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
26909+ /* write attempt to a protected page in user mode */
26910+ pte_unmap_unlock(pte, ptl);
26911+ return 0;
26912+ }
26913+
26914+#ifdef CONFIG_SMP
26915+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
26916+#else
26917+ if (likely(address > get_limit(regs->cs)))
26918+#endif
26919+ {
26920+ set_pte(pte, pte_mkread(*pte));
26921+ __flush_tlb_one(address);
26922+ pte_unmap_unlock(pte, ptl);
26923+ up_read(&mm->mmap_sem);
26924+ return 1;
26925+ }
26926+
26927+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
26928+
26929+ /*
26930+ * PaX: fill DTLB with user rights and retry
26931+ */
26932+ __asm__ __volatile__ (
26933+ "orb %2,(%1)\n"
26934+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
26935+/*
26936+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
26937+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
26938+ * page fault when examined during a TLB load attempt. this is true not only
26939+ * for PTEs holding a non-present entry but also present entries that will
26940+ * raise a page fault (such as those set up by PaX, or the copy-on-write
26941+ * mechanism). in effect it means that we do *not* need to flush the TLBs
26942+ * for our target pages since their PTEs are simply not in the TLBs at all.
26943+
26944+ * the best thing in omitting it is that we gain around 15-20% speed in the
26945+ * fast path of the page fault handler and can get rid of tracing since we
26946+ * can no longer flush unintended entries.
26947+ */
26948+ "invlpg (%0)\n"
26949+#endif
26950+ __copyuser_seg"testb $0,(%0)\n"
26951+ "xorb %3,(%1)\n"
26952+ :
26953+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
26954+ : "memory", "cc");
26955+ pte_unmap_unlock(pte, ptl);
26956+ up_read(&mm->mmap_sem);
26957+ return 1;
26958+}
26959+#endif
26960+
26961 /*
26962 * Handle a spurious fault caused by a stale TLB entry.
26963 *
26964@@ -968,6 +1159,9 @@ int show_unhandled_signals = 1;
26965 static inline int
26966 access_error(unsigned long error_code, struct vm_area_struct *vma)
26967 {
26968+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
26969+ return 1;
26970+
26971 if (error_code & PF_WRITE) {
26972 /* write, present and write, not present: */
26973 if (unlikely(!(vma->vm_flags & VM_WRITE)))
26974@@ -996,7 +1190,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
26975 if (error_code & PF_USER)
26976 return false;
26977
26978- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
26979+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
26980 return false;
26981
26982 return true;
26983@@ -1012,18 +1206,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
26984 {
26985 struct vm_area_struct *vma;
26986 struct task_struct *tsk;
26987- unsigned long address;
26988 struct mm_struct *mm;
26989 int fault;
26990 int write = error_code & PF_WRITE;
26991 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
26992 (write ? FAULT_FLAG_WRITE : 0);
26993
26994- tsk = current;
26995- mm = tsk->mm;
26996-
26997 /* Get the faulting address: */
26998- address = read_cr2();
26999+ unsigned long address = read_cr2();
27000+
27001+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27002+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
27003+ if (!search_exception_tables(regs->ip)) {
27004+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27005+ bad_area_nosemaphore(regs, error_code, address);
27006+ return;
27007+ }
27008+ if (address < PAX_USER_SHADOW_BASE) {
27009+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27010+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
27011+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
27012+ } else
27013+ address -= PAX_USER_SHADOW_BASE;
27014+ }
27015+#endif
27016+
27017+ tsk = current;
27018+ mm = tsk->mm;
27019
27020 /*
27021 * Detect and handle instructions that would cause a page fault for
27022@@ -1084,7 +1293,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27023 * User-mode registers count as a user access even for any
27024 * potential system fault or CPU buglet:
27025 */
27026- if (user_mode_vm(regs)) {
27027+ if (user_mode(regs)) {
27028 local_irq_enable();
27029 error_code |= PF_USER;
27030 } else {
27031@@ -1146,6 +1355,11 @@ retry:
27032 might_sleep();
27033 }
27034
27035+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27036+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
27037+ return;
27038+#endif
27039+
27040 vma = find_vma(mm, address);
27041 if (unlikely(!vma)) {
27042 bad_area(regs, error_code, address);
27043@@ -1157,18 +1371,24 @@ retry:
27044 bad_area(regs, error_code, address);
27045 return;
27046 }
27047- if (error_code & PF_USER) {
27048- /*
27049- * Accessing the stack below %sp is always a bug.
27050- * The large cushion allows instructions like enter
27051- * and pusha to work. ("enter $65535, $31" pushes
27052- * 32 pointers and then decrements %sp by 65535.)
27053- */
27054- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
27055- bad_area(regs, error_code, address);
27056- return;
27057- }
27058+ /*
27059+ * Accessing the stack below %sp is always a bug.
27060+ * The large cushion allows instructions like enter
27061+ * and pusha to work. ("enter $65535, $31" pushes
27062+ * 32 pointers and then decrements %sp by 65535.)
27063+ */
27064+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
27065+ bad_area(regs, error_code, address);
27066+ return;
27067 }
27068+
27069+#ifdef CONFIG_PAX_SEGMEXEC
27070+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
27071+ bad_area(regs, error_code, address);
27072+ return;
27073+ }
27074+#endif
27075+
27076 if (unlikely(expand_stack(vma, address))) {
27077 bad_area(regs, error_code, address);
27078 return;
27079@@ -1232,3 +1452,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
27080 __do_page_fault(regs, error_code);
27081 exception_exit(regs);
27082 }
27083+
27084+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27085+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
27086+{
27087+ struct mm_struct *mm = current->mm;
27088+ unsigned long ip = regs->ip;
27089+
27090+ if (v8086_mode(regs))
27091+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
27092+
27093+#ifdef CONFIG_PAX_PAGEEXEC
27094+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
27095+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
27096+ return true;
27097+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
27098+ return true;
27099+ return false;
27100+ }
27101+#endif
27102+
27103+#ifdef CONFIG_PAX_SEGMEXEC
27104+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
27105+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
27106+ return true;
27107+ return false;
27108+ }
27109+#endif
27110+
27111+ return false;
27112+}
27113+#endif
27114+
27115+#ifdef CONFIG_PAX_EMUTRAMP
27116+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
27117+{
27118+ int err;
27119+
27120+ do { /* PaX: libffi trampoline emulation */
27121+ unsigned char mov, jmp;
27122+ unsigned int addr1, addr2;
27123+
27124+#ifdef CONFIG_X86_64
27125+ if ((regs->ip + 9) >> 32)
27126+ break;
27127+#endif
27128+
27129+ err = get_user(mov, (unsigned char __user *)regs->ip);
27130+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27131+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27132+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27133+
27134+ if (err)
27135+ break;
27136+
27137+ if (mov == 0xB8 && jmp == 0xE9) {
27138+ regs->ax = addr1;
27139+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27140+ return 2;
27141+ }
27142+ } while (0);
27143+
27144+ do { /* PaX: gcc trampoline emulation #1 */
27145+ unsigned char mov1, mov2;
27146+ unsigned short jmp;
27147+ unsigned int addr1, addr2;
27148+
27149+#ifdef CONFIG_X86_64
27150+ if ((regs->ip + 11) >> 32)
27151+ break;
27152+#endif
27153+
27154+ err = get_user(mov1, (unsigned char __user *)regs->ip);
27155+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27156+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
27157+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27158+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
27159+
27160+ if (err)
27161+ break;
27162+
27163+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
27164+ regs->cx = addr1;
27165+ regs->ax = addr2;
27166+ regs->ip = addr2;
27167+ return 2;
27168+ }
27169+ } while (0);
27170+
27171+ do { /* PaX: gcc trampoline emulation #2 */
27172+ unsigned char mov, jmp;
27173+ unsigned int addr1, addr2;
27174+
27175+#ifdef CONFIG_X86_64
27176+ if ((regs->ip + 9) >> 32)
27177+ break;
27178+#endif
27179+
27180+ err = get_user(mov, (unsigned char __user *)regs->ip);
27181+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27182+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27183+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27184+
27185+ if (err)
27186+ break;
27187+
27188+ if (mov == 0xB9 && jmp == 0xE9) {
27189+ regs->cx = addr1;
27190+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27191+ return 2;
27192+ }
27193+ } while (0);
27194+
27195+ return 1; /* PaX in action */
27196+}
27197+
27198+#ifdef CONFIG_X86_64
27199+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
27200+{
27201+ int err;
27202+
27203+ do { /* PaX: libffi trampoline emulation */
27204+ unsigned short mov1, mov2, jmp1;
27205+ unsigned char stcclc, jmp2;
27206+ unsigned long addr1, addr2;
27207+
27208+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27209+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27210+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27211+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27212+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
27213+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
27214+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
27215+
27216+ if (err)
27217+ break;
27218+
27219+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27220+ regs->r11 = addr1;
27221+ regs->r10 = addr2;
27222+ if (stcclc == 0xF8)
27223+ regs->flags &= ~X86_EFLAGS_CF;
27224+ else
27225+ regs->flags |= X86_EFLAGS_CF;
27226+ regs->ip = addr1;
27227+ return 2;
27228+ }
27229+ } while (0);
27230+
27231+ do { /* PaX: gcc trampoline emulation #1 */
27232+ unsigned short mov1, mov2, jmp1;
27233+ unsigned char jmp2;
27234+ unsigned int addr1;
27235+ unsigned long addr2;
27236+
27237+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27238+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
27239+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
27240+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
27241+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
27242+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
27243+
27244+ if (err)
27245+ break;
27246+
27247+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27248+ regs->r11 = addr1;
27249+ regs->r10 = addr2;
27250+ regs->ip = addr1;
27251+ return 2;
27252+ }
27253+ } while (0);
27254+
27255+ do { /* PaX: gcc trampoline emulation #2 */
27256+ unsigned short mov1, mov2, jmp1;
27257+ unsigned char jmp2;
27258+ unsigned long addr1, addr2;
27259+
27260+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27261+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27262+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27263+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27264+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
27265+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
27266+
27267+ if (err)
27268+ break;
27269+
27270+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27271+ regs->r11 = addr1;
27272+ regs->r10 = addr2;
27273+ regs->ip = addr1;
27274+ return 2;
27275+ }
27276+ } while (0);
27277+
27278+ return 1; /* PaX in action */
27279+}
27280+#endif
27281+
27282+/*
27283+ * PaX: decide what to do with offenders (regs->ip = fault address)
27284+ *
27285+ * returns 1 when task should be killed
27286+ * 2 when gcc trampoline was detected
27287+ */
27288+static int pax_handle_fetch_fault(struct pt_regs *regs)
27289+{
27290+ if (v8086_mode(regs))
27291+ return 1;
27292+
27293+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
27294+ return 1;
27295+
27296+#ifdef CONFIG_X86_32
27297+ return pax_handle_fetch_fault_32(regs);
27298+#else
27299+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
27300+ return pax_handle_fetch_fault_32(regs);
27301+ else
27302+ return pax_handle_fetch_fault_64(regs);
27303+#endif
27304+}
27305+#endif
27306+
27307+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27308+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
27309+{
27310+ long i;
27311+
27312+ printk(KERN_ERR "PAX: bytes at PC: ");
27313+ for (i = 0; i < 20; i++) {
27314+ unsigned char c;
27315+ if (get_user(c, (unsigned char __force_user *)pc+i))
27316+ printk(KERN_CONT "?? ");
27317+ else
27318+ printk(KERN_CONT "%02x ", c);
27319+ }
27320+ printk("\n");
27321+
27322+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
27323+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
27324+ unsigned long c;
27325+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
27326+#ifdef CONFIG_X86_32
27327+ printk(KERN_CONT "???????? ");
27328+#else
27329+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
27330+ printk(KERN_CONT "???????? ???????? ");
27331+ else
27332+ printk(KERN_CONT "???????????????? ");
27333+#endif
27334+ } else {
27335+#ifdef CONFIG_X86_64
27336+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
27337+ printk(KERN_CONT "%08x ", (unsigned int)c);
27338+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
27339+ } else
27340+#endif
27341+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
27342+ }
27343+ }
27344+ printk("\n");
27345+}
27346+#endif
27347+
27348+/**
27349+ * probe_kernel_write(): safely attempt to write to a location
27350+ * @dst: address to write to
27351+ * @src: pointer to the data that shall be written
27352+ * @size: size of the data chunk
27353+ *
27354+ * Safely write to address @dst from the buffer at @src. If a kernel fault
27355+ * happens, handle that and return -EFAULT.
27356+ */
27357+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
27358+{
27359+ long ret;
27360+ mm_segment_t old_fs = get_fs();
27361+
27362+ set_fs(KERNEL_DS);
27363+ pagefault_disable();
27364+ pax_open_kernel();
27365+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
27366+ pax_close_kernel();
27367+ pagefault_enable();
27368+ set_fs(old_fs);
27369+
27370+ return ret ? -EFAULT : 0;
27371+}
27372diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
27373index dd74e46..7d26398 100644
27374--- a/arch/x86/mm/gup.c
27375+++ b/arch/x86/mm/gup.c
27376@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
27377 addr = start;
27378 len = (unsigned long) nr_pages << PAGE_SHIFT;
27379 end = start + len;
27380- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27381+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27382 (void __user *)start, len)))
27383 return 0;
27384
27385diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
27386index 6f31ee5..8ee4164 100644
27387--- a/arch/x86/mm/highmem_32.c
27388+++ b/arch/x86/mm/highmem_32.c
27389@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
27390 idx = type + KM_TYPE_NR*smp_processor_id();
27391 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
27392 BUG_ON(!pte_none(*(kmap_pte-idx)));
27393+
27394+ pax_open_kernel();
27395 set_pte(kmap_pte-idx, mk_pte(page, prot));
27396+ pax_close_kernel();
27397+
27398 arch_flush_lazy_mmu_mode();
27399
27400 return (void *)vaddr;
27401diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
27402index ae1aa71..56316db 100644
27403--- a/arch/x86/mm/hugetlbpage.c
27404+++ b/arch/x86/mm/hugetlbpage.c
27405@@ -279,6 +279,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
27406 info.flags = 0;
27407 info.length = len;
27408 info.low_limit = TASK_UNMAPPED_BASE;
27409+
27410+#ifdef CONFIG_PAX_RANDMMAP
27411+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27412+ info.low_limit += current->mm->delta_mmap;
27413+#endif
27414+
27415 info.high_limit = TASK_SIZE;
27416 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
27417 info.align_offset = 0;
27418@@ -311,6 +317,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
27419 VM_BUG_ON(addr != -ENOMEM);
27420 info.flags = 0;
27421 info.low_limit = TASK_UNMAPPED_BASE;
27422+
27423+#ifdef CONFIG_PAX_RANDMMAP
27424+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27425+ info.low_limit += current->mm->delta_mmap;
27426+#endif
27427+
27428 info.high_limit = TASK_SIZE;
27429 addr = vm_unmapped_area(&info);
27430 }
27431@@ -325,10 +337,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
27432 struct hstate *h = hstate_file(file);
27433 struct mm_struct *mm = current->mm;
27434 struct vm_area_struct *vma;
27435+ unsigned long pax_task_size = TASK_SIZE;
27436+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
27437
27438 if (len & ~huge_page_mask(h))
27439 return -EINVAL;
27440- if (len > TASK_SIZE)
27441+
27442+#ifdef CONFIG_PAX_SEGMEXEC
27443+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27444+ pax_task_size = SEGMEXEC_TASK_SIZE;
27445+#endif
27446+
27447+ pax_task_size -= PAGE_SIZE;
27448+
27449+ if (len > pax_task_size)
27450 return -ENOMEM;
27451
27452 if (flags & MAP_FIXED) {
27453@@ -337,11 +359,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
27454 return addr;
27455 }
27456
27457+#ifdef CONFIG_PAX_RANDMMAP
27458+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27459+#endif
27460+
27461 if (addr) {
27462 addr = ALIGN(addr, huge_page_size(h));
27463 vma = find_vma(mm, addr);
27464- if (TASK_SIZE - len >= addr &&
27465- (!vma || addr + len <= vma->vm_start))
27466+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27467 return addr;
27468 }
27469 if (mm->get_unmapped_area == arch_get_unmapped_area)
27470diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
27471index d7aea41..0fc945b 100644
27472--- a/arch/x86/mm/init.c
27473+++ b/arch/x86/mm/init.c
27474@@ -4,6 +4,7 @@
27475 #include <linux/swap.h>
27476 #include <linux/memblock.h>
27477 #include <linux/bootmem.h> /* for max_low_pfn */
27478+#include <linux/tboot.h>
27479
27480 #include <asm/cacheflush.h>
27481 #include <asm/e820.h>
27482@@ -16,6 +17,8 @@
27483 #include <asm/tlb.h>
27484 #include <asm/proto.h>
27485 #include <asm/dma.h> /* for MAX_DMA_PFN */
27486+#include <asm/desc.h>
27487+#include <asm/bios_ebda.h>
27488
27489 unsigned long __initdata pgt_buf_start;
27490 unsigned long __meminitdata pgt_buf_end;
27491@@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
27492 {
27493 int i;
27494 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
27495- unsigned long start = 0, good_end;
27496+ unsigned long start = 0x100000, good_end;
27497 phys_addr_t base;
27498
27499 for (i = 0; i < nr_range; i++) {
27500@@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
27501 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
27502 * mmio resources as well as potential bios/acpi data regions.
27503 */
27504+
27505+#ifdef CONFIG_GRKERNSEC_KMEM
27506+static unsigned int ebda_start __read_only;
27507+static unsigned int ebda_end __read_only;
27508+#endif
27509+
27510 int devmem_is_allowed(unsigned long pagenr)
27511 {
27512- if (pagenr < 256)
27513+#ifdef CONFIG_GRKERNSEC_KMEM
27514+ /* allow BDA */
27515+ if (!pagenr)
27516 return 1;
27517+ /* allow EBDA */
27518+ if (pagenr >= ebda_start && pagenr < ebda_end)
27519+ return 1;
27520+ /* if tboot is in use, allow access to its hardcoded serial log range */
27521+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
27522+ return 1;
27523+#else
27524+ if (!pagenr)
27525+ return 1;
27526+#ifdef CONFIG_VM86
27527+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
27528+ return 1;
27529+#endif
27530+#endif
27531+
27532+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
27533+ return 1;
27534+#ifdef CONFIG_GRKERNSEC_KMEM
27535+ /* throw out everything else below 1MB */
27536+ if (pagenr <= 256)
27537+ return 0;
27538+#endif
27539 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
27540 return 0;
27541 if (!page_is_ram(pagenr))
27542@@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
27543 #endif
27544 }
27545
27546+#ifdef CONFIG_GRKERNSEC_KMEM
27547+static inline void gr_init_ebda(void)
27548+{
27549+ unsigned int ebda_addr;
27550+ unsigned int ebda_size = 0;
27551+
27552+ ebda_addr = get_bios_ebda();
27553+ if (ebda_addr) {
27554+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
27555+ ebda_size <<= 10;
27556+ }
27557+ if (ebda_addr && ebda_size) {
27558+ ebda_start = ebda_addr >> PAGE_SHIFT;
27559+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
27560+ } else {
27561+ ebda_start = 0x9f000 >> PAGE_SHIFT;
27562+ ebda_end = 0xa0000 >> PAGE_SHIFT;
27563+ }
27564+}
27565+#else
27566+static inline void gr_init_ebda(void) { }
27567+#endif
27568+
27569 void free_initmem(void)
27570 {
27571+#ifdef CONFIG_PAX_KERNEXEC
27572+#ifdef CONFIG_X86_32
27573+ /* PaX: limit KERNEL_CS to actual size */
27574+ unsigned long addr, limit;
27575+ struct desc_struct d;
27576+ int cpu;
27577+#else
27578+ pgd_t *pgd;
27579+ pud_t *pud;
27580+ pmd_t *pmd;
27581+ unsigned long addr, end;
27582+#endif
27583+#endif
27584+
27585+ gr_init_ebda();
27586+
27587+#ifdef CONFIG_PAX_KERNEXEC
27588+#ifdef CONFIG_X86_32
27589+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
27590+ limit = (limit - 1UL) >> PAGE_SHIFT;
27591+
27592+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
27593+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27594+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
27595+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
27596+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
27597+ }
27598+
27599+ /* PaX: make KERNEL_CS read-only */
27600+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
27601+ if (!paravirt_enabled())
27602+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
27603+/*
27604+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
27605+ pgd = pgd_offset_k(addr);
27606+ pud = pud_offset(pgd, addr);
27607+ pmd = pmd_offset(pud, addr);
27608+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
27609+ }
27610+*/
27611+#ifdef CONFIG_X86_PAE
27612+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
27613+/*
27614+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
27615+ pgd = pgd_offset_k(addr);
27616+ pud = pud_offset(pgd, addr);
27617+ pmd = pmd_offset(pud, addr);
27618+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
27619+ }
27620+*/
27621+#endif
27622+
27623+#ifdef CONFIG_MODULES
27624+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
27625+#endif
27626+
27627+#else
27628+ /* PaX: make kernel code/rodata read-only, rest non-executable */
27629+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
27630+ pgd = pgd_offset_k(addr);
27631+ pud = pud_offset(pgd, addr);
27632+ pmd = pmd_offset(pud, addr);
27633+ if (!pmd_present(*pmd))
27634+ continue;
27635+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
27636+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
27637+ else
27638+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
27639+ }
27640+
27641+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
27642+ end = addr + KERNEL_IMAGE_SIZE;
27643+ for (; addr < end; addr += PMD_SIZE) {
27644+ pgd = pgd_offset_k(addr);
27645+ pud = pud_offset(pgd, addr);
27646+ pmd = pmd_offset(pud, addr);
27647+ if (!pmd_present(*pmd))
27648+ continue;
27649+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
27650+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
27651+ }
27652+#endif
27653+
27654+ flush_tlb_all();
27655+#endif
27656+
27657 free_init_pages("unused kernel memory",
27658 (unsigned long)(&__init_begin),
27659 (unsigned long)(&__init_end));
27660diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
27661index 745d66b..56bf568 100644
27662--- a/arch/x86/mm/init_32.c
27663+++ b/arch/x86/mm/init_32.c
27664@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
27665 }
27666
27667 /*
27668- * Creates a middle page table and puts a pointer to it in the
27669- * given global directory entry. This only returns the gd entry
27670- * in non-PAE compilation mode, since the middle layer is folded.
27671- */
27672-static pmd_t * __init one_md_table_init(pgd_t *pgd)
27673-{
27674- pud_t *pud;
27675- pmd_t *pmd_table;
27676-
27677-#ifdef CONFIG_X86_PAE
27678- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
27679- if (after_bootmem)
27680- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
27681- else
27682- pmd_table = (pmd_t *)alloc_low_page();
27683- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
27684- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
27685- pud = pud_offset(pgd, 0);
27686- BUG_ON(pmd_table != pmd_offset(pud, 0));
27687-
27688- return pmd_table;
27689- }
27690-#endif
27691- pud = pud_offset(pgd, 0);
27692- pmd_table = pmd_offset(pud, 0);
27693-
27694- return pmd_table;
27695-}
27696-
27697-/*
27698 * Create a page table and place a pointer to it in a middle page
27699 * directory entry:
27700 */
27701@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
27702 page_table = (pte_t *)alloc_low_page();
27703
27704 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
27705+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27706+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
27707+#else
27708 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
27709+#endif
27710 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
27711 }
27712
27713 return pte_offset_kernel(pmd, 0);
27714 }
27715
27716+static pmd_t * __init one_md_table_init(pgd_t *pgd)
27717+{
27718+ pud_t *pud;
27719+ pmd_t *pmd_table;
27720+
27721+ pud = pud_offset(pgd, 0);
27722+ pmd_table = pmd_offset(pud, 0);
27723+
27724+ return pmd_table;
27725+}
27726+
27727 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
27728 {
27729 int pgd_idx = pgd_index(vaddr);
27730@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
27731 int pgd_idx, pmd_idx;
27732 unsigned long vaddr;
27733 pgd_t *pgd;
27734+ pud_t *pud;
27735 pmd_t *pmd;
27736 pte_t *pte = NULL;
27737
27738@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
27739 pgd = pgd_base + pgd_idx;
27740
27741 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
27742- pmd = one_md_table_init(pgd);
27743- pmd = pmd + pmd_index(vaddr);
27744+ pud = pud_offset(pgd, vaddr);
27745+ pmd = pmd_offset(pud, vaddr);
27746+
27747+#ifdef CONFIG_X86_PAE
27748+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
27749+#endif
27750+
27751 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
27752 pmd++, pmd_idx++) {
27753 pte = page_table_kmap_check(one_page_table_init(pmd),
27754@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
27755 }
27756 }
27757
27758-static inline int is_kernel_text(unsigned long addr)
27759+static inline int is_kernel_text(unsigned long start, unsigned long end)
27760 {
27761- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
27762- return 1;
27763- return 0;
27764+ if ((start > ktla_ktva((unsigned long)_etext) ||
27765+ end <= ktla_ktva((unsigned long)_stext)) &&
27766+ (start > ktla_ktva((unsigned long)_einittext) ||
27767+ end <= ktla_ktva((unsigned long)_sinittext)) &&
27768+
27769+#ifdef CONFIG_ACPI_SLEEP
27770+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
27771+#endif
27772+
27773+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
27774+ return 0;
27775+ return 1;
27776 }
27777
27778 /*
27779@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
27780 unsigned long last_map_addr = end;
27781 unsigned long start_pfn, end_pfn;
27782 pgd_t *pgd_base = swapper_pg_dir;
27783- int pgd_idx, pmd_idx, pte_ofs;
27784+ unsigned int pgd_idx, pmd_idx, pte_ofs;
27785 unsigned long pfn;
27786 pgd_t *pgd;
27787+ pud_t *pud;
27788 pmd_t *pmd;
27789 pte_t *pte;
27790 unsigned pages_2m, pages_4k;
27791@@ -280,8 +281,13 @@ repeat:
27792 pfn = start_pfn;
27793 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
27794 pgd = pgd_base + pgd_idx;
27795- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
27796- pmd = one_md_table_init(pgd);
27797+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
27798+ pud = pud_offset(pgd, 0);
27799+ pmd = pmd_offset(pud, 0);
27800+
27801+#ifdef CONFIG_X86_PAE
27802+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
27803+#endif
27804
27805 if (pfn >= end_pfn)
27806 continue;
27807@@ -293,14 +299,13 @@ repeat:
27808 #endif
27809 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
27810 pmd++, pmd_idx++) {
27811- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
27812+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
27813
27814 /*
27815 * Map with big pages if possible, otherwise
27816 * create normal page tables:
27817 */
27818 if (use_pse) {
27819- unsigned int addr2;
27820 pgprot_t prot = PAGE_KERNEL_LARGE;
27821 /*
27822 * first pass will use the same initial
27823@@ -310,11 +315,7 @@ repeat:
27824 __pgprot(PTE_IDENT_ATTR |
27825 _PAGE_PSE);
27826
27827- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
27828- PAGE_OFFSET + PAGE_SIZE-1;
27829-
27830- if (is_kernel_text(addr) ||
27831- is_kernel_text(addr2))
27832+ if (is_kernel_text(address, address + PMD_SIZE))
27833 prot = PAGE_KERNEL_LARGE_EXEC;
27834
27835 pages_2m++;
27836@@ -331,7 +332,7 @@ repeat:
27837 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
27838 pte += pte_ofs;
27839 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
27840- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
27841+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
27842 pgprot_t prot = PAGE_KERNEL;
27843 /*
27844 * first pass will use the same initial
27845@@ -339,7 +340,7 @@ repeat:
27846 */
27847 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
27848
27849- if (is_kernel_text(addr))
27850+ if (is_kernel_text(address, address + PAGE_SIZE))
27851 prot = PAGE_KERNEL_EXEC;
27852
27853 pages_4k++;
27854@@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
27855
27856 pud = pud_offset(pgd, va);
27857 pmd = pmd_offset(pud, va);
27858- if (!pmd_present(*pmd))
27859+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
27860 break;
27861
27862 pte = pte_offset_kernel(pmd, va);
27863@@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
27864
27865 static void __init pagetable_init(void)
27866 {
27867- pgd_t *pgd_base = swapper_pg_dir;
27868-
27869- permanent_kmaps_init(pgd_base);
27870+ permanent_kmaps_init(swapper_pg_dir);
27871 }
27872
27873-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
27874+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
27875 EXPORT_SYMBOL_GPL(__supported_pte_mask);
27876
27877 /* user-defined highmem size */
27878@@ -728,6 +727,12 @@ void __init mem_init(void)
27879
27880 pci_iommu_alloc();
27881
27882+#ifdef CONFIG_PAX_PER_CPU_PGD
27883+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
27884+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27885+ KERNEL_PGD_PTRS);
27886+#endif
27887+
27888 #ifdef CONFIG_FLATMEM
27889 BUG_ON(!mem_map);
27890 #endif
27891@@ -754,7 +759,7 @@ void __init mem_init(void)
27892 reservedpages++;
27893
27894 codesize = (unsigned long) &_etext - (unsigned long) &_text;
27895- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
27896+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
27897 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
27898
27899 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
27900@@ -795,10 +800,10 @@ void __init mem_init(void)
27901 ((unsigned long)&__init_end -
27902 (unsigned long)&__init_begin) >> 10,
27903
27904- (unsigned long)&_etext, (unsigned long)&_edata,
27905- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
27906+ (unsigned long)&_sdata, (unsigned long)&_edata,
27907+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
27908
27909- (unsigned long)&_text, (unsigned long)&_etext,
27910+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
27911 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
27912
27913 /*
27914@@ -876,6 +881,7 @@ void set_kernel_text_rw(void)
27915 if (!kernel_set_to_readonly)
27916 return;
27917
27918+ start = ktla_ktva(start);
27919 pr_debug("Set kernel text: %lx - %lx for read write\n",
27920 start, start+size);
27921
27922@@ -890,6 +896,7 @@ void set_kernel_text_ro(void)
27923 if (!kernel_set_to_readonly)
27924 return;
27925
27926+ start = ktla_ktva(start);
27927 pr_debug("Set kernel text: %lx - %lx for read only\n",
27928 start, start+size);
27929
27930@@ -918,6 +925,7 @@ void mark_rodata_ro(void)
27931 unsigned long start = PFN_ALIGN(_text);
27932 unsigned long size = PFN_ALIGN(_etext) - start;
27933
27934+ start = ktla_ktva(start);
27935 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
27936 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
27937 size >> 10);
27938diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
27939index 75c9a6a..498d677 100644
27940--- a/arch/x86/mm/init_64.c
27941+++ b/arch/x86/mm/init_64.c
27942@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
27943 * around without checking the pgd every time.
27944 */
27945
27946-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
27947+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
27948 EXPORT_SYMBOL_GPL(__supported_pte_mask);
27949
27950 int force_personality32;
27951@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
27952
27953 for (address = start; address <= end; address += PGDIR_SIZE) {
27954 const pgd_t *pgd_ref = pgd_offset_k(address);
27955+
27956+#ifdef CONFIG_PAX_PER_CPU_PGD
27957+ unsigned long cpu;
27958+#else
27959 struct page *page;
27960+#endif
27961
27962 if (pgd_none(*pgd_ref))
27963 continue;
27964
27965 spin_lock(&pgd_lock);
27966+
27967+#ifdef CONFIG_PAX_PER_CPU_PGD
27968+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27969+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
27970+#else
27971 list_for_each_entry(page, &pgd_list, lru) {
27972 pgd_t *pgd;
27973 spinlock_t *pgt_lock;
27974@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
27975 /* the pgt_lock only for Xen */
27976 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27977 spin_lock(pgt_lock);
27978+#endif
27979
27980 if (pgd_none(*pgd))
27981 set_pgd(pgd, *pgd_ref);
27982@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
27983 BUG_ON(pgd_page_vaddr(*pgd)
27984 != pgd_page_vaddr(*pgd_ref));
27985
27986+#ifndef CONFIG_PAX_PER_CPU_PGD
27987 spin_unlock(pgt_lock);
27988+#endif
27989+
27990 }
27991 spin_unlock(&pgd_lock);
27992 }
27993@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
27994 {
27995 if (pgd_none(*pgd)) {
27996 pud_t *pud = (pud_t *)spp_getpage();
27997- pgd_populate(&init_mm, pgd, pud);
27998+ pgd_populate_kernel(&init_mm, pgd, pud);
27999 if (pud != pud_offset(pgd, 0))
28000 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
28001 pud, pud_offset(pgd, 0));
28002@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
28003 {
28004 if (pud_none(*pud)) {
28005 pmd_t *pmd = (pmd_t *) spp_getpage();
28006- pud_populate(&init_mm, pud, pmd);
28007+ pud_populate_kernel(&init_mm, pud, pmd);
28008 if (pmd != pmd_offset(pud, 0))
28009 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
28010 pmd, pmd_offset(pud, 0));
28011@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
28012 pmd = fill_pmd(pud, vaddr);
28013 pte = fill_pte(pmd, vaddr);
28014
28015+ pax_open_kernel();
28016 set_pte(pte, new_pte);
28017+ pax_close_kernel();
28018
28019 /*
28020 * It's enough to flush this one mapping.
28021@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
28022 pgd = pgd_offset_k((unsigned long)__va(phys));
28023 if (pgd_none(*pgd)) {
28024 pud = (pud_t *) spp_getpage();
28025- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
28026- _PAGE_USER));
28027+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
28028 }
28029 pud = pud_offset(pgd, (unsigned long)__va(phys));
28030 if (pud_none(*pud)) {
28031 pmd = (pmd_t *) spp_getpage();
28032- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
28033- _PAGE_USER));
28034+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
28035 }
28036 pmd = pmd_offset(pud, phys);
28037 BUG_ON(!pmd_none(*pmd));
28038@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
28039 if (pfn >= pgt_buf_top)
28040 panic("alloc_low_page: ran out of memory");
28041
28042- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28043+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28044 clear_page(adr);
28045 *phys = pfn * PAGE_SIZE;
28046 return adr;
28047@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
28048
28049 phys = __pa(virt);
28050 left = phys & (PAGE_SIZE - 1);
28051- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28052+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28053 adr = (void *)(((unsigned long)adr) | left);
28054
28055 return adr;
28056@@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
28057 unmap_low_page(pmd);
28058
28059 spin_lock(&init_mm.page_table_lock);
28060- pud_populate(&init_mm, pud, __va(pmd_phys));
28061+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
28062 spin_unlock(&init_mm.page_table_lock);
28063 }
28064 __flush_tlb_all();
28065@@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
28066 unmap_low_page(pud);
28067
28068 spin_lock(&init_mm.page_table_lock);
28069- pgd_populate(&init_mm, pgd, __va(pud_phys));
28070+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
28071 spin_unlock(&init_mm.page_table_lock);
28072 pgd_changed = true;
28073 }
28074@@ -693,6 +707,12 @@ void __init mem_init(void)
28075
28076 pci_iommu_alloc();
28077
28078+#ifdef CONFIG_PAX_PER_CPU_PGD
28079+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28080+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28081+ KERNEL_PGD_PTRS);
28082+#endif
28083+
28084 /* clear_bss() already clear the empty_zero_page */
28085
28086 reservedpages = 0;
28087@@ -856,8 +876,8 @@ int kern_addr_valid(unsigned long addr)
28088 static struct vm_area_struct gate_vma = {
28089 .vm_start = VSYSCALL_START,
28090 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
28091- .vm_page_prot = PAGE_READONLY_EXEC,
28092- .vm_flags = VM_READ | VM_EXEC
28093+ .vm_page_prot = PAGE_READONLY,
28094+ .vm_flags = VM_READ
28095 };
28096
28097 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28098@@ -891,7 +911,7 @@ int in_gate_area_no_mm(unsigned long addr)
28099
28100 const char *arch_vma_name(struct vm_area_struct *vma)
28101 {
28102- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28103+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28104 return "[vdso]";
28105 if (vma == &gate_vma)
28106 return "[vsyscall]";
28107diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
28108index 7b179b4..6bd1777 100644
28109--- a/arch/x86/mm/iomap_32.c
28110+++ b/arch/x86/mm/iomap_32.c
28111@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
28112 type = kmap_atomic_idx_push();
28113 idx = type + KM_TYPE_NR * smp_processor_id();
28114 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28115+
28116+ pax_open_kernel();
28117 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
28118+ pax_close_kernel();
28119+
28120 arch_flush_lazy_mmu_mode();
28121
28122 return (void *)vaddr;
28123diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
28124index 78fe3f1..2f9433c 100644
28125--- a/arch/x86/mm/ioremap.c
28126+++ b/arch/x86/mm/ioremap.c
28127@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
28128 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
28129 int is_ram = page_is_ram(pfn);
28130
28131- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
28132+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
28133 return NULL;
28134 WARN_ON_ONCE(is_ram);
28135 }
28136@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
28137 *
28138 * Caller must ensure there is only one unmapping for the same pointer.
28139 */
28140-void iounmap(volatile void __iomem *addr)
28141+void iounmap(const volatile void __iomem *addr)
28142 {
28143 struct vm_struct *p, *o;
28144
28145@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28146
28147 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
28148 if (page_is_ram(start >> PAGE_SHIFT))
28149+#ifdef CONFIG_HIGHMEM
28150+ if ((start >> PAGE_SHIFT) < max_low_pfn)
28151+#endif
28152 return __va(phys);
28153
28154 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
28155@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
28156 early_param("early_ioremap_debug", early_ioremap_debug_setup);
28157
28158 static __initdata int after_paging_init;
28159-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
28160+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
28161
28162 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
28163 {
28164@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
28165 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
28166
28167 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
28168- memset(bm_pte, 0, sizeof(bm_pte));
28169- pmd_populate_kernel(&init_mm, pmd, bm_pte);
28170+ pmd_populate_user(&init_mm, pmd, bm_pte);
28171
28172 /*
28173 * The boot-ioremap range spans multiple pmds, for which
28174diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
28175index d87dd6d..bf3fa66 100644
28176--- a/arch/x86/mm/kmemcheck/kmemcheck.c
28177+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
28178@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
28179 * memory (e.g. tracked pages)? For now, we need this to avoid
28180 * invoking kmemcheck for PnP BIOS calls.
28181 */
28182- if (regs->flags & X86_VM_MASK)
28183+ if (v8086_mode(regs))
28184 return false;
28185- if (regs->cs != __KERNEL_CS)
28186+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
28187 return false;
28188
28189 pte = kmemcheck_pte_lookup(address);
28190diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
28191index 845df68..1d8d29f 100644
28192--- a/arch/x86/mm/mmap.c
28193+++ b/arch/x86/mm/mmap.c
28194@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
28195 * Leave an at least ~128 MB hole with possible stack randomization.
28196 */
28197 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
28198-#define MAX_GAP (TASK_SIZE/6*5)
28199+#define MAX_GAP (pax_task_size/6*5)
28200
28201 static int mmap_is_legacy(void)
28202 {
28203@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
28204 return rnd << PAGE_SHIFT;
28205 }
28206
28207-static unsigned long mmap_base(void)
28208+static unsigned long mmap_base(struct mm_struct *mm)
28209 {
28210 unsigned long gap = rlimit(RLIMIT_STACK);
28211+ unsigned long pax_task_size = TASK_SIZE;
28212+
28213+#ifdef CONFIG_PAX_SEGMEXEC
28214+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28215+ pax_task_size = SEGMEXEC_TASK_SIZE;
28216+#endif
28217
28218 if (gap < MIN_GAP)
28219 gap = MIN_GAP;
28220 else if (gap > MAX_GAP)
28221 gap = MAX_GAP;
28222
28223- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
28224+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
28225 }
28226
28227 /*
28228 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
28229 * does, but not when emulating X86_32
28230 */
28231-static unsigned long mmap_legacy_base(void)
28232+static unsigned long mmap_legacy_base(struct mm_struct *mm)
28233 {
28234- if (mmap_is_ia32())
28235+ if (mmap_is_ia32()) {
28236+
28237+#ifdef CONFIG_PAX_SEGMEXEC
28238+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28239+ return SEGMEXEC_TASK_UNMAPPED_BASE;
28240+ else
28241+#endif
28242+
28243 return TASK_UNMAPPED_BASE;
28244- else
28245+ } else
28246 return TASK_UNMAPPED_BASE + mmap_rnd();
28247 }
28248
28249@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
28250 void arch_pick_mmap_layout(struct mm_struct *mm)
28251 {
28252 if (mmap_is_legacy()) {
28253- mm->mmap_base = mmap_legacy_base();
28254+ mm->mmap_base = mmap_legacy_base(mm);
28255+
28256+#ifdef CONFIG_PAX_RANDMMAP
28257+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28258+ mm->mmap_base += mm->delta_mmap;
28259+#endif
28260+
28261 mm->get_unmapped_area = arch_get_unmapped_area;
28262 mm->unmap_area = arch_unmap_area;
28263 } else {
28264- mm->mmap_base = mmap_base();
28265+ mm->mmap_base = mmap_base(mm);
28266+
28267+#ifdef CONFIG_PAX_RANDMMAP
28268+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28269+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
28270+#endif
28271+
28272 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
28273 mm->unmap_area = arch_unmap_area_topdown;
28274 }
28275diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
28276index dc0b727..f612039 100644
28277--- a/arch/x86/mm/mmio-mod.c
28278+++ b/arch/x86/mm/mmio-mod.c
28279@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
28280 break;
28281 default:
28282 {
28283- unsigned char *ip = (unsigned char *)instptr;
28284+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
28285 my_trace->opcode = MMIO_UNKNOWN_OP;
28286 my_trace->width = 0;
28287 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
28288@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
28289 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28290 void __iomem *addr)
28291 {
28292- static atomic_t next_id;
28293+ static atomic_unchecked_t next_id;
28294 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
28295 /* These are page-unaligned. */
28296 struct mmiotrace_map map = {
28297@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28298 .private = trace
28299 },
28300 .phys = offset,
28301- .id = atomic_inc_return(&next_id)
28302+ .id = atomic_inc_return_unchecked(&next_id)
28303 };
28304 map.map_id = trace->id;
28305
28306@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
28307 ioremap_trace_core(offset, size, addr);
28308 }
28309
28310-static void iounmap_trace_core(volatile void __iomem *addr)
28311+static void iounmap_trace_core(const volatile void __iomem *addr)
28312 {
28313 struct mmiotrace_map map = {
28314 .phys = 0,
28315@@ -328,7 +328,7 @@ not_enabled:
28316 }
28317 }
28318
28319-void mmiotrace_iounmap(volatile void __iomem *addr)
28320+void mmiotrace_iounmap(const volatile void __iomem *addr)
28321 {
28322 might_sleep();
28323 if (is_enabled()) /* recheck and proper locking in *_core() */
28324diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
28325index b008656..773eac2 100644
28326--- a/arch/x86/mm/pageattr-test.c
28327+++ b/arch/x86/mm/pageattr-test.c
28328@@ -36,7 +36,7 @@ enum {
28329
28330 static int pte_testbit(pte_t pte)
28331 {
28332- return pte_flags(pte) & _PAGE_UNUSED1;
28333+ return pte_flags(pte) & _PAGE_CPA_TEST;
28334 }
28335
28336 struct split_state {
28337diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
28338index a718e0d..77419bc 100644
28339--- a/arch/x86/mm/pageattr.c
28340+++ b/arch/x86/mm/pageattr.c
28341@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28342 */
28343 #ifdef CONFIG_PCI_BIOS
28344 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
28345- pgprot_val(forbidden) |= _PAGE_NX;
28346+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28347 #endif
28348
28349 /*
28350@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28351 * Does not cover __inittext since that is gone later on. On
28352 * 64bit we do not enforce !NX on the low mapping
28353 */
28354- if (within(address, (unsigned long)_text, (unsigned long)_etext))
28355- pgprot_val(forbidden) |= _PAGE_NX;
28356+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
28357+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28358
28359+#ifdef CONFIG_DEBUG_RODATA
28360 /*
28361 * The .rodata section needs to be read-only. Using the pfn
28362 * catches all aliases.
28363@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28364 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
28365 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
28366 pgprot_val(forbidden) |= _PAGE_RW;
28367+#endif
28368
28369 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
28370 /*
28371@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28372 }
28373 #endif
28374
28375+#ifdef CONFIG_PAX_KERNEXEC
28376+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
28377+ pgprot_val(forbidden) |= _PAGE_RW;
28378+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28379+ }
28380+#endif
28381+
28382 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
28383
28384 return prot;
28385@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
28386 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
28387 {
28388 /* change init_mm */
28389+ pax_open_kernel();
28390 set_pte_atomic(kpte, pte);
28391+
28392 #ifdef CONFIG_X86_32
28393 if (!SHARED_KERNEL_PMD) {
28394+
28395+#ifdef CONFIG_PAX_PER_CPU_PGD
28396+ unsigned long cpu;
28397+#else
28398 struct page *page;
28399+#endif
28400
28401+#ifdef CONFIG_PAX_PER_CPU_PGD
28402+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28403+ pgd_t *pgd = get_cpu_pgd(cpu);
28404+#else
28405 list_for_each_entry(page, &pgd_list, lru) {
28406- pgd_t *pgd;
28407+ pgd_t *pgd = (pgd_t *)page_address(page);
28408+#endif
28409+
28410 pud_t *pud;
28411 pmd_t *pmd;
28412
28413- pgd = (pgd_t *)page_address(page) + pgd_index(address);
28414+ pgd += pgd_index(address);
28415 pud = pud_offset(pgd, address);
28416 pmd = pmd_offset(pud, address);
28417 set_pte_atomic((pte_t *)pmd, pte);
28418 }
28419 }
28420 #endif
28421+ pax_close_kernel();
28422 }
28423
28424 static int
28425diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
28426index 0eb572e..92f5c1e 100644
28427--- a/arch/x86/mm/pat.c
28428+++ b/arch/x86/mm/pat.c
28429@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
28430
28431 if (!entry) {
28432 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
28433- current->comm, current->pid, start, end - 1);
28434+ current->comm, task_pid_nr(current), start, end - 1);
28435 return -EINVAL;
28436 }
28437
28438@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28439
28440 while (cursor < to) {
28441 if (!devmem_is_allowed(pfn)) {
28442- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
28443- current->comm, from, to - 1);
28444+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
28445+ current->comm, from, to - 1, cursor);
28446 return 0;
28447 }
28448 cursor += PAGE_SIZE;
28449@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
28450 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
28451 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
28452 "for [mem %#010Lx-%#010Lx]\n",
28453- current->comm, current->pid,
28454+ current->comm, task_pid_nr(current),
28455 cattr_name(flags),
28456 base, (unsigned long long)(base + size-1));
28457 return -EINVAL;
28458@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
28459 flags = lookup_memtype(paddr);
28460 if (want_flags != flags) {
28461 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
28462- current->comm, current->pid,
28463+ current->comm, task_pid_nr(current),
28464 cattr_name(want_flags),
28465 (unsigned long long)paddr,
28466 (unsigned long long)(paddr + size - 1),
28467@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
28468 free_memtype(paddr, paddr + size);
28469 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
28470 " for [mem %#010Lx-%#010Lx], got %s\n",
28471- current->comm, current->pid,
28472+ current->comm, task_pid_nr(current),
28473 cattr_name(want_flags),
28474 (unsigned long long)paddr,
28475 (unsigned long long)(paddr + size - 1),
28476diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
28477index 9f0614d..92ae64a 100644
28478--- a/arch/x86/mm/pf_in.c
28479+++ b/arch/x86/mm/pf_in.c
28480@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
28481 int i;
28482 enum reason_type rv = OTHERS;
28483
28484- p = (unsigned char *)ins_addr;
28485+ p = (unsigned char *)ktla_ktva(ins_addr);
28486 p += skip_prefix(p, &prf);
28487 p += get_opcode(p, &opcode);
28488
28489@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
28490 struct prefix_bits prf;
28491 int i;
28492
28493- p = (unsigned char *)ins_addr;
28494+ p = (unsigned char *)ktla_ktva(ins_addr);
28495 p += skip_prefix(p, &prf);
28496 p += get_opcode(p, &opcode);
28497
28498@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
28499 struct prefix_bits prf;
28500 int i;
28501
28502- p = (unsigned char *)ins_addr;
28503+ p = (unsigned char *)ktla_ktva(ins_addr);
28504 p += skip_prefix(p, &prf);
28505 p += get_opcode(p, &opcode);
28506
28507@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
28508 struct prefix_bits prf;
28509 int i;
28510
28511- p = (unsigned char *)ins_addr;
28512+ p = (unsigned char *)ktla_ktva(ins_addr);
28513 p += skip_prefix(p, &prf);
28514 p += get_opcode(p, &opcode);
28515 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
28516@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
28517 struct prefix_bits prf;
28518 int i;
28519
28520- p = (unsigned char *)ins_addr;
28521+ p = (unsigned char *)ktla_ktva(ins_addr);
28522 p += skip_prefix(p, &prf);
28523 p += get_opcode(p, &opcode);
28524 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
28525diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
28526index e27fbf8..8b56dc9 100644
28527--- a/arch/x86/mm/pgtable.c
28528+++ b/arch/x86/mm/pgtable.c
28529@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
28530 list_del(&page->lru);
28531 }
28532
28533-#define UNSHARED_PTRS_PER_PGD \
28534- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
28535+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28536+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
28537
28538+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
28539+{
28540+ unsigned int count = USER_PGD_PTRS;
28541
28542+ while (count--)
28543+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
28544+}
28545+#endif
28546+
28547+#ifdef CONFIG_PAX_PER_CPU_PGD
28548+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
28549+{
28550+ unsigned int count = USER_PGD_PTRS;
28551+
28552+ while (count--) {
28553+ pgd_t pgd;
28554+
28555+#ifdef CONFIG_X86_64
28556+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
28557+#else
28558+ pgd = *src++;
28559+#endif
28560+
28561+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28562+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
28563+#endif
28564+
28565+ *dst++ = pgd;
28566+ }
28567+
28568+}
28569+#endif
28570+
28571+#ifdef CONFIG_X86_64
28572+#define pxd_t pud_t
28573+#define pyd_t pgd_t
28574+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
28575+#define pxd_free(mm, pud) pud_free((mm), (pud))
28576+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
28577+#define pyd_offset(mm, address) pgd_offset((mm), (address))
28578+#define PYD_SIZE PGDIR_SIZE
28579+#else
28580+#define pxd_t pmd_t
28581+#define pyd_t pud_t
28582+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
28583+#define pxd_free(mm, pud) pmd_free((mm), (pud))
28584+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
28585+#define pyd_offset(mm, address) pud_offset((mm), (address))
28586+#define PYD_SIZE PUD_SIZE
28587+#endif
28588+
28589+#ifdef CONFIG_PAX_PER_CPU_PGD
28590+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
28591+static inline void pgd_dtor(pgd_t *pgd) {}
28592+#else
28593 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
28594 {
28595 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
28596@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
28597 pgd_list_del(pgd);
28598 spin_unlock(&pgd_lock);
28599 }
28600+#endif
28601
28602 /*
28603 * List of all pgd's needed for non-PAE so it can invalidate entries
28604@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
28605 * -- nyc
28606 */
28607
28608-#ifdef CONFIG_X86_PAE
28609+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
28610 /*
28611 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
28612 * updating the top-level pagetable entries to guarantee the
28613@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
28614 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
28615 * and initialize the kernel pmds here.
28616 */
28617-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
28618+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
28619
28620 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
28621 {
28622@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
28623 */
28624 flush_tlb_mm(mm);
28625 }
28626+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
28627+#define PREALLOCATED_PXDS USER_PGD_PTRS
28628 #else /* !CONFIG_X86_PAE */
28629
28630 /* No need to prepopulate any pagetable entries in non-PAE modes. */
28631-#define PREALLOCATED_PMDS 0
28632+#define PREALLOCATED_PXDS 0
28633
28634 #endif /* CONFIG_X86_PAE */
28635
28636-static void free_pmds(pmd_t *pmds[])
28637+static void free_pxds(pxd_t *pxds[])
28638 {
28639 int i;
28640
28641- for(i = 0; i < PREALLOCATED_PMDS; i++)
28642- if (pmds[i])
28643- free_page((unsigned long)pmds[i]);
28644+ for(i = 0; i < PREALLOCATED_PXDS; i++)
28645+ if (pxds[i])
28646+ free_page((unsigned long)pxds[i]);
28647 }
28648
28649-static int preallocate_pmds(pmd_t *pmds[])
28650+static int preallocate_pxds(pxd_t *pxds[])
28651 {
28652 int i;
28653 bool failed = false;
28654
28655- for(i = 0; i < PREALLOCATED_PMDS; i++) {
28656- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
28657- if (pmd == NULL)
28658+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
28659+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
28660+ if (pxd == NULL)
28661 failed = true;
28662- pmds[i] = pmd;
28663+ pxds[i] = pxd;
28664 }
28665
28666 if (failed) {
28667- free_pmds(pmds);
28668+ free_pxds(pxds);
28669 return -ENOMEM;
28670 }
28671
28672@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
28673 * preallocate which never got a corresponding vma will need to be
28674 * freed manually.
28675 */
28676-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
28677+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
28678 {
28679 int i;
28680
28681- for(i = 0; i < PREALLOCATED_PMDS; i++) {
28682+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
28683 pgd_t pgd = pgdp[i];
28684
28685 if (pgd_val(pgd) != 0) {
28686- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
28687+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
28688
28689- pgdp[i] = native_make_pgd(0);
28690+ set_pgd(pgdp + i, native_make_pgd(0));
28691
28692- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
28693- pmd_free(mm, pmd);
28694+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
28695+ pxd_free(mm, pxd);
28696 }
28697 }
28698 }
28699
28700-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
28701+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
28702 {
28703- pud_t *pud;
28704+ pyd_t *pyd;
28705 unsigned long addr;
28706 int i;
28707
28708- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
28709+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
28710 return;
28711
28712- pud = pud_offset(pgd, 0);
28713+#ifdef CONFIG_X86_64
28714+ pyd = pyd_offset(mm, 0L);
28715+#else
28716+ pyd = pyd_offset(pgd, 0L);
28717+#endif
28718
28719- for (addr = i = 0; i < PREALLOCATED_PMDS;
28720- i++, pud++, addr += PUD_SIZE) {
28721- pmd_t *pmd = pmds[i];
28722+ for (addr = i = 0; i < PREALLOCATED_PXDS;
28723+ i++, pyd++, addr += PYD_SIZE) {
28724+ pxd_t *pxd = pxds[i];
28725
28726 if (i >= KERNEL_PGD_BOUNDARY)
28727- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
28728- sizeof(pmd_t) * PTRS_PER_PMD);
28729+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
28730+ sizeof(pxd_t) * PTRS_PER_PMD);
28731
28732- pud_populate(mm, pud, pmd);
28733+ pyd_populate(mm, pyd, pxd);
28734 }
28735 }
28736
28737 pgd_t *pgd_alloc(struct mm_struct *mm)
28738 {
28739 pgd_t *pgd;
28740- pmd_t *pmds[PREALLOCATED_PMDS];
28741+ pxd_t *pxds[PREALLOCATED_PXDS];
28742
28743 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
28744
28745@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
28746
28747 mm->pgd = pgd;
28748
28749- if (preallocate_pmds(pmds) != 0)
28750+ if (preallocate_pxds(pxds) != 0)
28751 goto out_free_pgd;
28752
28753 if (paravirt_pgd_alloc(mm) != 0)
28754- goto out_free_pmds;
28755+ goto out_free_pxds;
28756
28757 /*
28758 * Make sure that pre-populating the pmds is atomic with
28759@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
28760 spin_lock(&pgd_lock);
28761
28762 pgd_ctor(mm, pgd);
28763- pgd_prepopulate_pmd(mm, pgd, pmds);
28764+ pgd_prepopulate_pxd(mm, pgd, pxds);
28765
28766 spin_unlock(&pgd_lock);
28767
28768 return pgd;
28769
28770-out_free_pmds:
28771- free_pmds(pmds);
28772+out_free_pxds:
28773+ free_pxds(pxds);
28774 out_free_pgd:
28775 free_page((unsigned long)pgd);
28776 out:
28777@@ -295,7 +356,7 @@ out:
28778
28779 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
28780 {
28781- pgd_mop_up_pmds(mm, pgd);
28782+ pgd_mop_up_pxds(mm, pgd);
28783 pgd_dtor(pgd);
28784 paravirt_pgd_free(mm, pgd);
28785 free_page((unsigned long)pgd);
28786diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
28787index a69bcb8..19068ab 100644
28788--- a/arch/x86/mm/pgtable_32.c
28789+++ b/arch/x86/mm/pgtable_32.c
28790@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
28791 return;
28792 }
28793 pte = pte_offset_kernel(pmd, vaddr);
28794+
28795+ pax_open_kernel();
28796 if (pte_val(pteval))
28797 set_pte_at(&init_mm, vaddr, pte, pteval);
28798 else
28799 pte_clear(&init_mm, vaddr, pte);
28800+ pax_close_kernel();
28801
28802 /*
28803 * It's enough to flush this one mapping.
28804diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
28805index 410531d..0f16030 100644
28806--- a/arch/x86/mm/setup_nx.c
28807+++ b/arch/x86/mm/setup_nx.c
28808@@ -5,8 +5,10 @@
28809 #include <asm/pgtable.h>
28810 #include <asm/proto.h>
28811
28812+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28813 static int disable_nx __cpuinitdata;
28814
28815+#ifndef CONFIG_PAX_PAGEEXEC
28816 /*
28817 * noexec = on|off
28818 *
28819@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
28820 return 0;
28821 }
28822 early_param("noexec", noexec_setup);
28823+#endif
28824+
28825+#endif
28826
28827 void __cpuinit x86_configure_nx(void)
28828 {
28829+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28830 if (cpu_has_nx && !disable_nx)
28831 __supported_pte_mask |= _PAGE_NX;
28832 else
28833+#endif
28834 __supported_pte_mask &= ~_PAGE_NX;
28835 }
28836
28837diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
28838index 13a6b29..c2fff23 100644
28839--- a/arch/x86/mm/tlb.c
28840+++ b/arch/x86/mm/tlb.c
28841@@ -48,7 +48,11 @@ void leave_mm(int cpu)
28842 BUG();
28843 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
28844 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
28845+
28846+#ifndef CONFIG_PAX_PER_CPU_PGD
28847 load_cr3(swapper_pg_dir);
28848+#endif
28849+
28850 }
28851 }
28852 EXPORT_SYMBOL_GPL(leave_mm);
28853diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
28854index 877b9a1..a8ecf42 100644
28855--- a/arch/x86/net/bpf_jit.S
28856+++ b/arch/x86/net/bpf_jit.S
28857@@ -9,6 +9,7 @@
28858 */
28859 #include <linux/linkage.h>
28860 #include <asm/dwarf2.h>
28861+#include <asm/alternative-asm.h>
28862
28863 /*
28864 * Calling convention :
28865@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
28866 jle bpf_slow_path_word
28867 mov (SKBDATA,%rsi),%eax
28868 bswap %eax /* ntohl() */
28869+ pax_force_retaddr
28870 ret
28871
28872 sk_load_half:
28873@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
28874 jle bpf_slow_path_half
28875 movzwl (SKBDATA,%rsi),%eax
28876 rol $8,%ax # ntohs()
28877+ pax_force_retaddr
28878 ret
28879
28880 sk_load_byte:
28881@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
28882 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
28883 jle bpf_slow_path_byte
28884 movzbl (SKBDATA,%rsi),%eax
28885+ pax_force_retaddr
28886 ret
28887
28888 /**
28889@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
28890 movzbl (SKBDATA,%rsi),%ebx
28891 and $15,%bl
28892 shl $2,%bl
28893+ pax_force_retaddr
28894 ret
28895
28896 /* rsi contains offset and can be scratched */
28897@@ -109,6 +114,7 @@ bpf_slow_path_word:
28898 js bpf_error
28899 mov -12(%rbp),%eax
28900 bswap %eax
28901+ pax_force_retaddr
28902 ret
28903
28904 bpf_slow_path_half:
28905@@ -117,12 +123,14 @@ bpf_slow_path_half:
28906 mov -12(%rbp),%ax
28907 rol $8,%ax
28908 movzwl %ax,%eax
28909+ pax_force_retaddr
28910 ret
28911
28912 bpf_slow_path_byte:
28913 bpf_slow_path_common(1)
28914 js bpf_error
28915 movzbl -12(%rbp),%eax
28916+ pax_force_retaddr
28917 ret
28918
28919 bpf_slow_path_byte_msh:
28920@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
28921 and $15,%al
28922 shl $2,%al
28923 xchg %eax,%ebx
28924+ pax_force_retaddr
28925 ret
28926
28927 #define sk_negative_common(SIZE) \
28928@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
28929 sk_negative_common(4)
28930 mov (%rax), %eax
28931 bswap %eax
28932+ pax_force_retaddr
28933 ret
28934
28935 bpf_slow_path_half_neg:
28936@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
28937 mov (%rax),%ax
28938 rol $8,%ax
28939 movzwl %ax,%eax
28940+ pax_force_retaddr
28941 ret
28942
28943 bpf_slow_path_byte_neg:
28944@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
28945 .globl sk_load_byte_negative_offset
28946 sk_negative_common(1)
28947 movzbl (%rax), %eax
28948+ pax_force_retaddr
28949 ret
28950
28951 bpf_slow_path_byte_msh_neg:
28952@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
28953 and $15,%al
28954 shl $2,%al
28955 xchg %eax,%ebx
28956+ pax_force_retaddr
28957 ret
28958
28959 bpf_error:
28960@@ -197,4 +210,5 @@ bpf_error:
28961 xor %eax,%eax
28962 mov -8(%rbp),%rbx
28963 leaveq
28964+ pax_force_retaddr
28965 ret
28966diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
28967index d11a470..3f9adff3 100644
28968--- a/arch/x86/net/bpf_jit_comp.c
28969+++ b/arch/x86/net/bpf_jit_comp.c
28970@@ -12,6 +12,7 @@
28971 #include <linux/netdevice.h>
28972 #include <linux/filter.h>
28973 #include <linux/if_vlan.h>
28974+#include <linux/random.h>
28975
28976 /*
28977 * Conventions :
28978@@ -49,13 +50,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
28979 return ptr + len;
28980 }
28981
28982+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
28983+#define MAX_INSTR_CODE_SIZE 96
28984+#else
28985+#define MAX_INSTR_CODE_SIZE 64
28986+#endif
28987+
28988 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
28989
28990 #define EMIT1(b1) EMIT(b1, 1)
28991 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
28992 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
28993 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
28994+
28995+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
28996+/* original constant will appear in ecx */
28997+#define DILUTE_CONST_SEQUENCE(_off, _key) \
28998+do { \
28999+ /* mov ecx, randkey */ \
29000+ EMIT1(0xb9); \
29001+ EMIT(_key, 4); \
29002+ /* xor ecx, randkey ^ off */ \
29003+ EMIT2(0x81, 0xf1); \
29004+ EMIT((_key) ^ (_off), 4); \
29005+} while (0)
29006+
29007+#define EMIT1_off32(b1, _off) \
29008+do { \
29009+ switch (b1) { \
29010+ case 0x05: /* add eax, imm32 */ \
29011+ case 0x2d: /* sub eax, imm32 */ \
29012+ case 0x25: /* and eax, imm32 */ \
29013+ case 0x0d: /* or eax, imm32 */ \
29014+ case 0xb8: /* mov eax, imm32 */ \
29015+ case 0x3d: /* cmp eax, imm32 */ \
29016+ case 0xa9: /* test eax, imm32 */ \
29017+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29018+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
29019+ break; \
29020+ case 0xbb: /* mov ebx, imm32 */ \
29021+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29022+ /* mov ebx, ecx */ \
29023+ EMIT2(0x89, 0xcb); \
29024+ break; \
29025+ case 0xbe: /* mov esi, imm32 */ \
29026+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29027+ /* mov esi, ecx */ \
29028+ EMIT2(0x89, 0xce); \
29029+ break; \
29030+ case 0xe9: /* jmp rel imm32 */ \
29031+ EMIT1(b1); \
29032+ EMIT(_off, 4); \
29033+ /* prevent fall-through, we're not called if off = 0 */ \
29034+ EMIT(0xcccccccc, 4); \
29035+ EMIT(0xcccccccc, 4); \
29036+ break; \
29037+ default: \
29038+ EMIT1(b1); \
29039+ EMIT(_off, 4); \
29040+ } \
29041+} while (0)
29042+
29043+#define EMIT2_off32(b1, b2, _off) \
29044+do { \
29045+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
29046+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
29047+ EMIT(randkey, 4); \
29048+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
29049+ EMIT((_off) - randkey, 4); \
29050+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
29051+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29052+ /* imul eax, ecx */ \
29053+ EMIT3(0x0f, 0xaf, 0xc1); \
29054+ } else { \
29055+ EMIT2(b1, b2); \
29056+ EMIT(_off, 4); \
29057+ } \
29058+} while (0)
29059+#else
29060 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
29061+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
29062+#endif
29063
29064 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
29065 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
29066@@ -90,6 +165,24 @@ do { \
29067 #define X86_JBE 0x76
29068 #define X86_JA 0x77
29069
29070+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29071+#define APPEND_FLOW_VERIFY() \
29072+do { \
29073+ /* mov ecx, randkey */ \
29074+ EMIT1(0xb9); \
29075+ EMIT(randkey, 4); \
29076+ /* cmp ecx, randkey */ \
29077+ EMIT2(0x81, 0xf9); \
29078+ EMIT(randkey, 4); \
29079+ /* jz after 8 int 3s */ \
29080+ EMIT2(0x74, 0x08); \
29081+ EMIT(0xcccccccc, 4); \
29082+ EMIT(0xcccccccc, 4); \
29083+} while (0)
29084+#else
29085+#define APPEND_FLOW_VERIFY() do { } while (0)
29086+#endif
29087+
29088 #define EMIT_COND_JMP(op, offset) \
29089 do { \
29090 if (is_near(offset)) \
29091@@ -97,6 +190,7 @@ do { \
29092 else { \
29093 EMIT2(0x0f, op + 0x10); \
29094 EMIT(offset, 4); /* jxx .+off32 */ \
29095+ APPEND_FLOW_VERIFY(); \
29096 } \
29097 } while (0)
29098
29099@@ -121,12 +215,17 @@ static inline void bpf_flush_icache(void *start, void *end)
29100 set_fs(old_fs);
29101 }
29102
29103+struct bpf_jit_work {
29104+ struct work_struct work;
29105+ void *image;
29106+};
29107+
29108 #define CHOOSE_LOAD_FUNC(K, func) \
29109 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
29110
29111 void bpf_jit_compile(struct sk_filter *fp)
29112 {
29113- u8 temp[64];
29114+ u8 temp[MAX_INSTR_CODE_SIZE];
29115 u8 *prog;
29116 unsigned int proglen, oldproglen = 0;
29117 int ilen, i;
29118@@ -139,6 +238,9 @@ void bpf_jit_compile(struct sk_filter *fp)
29119 unsigned int *addrs;
29120 const struct sock_filter *filter = fp->insns;
29121 int flen = fp->len;
29122+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29123+ unsigned int randkey;
29124+#endif
29125
29126 if (!bpf_jit_enable)
29127 return;
29128@@ -147,11 +249,19 @@ void bpf_jit_compile(struct sk_filter *fp)
29129 if (addrs == NULL)
29130 return;
29131
29132+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
29133+ if (!fp->work)
29134+ goto out;
29135+
29136+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29137+ randkey = get_random_int();
29138+#endif
29139+
29140 /* Before first pass, make a rough estimation of addrs[]
29141- * each bpf instruction is translated to less than 64 bytes
29142+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
29143 */
29144 for (proglen = 0, i = 0; i < flen; i++) {
29145- proglen += 64;
29146+ proglen += MAX_INSTR_CODE_SIZE;
29147 addrs[i] = proglen;
29148 }
29149 cleanup_addr = proglen; /* epilogue address */
29150@@ -261,10 +371,8 @@ void bpf_jit_compile(struct sk_filter *fp)
29151 case BPF_S_ALU_MUL_K: /* A *= K */
29152 if (is_imm8(K))
29153 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
29154- else {
29155- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
29156- EMIT(K, 4);
29157- }
29158+ else
29159+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
29160 break;
29161 case BPF_S_ALU_DIV_X: /* A /= X; */
29162 seen |= SEEN_XREG;
29163@@ -304,13 +412,23 @@ void bpf_jit_compile(struct sk_filter *fp)
29164 break;
29165 case BPF_S_ALU_MOD_K: /* A %= K; */
29166 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
29167+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29168+ DILUTE_CONST_SEQUENCE(K, randkey);
29169+#else
29170 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
29171+#endif
29172 EMIT2(0xf7, 0xf1); /* div %ecx */
29173 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
29174 break;
29175 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
29176+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29177+ DILUTE_CONST_SEQUENCE(K, randkey);
29178+ // imul rax, rcx
29179+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
29180+#else
29181 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
29182 EMIT(K, 4);
29183+#endif
29184 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
29185 break;
29186 case BPF_S_ALU_AND_X:
29187@@ -564,8 +682,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
29188 if (is_imm8(K)) {
29189 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
29190 } else {
29191- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
29192- EMIT(K, 4);
29193+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
29194 }
29195 } else {
29196 EMIT2(0x89,0xde); /* mov %ebx,%esi */
29197@@ -648,17 +765,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29198 break;
29199 default:
29200 /* hmm, too complex filter, give up with jit compiler */
29201- goto out;
29202+ goto error;
29203 }
29204 ilen = prog - temp;
29205 if (image) {
29206 if (unlikely(proglen + ilen > oldproglen)) {
29207 pr_err("bpb_jit_compile fatal error\n");
29208- kfree(addrs);
29209- module_free(NULL, image);
29210- return;
29211+ module_free_exec(NULL, image);
29212+ goto error;
29213 }
29214+ pax_open_kernel();
29215 memcpy(image + proglen, temp, ilen);
29216+ pax_close_kernel();
29217 }
29218 proglen += ilen;
29219 addrs[i] = proglen;
29220@@ -679,11 +797,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29221 break;
29222 }
29223 if (proglen == oldproglen) {
29224- image = module_alloc(max_t(unsigned int,
29225- proglen,
29226- sizeof(struct work_struct)));
29227+ image = module_alloc_exec(proglen);
29228 if (!image)
29229- goto out;
29230+ goto error;
29231 }
29232 oldproglen = proglen;
29233 }
29234@@ -699,7 +815,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29235 bpf_flush_icache(image, image + proglen);
29236
29237 fp->bpf_func = (void *)image;
29238- }
29239+ } else
29240+error:
29241+ kfree(fp->work);
29242+
29243 out:
29244 kfree(addrs);
29245 return;
29246@@ -707,18 +826,20 @@ out:
29247
29248 static void jit_free_defer(struct work_struct *arg)
29249 {
29250- module_free(NULL, arg);
29251+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
29252+ kfree(arg);
29253 }
29254
29255 /* run from softirq, we must use a work_struct to call
29256- * module_free() from process context
29257+ * module_free_exec() from process context
29258 */
29259 void bpf_jit_free(struct sk_filter *fp)
29260 {
29261 if (fp->bpf_func != sk_run_filter) {
29262- struct work_struct *work = (struct work_struct *)fp->bpf_func;
29263+ struct work_struct *work = &fp->work->work;
29264
29265 INIT_WORK(work, jit_free_defer);
29266+ fp->work->image = fp->bpf_func;
29267 schedule_work(work);
29268 }
29269 }
29270diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
29271index d6aa6e8..266395a 100644
29272--- a/arch/x86/oprofile/backtrace.c
29273+++ b/arch/x86/oprofile/backtrace.c
29274@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
29275 struct stack_frame_ia32 *fp;
29276 unsigned long bytes;
29277
29278- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29279+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29280 if (bytes != sizeof(bufhead))
29281 return NULL;
29282
29283- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
29284+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
29285
29286 oprofile_add_trace(bufhead[0].return_address);
29287
29288@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
29289 struct stack_frame bufhead[2];
29290 unsigned long bytes;
29291
29292- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29293+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29294 if (bytes != sizeof(bufhead))
29295 return NULL;
29296
29297@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
29298 {
29299 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
29300
29301- if (!user_mode_vm(regs)) {
29302+ if (!user_mode(regs)) {
29303 unsigned long stack = kernel_stack_pointer(regs);
29304 if (depth)
29305 dump_trace(NULL, regs, (unsigned long *)stack, 0,
29306diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
29307index 48768df..ba9143c 100644
29308--- a/arch/x86/oprofile/nmi_int.c
29309+++ b/arch/x86/oprofile/nmi_int.c
29310@@ -23,6 +23,7 @@
29311 #include <asm/nmi.h>
29312 #include <asm/msr.h>
29313 #include <asm/apic.h>
29314+#include <asm/pgtable.h>
29315
29316 #include "op_counter.h"
29317 #include "op_x86_model.h"
29318@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
29319 if (ret)
29320 return ret;
29321
29322- if (!model->num_virt_counters)
29323- model->num_virt_counters = model->num_counters;
29324+ if (!model->num_virt_counters) {
29325+ pax_open_kernel();
29326+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
29327+ pax_close_kernel();
29328+ }
29329
29330 mux_init(ops);
29331
29332diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
29333index b2b9443..be58856 100644
29334--- a/arch/x86/oprofile/op_model_amd.c
29335+++ b/arch/x86/oprofile/op_model_amd.c
29336@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
29337 num_counters = AMD64_NUM_COUNTERS;
29338 }
29339
29340- op_amd_spec.num_counters = num_counters;
29341- op_amd_spec.num_controls = num_counters;
29342- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29343+ pax_open_kernel();
29344+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
29345+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
29346+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29347+ pax_close_kernel();
29348
29349 return 0;
29350 }
29351diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
29352index d90528e..0127e2b 100644
29353--- a/arch/x86/oprofile/op_model_ppro.c
29354+++ b/arch/x86/oprofile/op_model_ppro.c
29355@@ -19,6 +19,7 @@
29356 #include <asm/msr.h>
29357 #include <asm/apic.h>
29358 #include <asm/nmi.h>
29359+#include <asm/pgtable.h>
29360
29361 #include "op_x86_model.h"
29362 #include "op_counter.h"
29363@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
29364
29365 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
29366
29367- op_arch_perfmon_spec.num_counters = num_counters;
29368- op_arch_perfmon_spec.num_controls = num_counters;
29369+ pax_open_kernel();
29370+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
29371+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
29372+ pax_close_kernel();
29373 }
29374
29375 static int arch_perfmon_init(struct oprofile_operations *ignore)
29376diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
29377index 71e8a67..6a313bb 100644
29378--- a/arch/x86/oprofile/op_x86_model.h
29379+++ b/arch/x86/oprofile/op_x86_model.h
29380@@ -52,7 +52,7 @@ struct op_x86_model_spec {
29381 void (*switch_ctrl)(struct op_x86_model_spec const *model,
29382 struct op_msrs const * const msrs);
29383 #endif
29384-};
29385+} __do_const;
29386
29387 struct op_counter_config;
29388
29389diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
29390index e9e6ed5..e47ae67 100644
29391--- a/arch/x86/pci/amd_bus.c
29392+++ b/arch/x86/pci/amd_bus.c
29393@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
29394 return NOTIFY_OK;
29395 }
29396
29397-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
29398+static struct notifier_block amd_cpu_notifier = {
29399 .notifier_call = amd_cpu_notify,
29400 };
29401
29402diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
29403index 372e9b8..e775a6c 100644
29404--- a/arch/x86/pci/irq.c
29405+++ b/arch/x86/pci/irq.c
29406@@ -50,7 +50,7 @@ struct irq_router {
29407 struct irq_router_handler {
29408 u16 vendor;
29409 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
29410-};
29411+} __do_const;
29412
29413 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
29414 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
29415@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
29416 return 0;
29417 }
29418
29419-static __initdata struct irq_router_handler pirq_routers[] = {
29420+static __initconst const struct irq_router_handler pirq_routers[] = {
29421 { PCI_VENDOR_ID_INTEL, intel_router_probe },
29422 { PCI_VENDOR_ID_AL, ali_router_probe },
29423 { PCI_VENDOR_ID_ITE, ite_router_probe },
29424@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
29425 static void __init pirq_find_router(struct irq_router *r)
29426 {
29427 struct irq_routing_table *rt = pirq_table;
29428- struct irq_router_handler *h;
29429+ const struct irq_router_handler *h;
29430
29431 #ifdef CONFIG_PCI_BIOS
29432 if (!rt->signature) {
29433@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
29434 return 0;
29435 }
29436
29437-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
29438+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
29439 {
29440 .callback = fix_broken_hp_bios_irq9,
29441 .ident = "HP Pavilion N5400 Series Laptop",
29442diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
29443index 6eb18c4..20d83de 100644
29444--- a/arch/x86/pci/mrst.c
29445+++ b/arch/x86/pci/mrst.c
29446@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
29447 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
29448 pci_mmcfg_late_init();
29449 pcibios_enable_irq = mrst_pci_irq_enable;
29450- pci_root_ops = pci_mrst_ops;
29451+ pax_open_kernel();
29452+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
29453+ pax_close_kernel();
29454 pci_soc_mode = 1;
29455 /* Continue with standard init */
29456 return 1;
29457diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
29458index c77b24a..c979855 100644
29459--- a/arch/x86/pci/pcbios.c
29460+++ b/arch/x86/pci/pcbios.c
29461@@ -79,7 +79,7 @@ union bios32 {
29462 static struct {
29463 unsigned long address;
29464 unsigned short segment;
29465-} bios32_indirect = { 0, __KERNEL_CS };
29466+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
29467
29468 /*
29469 * Returns the entry point for the given service, NULL on error
29470@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
29471 unsigned long length; /* %ecx */
29472 unsigned long entry; /* %edx */
29473 unsigned long flags;
29474+ struct desc_struct d, *gdt;
29475
29476 local_irq_save(flags);
29477- __asm__("lcall *(%%edi); cld"
29478+
29479+ gdt = get_cpu_gdt_table(smp_processor_id());
29480+
29481+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
29482+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
29483+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
29484+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
29485+
29486+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
29487 : "=a" (return_code),
29488 "=b" (address),
29489 "=c" (length),
29490 "=d" (entry)
29491 : "0" (service),
29492 "1" (0),
29493- "D" (&bios32_indirect));
29494+ "D" (&bios32_indirect),
29495+ "r"(__PCIBIOS_DS)
29496+ : "memory");
29497+
29498+ pax_open_kernel();
29499+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
29500+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
29501+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
29502+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
29503+ pax_close_kernel();
29504+
29505 local_irq_restore(flags);
29506
29507 switch (return_code) {
29508- case 0:
29509- return address + entry;
29510- case 0x80: /* Not present */
29511- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
29512- return 0;
29513- default: /* Shouldn't happen */
29514- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
29515- service, return_code);
29516+ case 0: {
29517+ int cpu;
29518+ unsigned char flags;
29519+
29520+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
29521+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
29522+ printk(KERN_WARNING "bios32_service: not valid\n");
29523 return 0;
29524+ }
29525+ address = address + PAGE_OFFSET;
29526+ length += 16UL; /* some BIOSs underreport this... */
29527+ flags = 4;
29528+ if (length >= 64*1024*1024) {
29529+ length >>= PAGE_SHIFT;
29530+ flags |= 8;
29531+ }
29532+
29533+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
29534+ gdt = get_cpu_gdt_table(cpu);
29535+ pack_descriptor(&d, address, length, 0x9b, flags);
29536+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
29537+ pack_descriptor(&d, address, length, 0x93, flags);
29538+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
29539+ }
29540+ return entry;
29541+ }
29542+ case 0x80: /* Not present */
29543+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
29544+ return 0;
29545+ default: /* Shouldn't happen */
29546+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
29547+ service, return_code);
29548+ return 0;
29549 }
29550 }
29551
29552 static struct {
29553 unsigned long address;
29554 unsigned short segment;
29555-} pci_indirect = { 0, __KERNEL_CS };
29556+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
29557
29558-static int pci_bios_present;
29559+static int pci_bios_present __read_only;
29560
29561 static int check_pcibios(void)
29562 {
29563@@ -131,11 +174,13 @@ static int check_pcibios(void)
29564 unsigned long flags, pcibios_entry;
29565
29566 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
29567- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
29568+ pci_indirect.address = pcibios_entry;
29569
29570 local_irq_save(flags);
29571- __asm__(
29572- "lcall *(%%edi); cld\n\t"
29573+ __asm__("movw %w6, %%ds\n\t"
29574+ "lcall *%%ss:(%%edi); cld\n\t"
29575+ "push %%ss\n\t"
29576+ "pop %%ds\n\t"
29577 "jc 1f\n\t"
29578 "xor %%ah, %%ah\n"
29579 "1:"
29580@@ -144,7 +189,8 @@ static int check_pcibios(void)
29581 "=b" (ebx),
29582 "=c" (ecx)
29583 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
29584- "D" (&pci_indirect)
29585+ "D" (&pci_indirect),
29586+ "r" (__PCIBIOS_DS)
29587 : "memory");
29588 local_irq_restore(flags);
29589
29590@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29591
29592 switch (len) {
29593 case 1:
29594- __asm__("lcall *(%%esi); cld\n\t"
29595+ __asm__("movw %w6, %%ds\n\t"
29596+ "lcall *%%ss:(%%esi); cld\n\t"
29597+ "push %%ss\n\t"
29598+ "pop %%ds\n\t"
29599 "jc 1f\n\t"
29600 "xor %%ah, %%ah\n"
29601 "1:"
29602@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29603 : "1" (PCIBIOS_READ_CONFIG_BYTE),
29604 "b" (bx),
29605 "D" ((long)reg),
29606- "S" (&pci_indirect));
29607+ "S" (&pci_indirect),
29608+ "r" (__PCIBIOS_DS));
29609 /*
29610 * Zero-extend the result beyond 8 bits, do not trust the
29611 * BIOS having done it:
29612@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29613 *value &= 0xff;
29614 break;
29615 case 2:
29616- __asm__("lcall *(%%esi); cld\n\t"
29617+ __asm__("movw %w6, %%ds\n\t"
29618+ "lcall *%%ss:(%%esi); cld\n\t"
29619+ "push %%ss\n\t"
29620+ "pop %%ds\n\t"
29621 "jc 1f\n\t"
29622 "xor %%ah, %%ah\n"
29623 "1:"
29624@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29625 : "1" (PCIBIOS_READ_CONFIG_WORD),
29626 "b" (bx),
29627 "D" ((long)reg),
29628- "S" (&pci_indirect));
29629+ "S" (&pci_indirect),
29630+ "r" (__PCIBIOS_DS));
29631 /*
29632 * Zero-extend the result beyond 16 bits, do not trust the
29633 * BIOS having done it:
29634@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29635 *value &= 0xffff;
29636 break;
29637 case 4:
29638- __asm__("lcall *(%%esi); cld\n\t"
29639+ __asm__("movw %w6, %%ds\n\t"
29640+ "lcall *%%ss:(%%esi); cld\n\t"
29641+ "push %%ss\n\t"
29642+ "pop %%ds\n\t"
29643 "jc 1f\n\t"
29644 "xor %%ah, %%ah\n"
29645 "1:"
29646@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29647 : "1" (PCIBIOS_READ_CONFIG_DWORD),
29648 "b" (bx),
29649 "D" ((long)reg),
29650- "S" (&pci_indirect));
29651+ "S" (&pci_indirect),
29652+ "r" (__PCIBIOS_DS));
29653 break;
29654 }
29655
29656@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29657
29658 switch (len) {
29659 case 1:
29660- __asm__("lcall *(%%esi); cld\n\t"
29661+ __asm__("movw %w6, %%ds\n\t"
29662+ "lcall *%%ss:(%%esi); cld\n\t"
29663+ "push %%ss\n\t"
29664+ "pop %%ds\n\t"
29665 "jc 1f\n\t"
29666 "xor %%ah, %%ah\n"
29667 "1:"
29668@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29669 "c" (value),
29670 "b" (bx),
29671 "D" ((long)reg),
29672- "S" (&pci_indirect));
29673+ "S" (&pci_indirect),
29674+ "r" (__PCIBIOS_DS));
29675 break;
29676 case 2:
29677- __asm__("lcall *(%%esi); cld\n\t"
29678+ __asm__("movw %w6, %%ds\n\t"
29679+ "lcall *%%ss:(%%esi); cld\n\t"
29680+ "push %%ss\n\t"
29681+ "pop %%ds\n\t"
29682 "jc 1f\n\t"
29683 "xor %%ah, %%ah\n"
29684 "1:"
29685@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29686 "c" (value),
29687 "b" (bx),
29688 "D" ((long)reg),
29689- "S" (&pci_indirect));
29690+ "S" (&pci_indirect),
29691+ "r" (__PCIBIOS_DS));
29692 break;
29693 case 4:
29694- __asm__("lcall *(%%esi); cld\n\t"
29695+ __asm__("movw %w6, %%ds\n\t"
29696+ "lcall *%%ss:(%%esi); cld\n\t"
29697+ "push %%ss\n\t"
29698+ "pop %%ds\n\t"
29699 "jc 1f\n\t"
29700 "xor %%ah, %%ah\n"
29701 "1:"
29702@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29703 "c" (value),
29704 "b" (bx),
29705 "D" ((long)reg),
29706- "S" (&pci_indirect));
29707+ "S" (&pci_indirect),
29708+ "r" (__PCIBIOS_DS));
29709 break;
29710 }
29711
29712@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
29713
29714 DBG("PCI: Fetching IRQ routing table... ");
29715 __asm__("push %%es\n\t"
29716+ "movw %w8, %%ds\n\t"
29717 "push %%ds\n\t"
29718 "pop %%es\n\t"
29719- "lcall *(%%esi); cld\n\t"
29720+ "lcall *%%ss:(%%esi); cld\n\t"
29721 "pop %%es\n\t"
29722+ "push %%ss\n\t"
29723+ "pop %%ds\n"
29724 "jc 1f\n\t"
29725 "xor %%ah, %%ah\n"
29726 "1:"
29727@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
29728 "1" (0),
29729 "D" ((long) &opt),
29730 "S" (&pci_indirect),
29731- "m" (opt)
29732+ "m" (opt),
29733+ "r" (__PCIBIOS_DS)
29734 : "memory");
29735 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
29736 if (ret & 0xff00)
29737@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
29738 {
29739 int ret;
29740
29741- __asm__("lcall *(%%esi); cld\n\t"
29742+ __asm__("movw %w5, %%ds\n\t"
29743+ "lcall *%%ss:(%%esi); cld\n\t"
29744+ "push %%ss\n\t"
29745+ "pop %%ds\n"
29746 "jc 1f\n\t"
29747 "xor %%ah, %%ah\n"
29748 "1:"
29749@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
29750 : "0" (PCIBIOS_SET_PCI_HW_INT),
29751 "b" ((dev->bus->number << 8) | dev->devfn),
29752 "c" ((irq << 8) | (pin + 10)),
29753- "S" (&pci_indirect));
29754+ "S" (&pci_indirect),
29755+ "r" (__PCIBIOS_DS));
29756 return !(ret & 0xff00);
29757 }
29758 EXPORT_SYMBOL(pcibios_set_irq_routing);
29759diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
29760index 40e4469..1ab536e 100644
29761--- a/arch/x86/platform/efi/efi_32.c
29762+++ b/arch/x86/platform/efi/efi_32.c
29763@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
29764 {
29765 struct desc_ptr gdt_descr;
29766
29767+#ifdef CONFIG_PAX_KERNEXEC
29768+ struct desc_struct d;
29769+#endif
29770+
29771 local_irq_save(efi_rt_eflags);
29772
29773 load_cr3(initial_page_table);
29774 __flush_tlb_all();
29775
29776+#ifdef CONFIG_PAX_KERNEXEC
29777+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
29778+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
29779+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
29780+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
29781+#endif
29782+
29783 gdt_descr.address = __pa(get_cpu_gdt_table(0));
29784 gdt_descr.size = GDT_SIZE - 1;
29785 load_gdt(&gdt_descr);
29786@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
29787 {
29788 struct desc_ptr gdt_descr;
29789
29790+#ifdef CONFIG_PAX_KERNEXEC
29791+ struct desc_struct d;
29792+
29793+ memset(&d, 0, sizeof d);
29794+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
29795+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
29796+#endif
29797+
29798 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
29799 gdt_descr.size = GDT_SIZE - 1;
29800 load_gdt(&gdt_descr);
29801diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
29802index fbe66e6..eae5e38 100644
29803--- a/arch/x86/platform/efi/efi_stub_32.S
29804+++ b/arch/x86/platform/efi/efi_stub_32.S
29805@@ -6,7 +6,9 @@
29806 */
29807
29808 #include <linux/linkage.h>
29809+#include <linux/init.h>
29810 #include <asm/page_types.h>
29811+#include <asm/segment.h>
29812
29813 /*
29814 * efi_call_phys(void *, ...) is a function with variable parameters.
29815@@ -20,7 +22,7 @@
29816 * service functions will comply with gcc calling convention, too.
29817 */
29818
29819-.text
29820+__INIT
29821 ENTRY(efi_call_phys)
29822 /*
29823 * 0. The function can only be called in Linux kernel. So CS has been
29824@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
29825 * The mapping of lower virtual memory has been created in prelog and
29826 * epilog.
29827 */
29828- movl $1f, %edx
29829- subl $__PAGE_OFFSET, %edx
29830- jmp *%edx
29831+#ifdef CONFIG_PAX_KERNEXEC
29832+ movl $(__KERNEXEC_EFI_DS), %edx
29833+ mov %edx, %ds
29834+ mov %edx, %es
29835+ mov %edx, %ss
29836+ addl $2f,(1f)
29837+ ljmp *(1f)
29838+
29839+__INITDATA
29840+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
29841+.previous
29842+
29843+2:
29844+ subl $2b,(1b)
29845+#else
29846+ jmp 1f-__PAGE_OFFSET
29847 1:
29848+#endif
29849
29850 /*
29851 * 2. Now on the top of stack is the return
29852@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
29853 * parameter 2, ..., param n. To make things easy, we save the return
29854 * address of efi_call_phys in a global variable.
29855 */
29856- popl %edx
29857- movl %edx, saved_return_addr
29858- /* get the function pointer into ECX*/
29859- popl %ecx
29860- movl %ecx, efi_rt_function_ptr
29861- movl $2f, %edx
29862- subl $__PAGE_OFFSET, %edx
29863- pushl %edx
29864+ popl (saved_return_addr)
29865+ popl (efi_rt_function_ptr)
29866
29867 /*
29868 * 3. Clear PG bit in %CR0.
29869@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
29870 /*
29871 * 5. Call the physical function.
29872 */
29873- jmp *%ecx
29874+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
29875
29876-2:
29877 /*
29878 * 6. After EFI runtime service returns, control will return to
29879 * following instruction. We'd better readjust stack pointer first.
29880@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
29881 movl %cr0, %edx
29882 orl $0x80000000, %edx
29883 movl %edx, %cr0
29884- jmp 1f
29885-1:
29886+
29887 /*
29888 * 8. Now restore the virtual mode from flat mode by
29889 * adding EIP with PAGE_OFFSET.
29890 */
29891- movl $1f, %edx
29892- jmp *%edx
29893+#ifdef CONFIG_PAX_KERNEXEC
29894+ movl $(__KERNEL_DS), %edx
29895+ mov %edx, %ds
29896+ mov %edx, %es
29897+ mov %edx, %ss
29898+ ljmp $(__KERNEL_CS),$1f
29899+#else
29900+ jmp 1f+__PAGE_OFFSET
29901+#endif
29902 1:
29903
29904 /*
29905 * 9. Balance the stack. And because EAX contain the return value,
29906 * we'd better not clobber it.
29907 */
29908- leal efi_rt_function_ptr, %edx
29909- movl (%edx), %ecx
29910- pushl %ecx
29911+ pushl (efi_rt_function_ptr)
29912
29913 /*
29914- * 10. Push the saved return address onto the stack and return.
29915+ * 10. Return to the saved return address.
29916 */
29917- leal saved_return_addr, %edx
29918- movl (%edx), %ecx
29919- pushl %ecx
29920- ret
29921+ jmpl *(saved_return_addr)
29922 ENDPROC(efi_call_phys)
29923 .previous
29924
29925-.data
29926+__INITDATA
29927 saved_return_addr:
29928 .long 0
29929 efi_rt_function_ptr:
29930diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
29931index 4c07cca..2c8427d 100644
29932--- a/arch/x86/platform/efi/efi_stub_64.S
29933+++ b/arch/x86/platform/efi/efi_stub_64.S
29934@@ -7,6 +7,7 @@
29935 */
29936
29937 #include <linux/linkage.h>
29938+#include <asm/alternative-asm.h>
29939
29940 #define SAVE_XMM \
29941 mov %rsp, %rax; \
29942@@ -40,6 +41,7 @@ ENTRY(efi_call0)
29943 call *%rdi
29944 addq $32, %rsp
29945 RESTORE_XMM
29946+ pax_force_retaddr 0, 1
29947 ret
29948 ENDPROC(efi_call0)
29949
29950@@ -50,6 +52,7 @@ ENTRY(efi_call1)
29951 call *%rdi
29952 addq $32, %rsp
29953 RESTORE_XMM
29954+ pax_force_retaddr 0, 1
29955 ret
29956 ENDPROC(efi_call1)
29957
29958@@ -60,6 +63,7 @@ ENTRY(efi_call2)
29959 call *%rdi
29960 addq $32, %rsp
29961 RESTORE_XMM
29962+ pax_force_retaddr 0, 1
29963 ret
29964 ENDPROC(efi_call2)
29965
29966@@ -71,6 +75,7 @@ ENTRY(efi_call3)
29967 call *%rdi
29968 addq $32, %rsp
29969 RESTORE_XMM
29970+ pax_force_retaddr 0, 1
29971 ret
29972 ENDPROC(efi_call3)
29973
29974@@ -83,6 +88,7 @@ ENTRY(efi_call4)
29975 call *%rdi
29976 addq $32, %rsp
29977 RESTORE_XMM
29978+ pax_force_retaddr 0, 1
29979 ret
29980 ENDPROC(efi_call4)
29981
29982@@ -96,6 +102,7 @@ ENTRY(efi_call5)
29983 call *%rdi
29984 addq $48, %rsp
29985 RESTORE_XMM
29986+ pax_force_retaddr 0, 1
29987 ret
29988 ENDPROC(efi_call5)
29989
29990@@ -112,5 +119,6 @@ ENTRY(efi_call6)
29991 call *%rdi
29992 addq $48, %rsp
29993 RESTORE_XMM
29994+ pax_force_retaddr 0, 1
29995 ret
29996 ENDPROC(efi_call6)
29997diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
29998index e31bcd8..f12dc46 100644
29999--- a/arch/x86/platform/mrst/mrst.c
30000+++ b/arch/x86/platform/mrst/mrst.c
30001@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
30002 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
30003 int sfi_mrtc_num;
30004
30005-static void mrst_power_off(void)
30006+static __noreturn void mrst_power_off(void)
30007 {
30008+ BUG();
30009 }
30010
30011-static void mrst_reboot(void)
30012+static __noreturn void mrst_reboot(void)
30013 {
30014 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
30015+ BUG();
30016 }
30017
30018 /* parse all the mtimer info to a static mtimer array */
30019diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
30020index d6ee929..3637cb5 100644
30021--- a/arch/x86/platform/olpc/olpc_dt.c
30022+++ b/arch/x86/platform/olpc/olpc_dt.c
30023@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
30024 return res;
30025 }
30026
30027-static struct of_pdt_ops prom_olpc_ops __initdata = {
30028+static struct of_pdt_ops prom_olpc_ops __initconst = {
30029 .nextprop = olpc_dt_nextprop,
30030 .getproplen = olpc_dt_getproplen,
30031 .getproperty = olpc_dt_getproperty,
30032diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
30033index 120cee1..b2db75a 100644
30034--- a/arch/x86/power/cpu.c
30035+++ b/arch/x86/power/cpu.c
30036@@ -133,7 +133,7 @@ static void do_fpu_end(void)
30037 static void fix_processor_context(void)
30038 {
30039 int cpu = smp_processor_id();
30040- struct tss_struct *t = &per_cpu(init_tss, cpu);
30041+ struct tss_struct *t = init_tss + cpu;
30042
30043 set_tss_desc(cpu, t); /*
30044 * This just modifies memory; should not be
30045@@ -143,8 +143,6 @@ static void fix_processor_context(void)
30046 */
30047
30048 #ifdef CONFIG_X86_64
30049- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
30050-
30051 syscall_init(); /* This sets MSR_*STAR and related */
30052 #endif
30053 load_TR_desc(); /* This does ltr */
30054diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
30055index cbca565..bae7133 100644
30056--- a/arch/x86/realmode/init.c
30057+++ b/arch/x86/realmode/init.c
30058@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
30059 __va(real_mode_header->trampoline_header);
30060
30061 #ifdef CONFIG_X86_32
30062- trampoline_header->start = __pa(startup_32_smp);
30063+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
30064+
30065+#ifdef CONFIG_PAX_KERNEXEC
30066+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
30067+#endif
30068+
30069+ trampoline_header->boot_cs = __BOOT_CS;
30070 trampoline_header->gdt_limit = __BOOT_DS + 7;
30071 trampoline_header->gdt_base = __pa(boot_gdt);
30072 #else
30073diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
30074index 8869287..d577672 100644
30075--- a/arch/x86/realmode/rm/Makefile
30076+++ b/arch/x86/realmode/rm/Makefile
30077@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
30078 $(call cc-option, -fno-unit-at-a-time)) \
30079 $(call cc-option, -fno-stack-protector) \
30080 $(call cc-option, -mpreferred-stack-boundary=2)
30081+ifdef CONSTIFY_PLUGIN
30082+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
30083+endif
30084 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
30085 GCOV_PROFILE := n
30086diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
30087index a28221d..93c40f1 100644
30088--- a/arch/x86/realmode/rm/header.S
30089+++ b/arch/x86/realmode/rm/header.S
30090@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
30091 #endif
30092 /* APM/BIOS reboot */
30093 .long pa_machine_real_restart_asm
30094-#ifdef CONFIG_X86_64
30095+#ifdef CONFIG_X86_32
30096+ .long __KERNEL_CS
30097+#else
30098 .long __KERNEL32_CS
30099 #endif
30100 END(real_mode_header)
30101diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
30102index c1b2791..f9e31c7 100644
30103--- a/arch/x86/realmode/rm/trampoline_32.S
30104+++ b/arch/x86/realmode/rm/trampoline_32.S
30105@@ -25,6 +25,12 @@
30106 #include <asm/page_types.h>
30107 #include "realmode.h"
30108
30109+#ifdef CONFIG_PAX_KERNEXEC
30110+#define ta(X) (X)
30111+#else
30112+#define ta(X) (pa_ ## X)
30113+#endif
30114+
30115 .text
30116 .code16
30117
30118@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
30119
30120 cli # We should be safe anyway
30121
30122- movl tr_start, %eax # where we need to go
30123-
30124 movl $0xA5A5A5A5, trampoline_status
30125 # write marker for master knows we're running
30126
30127@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
30128 movw $1, %dx # protected mode (PE) bit
30129 lmsw %dx # into protected mode
30130
30131- ljmpl $__BOOT_CS, $pa_startup_32
30132+ ljmpl *(trampoline_header)
30133
30134 .section ".text32","ax"
30135 .code32
30136@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
30137 .balign 8
30138 GLOBAL(trampoline_header)
30139 tr_start: .space 4
30140- tr_gdt_pad: .space 2
30141+ tr_boot_cs: .space 2
30142 tr_gdt: .space 6
30143 END(trampoline_header)
30144
30145diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
30146index bb360dc..3e5945f 100644
30147--- a/arch/x86/realmode/rm/trampoline_64.S
30148+++ b/arch/x86/realmode/rm/trampoline_64.S
30149@@ -107,7 +107,7 @@ ENTRY(startup_32)
30150 wrmsr
30151
30152 # Enable paging and in turn activate Long Mode
30153- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
30154+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
30155 movl %eax, %cr0
30156
30157 /*
30158diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
30159index 79d67bd..c7e1b90 100644
30160--- a/arch/x86/tools/relocs.c
30161+++ b/arch/x86/tools/relocs.c
30162@@ -12,10 +12,13 @@
30163 #include <regex.h>
30164 #include <tools/le_byteshift.h>
30165
30166+#include "../../../include/generated/autoconf.h"
30167+
30168 static void die(char *fmt, ...);
30169
30170 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
30171 static Elf32_Ehdr ehdr;
30172+static Elf32_Phdr *phdr;
30173 static unsigned long reloc_count, reloc_idx;
30174 static unsigned long *relocs;
30175 static unsigned long reloc16_count, reloc16_idx;
30176@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
30177 }
30178 }
30179
30180+static void read_phdrs(FILE *fp)
30181+{
30182+ unsigned int i;
30183+
30184+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
30185+ if (!phdr) {
30186+ die("Unable to allocate %d program headers\n",
30187+ ehdr.e_phnum);
30188+ }
30189+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
30190+ die("Seek to %d failed: %s\n",
30191+ ehdr.e_phoff, strerror(errno));
30192+ }
30193+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
30194+ die("Cannot read ELF program headers: %s\n",
30195+ strerror(errno));
30196+ }
30197+ for(i = 0; i < ehdr.e_phnum; i++) {
30198+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
30199+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
30200+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
30201+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
30202+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
30203+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
30204+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
30205+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
30206+ }
30207+
30208+}
30209+
30210 static void read_shdrs(FILE *fp)
30211 {
30212- int i;
30213+ unsigned int i;
30214 Elf32_Shdr shdr;
30215
30216 secs = calloc(ehdr.e_shnum, sizeof(struct section));
30217@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
30218
30219 static void read_strtabs(FILE *fp)
30220 {
30221- int i;
30222+ unsigned int i;
30223 for (i = 0; i < ehdr.e_shnum; i++) {
30224 struct section *sec = &secs[i];
30225 if (sec->shdr.sh_type != SHT_STRTAB) {
30226@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
30227
30228 static void read_symtabs(FILE *fp)
30229 {
30230- int i,j;
30231+ unsigned int i,j;
30232 for (i = 0; i < ehdr.e_shnum; i++) {
30233 struct section *sec = &secs[i];
30234 if (sec->shdr.sh_type != SHT_SYMTAB) {
30235@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
30236 }
30237
30238
30239-static void read_relocs(FILE *fp)
30240+static void read_relocs(FILE *fp, int use_real_mode)
30241 {
30242- int i,j;
30243+ unsigned int i,j;
30244+ uint32_t base;
30245+
30246 for (i = 0; i < ehdr.e_shnum; i++) {
30247 struct section *sec = &secs[i];
30248 if (sec->shdr.sh_type != SHT_REL) {
30249@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
30250 die("Cannot read symbol table: %s\n",
30251 strerror(errno));
30252 }
30253+ base = 0;
30254+
30255+#ifdef CONFIG_X86_32
30256+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
30257+ if (phdr[j].p_type != PT_LOAD )
30258+ continue;
30259+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
30260+ continue;
30261+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
30262+ break;
30263+ }
30264+#endif
30265+
30266 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
30267 Elf32_Rel *rel = &sec->reltab[j];
30268- rel->r_offset = elf32_to_cpu(rel->r_offset);
30269+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
30270 rel->r_info = elf32_to_cpu(rel->r_info);
30271 }
30272 }
30273@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
30274
30275 static void print_absolute_symbols(void)
30276 {
30277- int i;
30278+ unsigned int i;
30279 printf("Absolute symbols\n");
30280 printf(" Num: Value Size Type Bind Visibility Name\n");
30281 for (i = 0; i < ehdr.e_shnum; i++) {
30282 struct section *sec = &secs[i];
30283 char *sym_strtab;
30284- int j;
30285+ unsigned int j;
30286
30287 if (sec->shdr.sh_type != SHT_SYMTAB) {
30288 continue;
30289@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
30290
30291 static void print_absolute_relocs(void)
30292 {
30293- int i, printed = 0;
30294+ unsigned int i, printed = 0;
30295
30296 for (i = 0; i < ehdr.e_shnum; i++) {
30297 struct section *sec = &secs[i];
30298 struct section *sec_applies, *sec_symtab;
30299 char *sym_strtab;
30300 Elf32_Sym *sh_symtab;
30301- int j;
30302+ unsigned int j;
30303 if (sec->shdr.sh_type != SHT_REL) {
30304 continue;
30305 }
30306@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
30307 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
30308 int use_real_mode)
30309 {
30310- int i;
30311+ unsigned int i;
30312 /* Walk through the relocations */
30313 for (i = 0; i < ehdr.e_shnum; i++) {
30314 char *sym_strtab;
30315 Elf32_Sym *sh_symtab;
30316 struct section *sec_applies, *sec_symtab;
30317- int j;
30318+ unsigned int j;
30319 struct section *sec = &secs[i];
30320
30321 if (sec->shdr.sh_type != SHT_REL) {
30322@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
30323 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
30324 r_type = ELF32_R_TYPE(rel->r_info);
30325
30326+ if (!use_real_mode) {
30327+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
30328+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
30329+ continue;
30330+
30331+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
30332+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
30333+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
30334+ continue;
30335+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
30336+ continue;
30337+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
30338+ continue;
30339+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
30340+ continue;
30341+#endif
30342+ }
30343+
30344 shn_abs = sym->st_shndx == SHN_ABS;
30345
30346 switch (r_type) {
30347@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
30348
30349 static void emit_relocs(int as_text, int use_real_mode)
30350 {
30351- int i;
30352+ unsigned int i;
30353 /* Count how many relocations I have and allocate space for them. */
30354 reloc_count = 0;
30355 walk_relocs(count_reloc, use_real_mode);
30356@@ -808,10 +874,11 @@ int main(int argc, char **argv)
30357 fname, strerror(errno));
30358 }
30359 read_ehdr(fp);
30360+ read_phdrs(fp);
30361 read_shdrs(fp);
30362 read_strtabs(fp);
30363 read_symtabs(fp);
30364- read_relocs(fp);
30365+ read_relocs(fp, use_real_mode);
30366 if (show_absolute_syms) {
30367 print_absolute_symbols();
30368 goto out;
30369diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
30370index fd14be1..e3c79c0 100644
30371--- a/arch/x86/vdso/Makefile
30372+++ b/arch/x86/vdso/Makefile
30373@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
30374 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
30375 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
30376
30377-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
30378+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
30379 GCOV_PROFILE := n
30380
30381 #
30382diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
30383index 0faad64..39ef157 100644
30384--- a/arch/x86/vdso/vdso32-setup.c
30385+++ b/arch/x86/vdso/vdso32-setup.c
30386@@ -25,6 +25,7 @@
30387 #include <asm/tlbflush.h>
30388 #include <asm/vdso.h>
30389 #include <asm/proto.h>
30390+#include <asm/mman.h>
30391
30392 enum {
30393 VDSO_DISABLED = 0,
30394@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
30395 void enable_sep_cpu(void)
30396 {
30397 int cpu = get_cpu();
30398- struct tss_struct *tss = &per_cpu(init_tss, cpu);
30399+ struct tss_struct *tss = init_tss + cpu;
30400
30401 if (!boot_cpu_has(X86_FEATURE_SEP)) {
30402 put_cpu();
30403@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
30404 gate_vma.vm_start = FIXADDR_USER_START;
30405 gate_vma.vm_end = FIXADDR_USER_END;
30406 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
30407- gate_vma.vm_page_prot = __P101;
30408+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
30409
30410 return 0;
30411 }
30412@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30413 if (compat)
30414 addr = VDSO_HIGH_BASE;
30415 else {
30416- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
30417+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
30418 if (IS_ERR_VALUE(addr)) {
30419 ret = addr;
30420 goto up_fail;
30421 }
30422 }
30423
30424- current->mm->context.vdso = (void *)addr;
30425+ current->mm->context.vdso = addr;
30426
30427 if (compat_uses_vma || !compat) {
30428 /*
30429@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30430 }
30431
30432 current_thread_info()->sysenter_return =
30433- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
30434+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
30435
30436 up_fail:
30437 if (ret)
30438- current->mm->context.vdso = NULL;
30439+ current->mm->context.vdso = 0;
30440
30441 up_write(&mm->mmap_sem);
30442
30443@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
30444
30445 const char *arch_vma_name(struct vm_area_struct *vma)
30446 {
30447- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
30448+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
30449 return "[vdso]";
30450+
30451+#ifdef CONFIG_PAX_SEGMEXEC
30452+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
30453+ return "[vdso]";
30454+#endif
30455+
30456 return NULL;
30457 }
30458
30459@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
30460 * Check to see if the corresponding task was created in compat vdso
30461 * mode.
30462 */
30463- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
30464+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
30465 return &gate_vma;
30466 return NULL;
30467 }
30468diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
30469index 431e875..cbb23f3 100644
30470--- a/arch/x86/vdso/vma.c
30471+++ b/arch/x86/vdso/vma.c
30472@@ -16,8 +16,6 @@
30473 #include <asm/vdso.h>
30474 #include <asm/page.h>
30475
30476-unsigned int __read_mostly vdso_enabled = 1;
30477-
30478 extern char vdso_start[], vdso_end[];
30479 extern unsigned short vdso_sync_cpuid;
30480
30481@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
30482 * unaligned here as a result of stack start randomization.
30483 */
30484 addr = PAGE_ALIGN(addr);
30485- addr = align_vdso_addr(addr);
30486
30487 return addr;
30488 }
30489@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
30490 unsigned size)
30491 {
30492 struct mm_struct *mm = current->mm;
30493- unsigned long addr;
30494+ unsigned long addr = 0;
30495 int ret;
30496
30497- if (!vdso_enabled)
30498- return 0;
30499-
30500 down_write(&mm->mmap_sem);
30501+
30502+#ifdef CONFIG_PAX_RANDMMAP
30503+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
30504+#endif
30505+
30506 addr = vdso_addr(mm->start_stack, size);
30507+ addr = align_vdso_addr(addr);
30508 addr = get_unmapped_area(NULL, addr, size, 0, 0);
30509 if (IS_ERR_VALUE(addr)) {
30510 ret = addr;
30511 goto up_fail;
30512 }
30513
30514- current->mm->context.vdso = (void *)addr;
30515+ mm->context.vdso = addr;
30516
30517 ret = install_special_mapping(mm, addr, size,
30518 VM_READ|VM_EXEC|
30519 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
30520 pages);
30521- if (ret) {
30522- current->mm->context.vdso = NULL;
30523- goto up_fail;
30524- }
30525+ if (ret)
30526+ mm->context.vdso = 0;
30527
30528 up_fail:
30529 up_write(&mm->mmap_sem);
30530@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30531 vdsox32_size);
30532 }
30533 #endif
30534-
30535-static __init int vdso_setup(char *s)
30536-{
30537- vdso_enabled = simple_strtoul(s, NULL, 0);
30538- return 0;
30539-}
30540-__setup("vdso=", vdso_setup);
30541diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
30542index e014092..c76ab69 100644
30543--- a/arch/x86/xen/enlighten.c
30544+++ b/arch/x86/xen/enlighten.c
30545@@ -99,8 +99,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
30546
30547 struct shared_info xen_dummy_shared_info;
30548
30549-void *xen_initial_gdt;
30550-
30551 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
30552 __read_mostly int xen_have_vector_callback;
30553 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
30554@@ -495,8 +493,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
30555 {
30556 unsigned long va = dtr->address;
30557 unsigned int size = dtr->size + 1;
30558- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
30559- unsigned long frames[pages];
30560+ unsigned long frames[65536 / PAGE_SIZE];
30561 int f;
30562
30563 /*
30564@@ -544,8 +541,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
30565 {
30566 unsigned long va = dtr->address;
30567 unsigned int size = dtr->size + 1;
30568- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
30569- unsigned long frames[pages];
30570+ unsigned long frames[65536 / PAGE_SIZE];
30571 int f;
30572
30573 /*
30574@@ -938,7 +934,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
30575 return 0;
30576 }
30577
30578-static void set_xen_basic_apic_ops(void)
30579+static void __init set_xen_basic_apic_ops(void)
30580 {
30581 apic->read = xen_apic_read;
30582 apic->write = xen_apic_write;
30583@@ -1244,30 +1240,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
30584 #endif
30585 };
30586
30587-static void xen_reboot(int reason)
30588+static __noreturn void xen_reboot(int reason)
30589 {
30590 struct sched_shutdown r = { .reason = reason };
30591
30592- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
30593- BUG();
30594+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
30595+ BUG();
30596 }
30597
30598-static void xen_restart(char *msg)
30599+static __noreturn void xen_restart(char *msg)
30600 {
30601 xen_reboot(SHUTDOWN_reboot);
30602 }
30603
30604-static void xen_emergency_restart(void)
30605+static __noreturn void xen_emergency_restart(void)
30606 {
30607 xen_reboot(SHUTDOWN_reboot);
30608 }
30609
30610-static void xen_machine_halt(void)
30611+static __noreturn void xen_machine_halt(void)
30612 {
30613 xen_reboot(SHUTDOWN_poweroff);
30614 }
30615
30616-static void xen_machine_power_off(void)
30617+static __noreturn void xen_machine_power_off(void)
30618 {
30619 if (pm_power_off)
30620 pm_power_off();
30621@@ -1369,7 +1365,17 @@ asmlinkage void __init xen_start_kernel(void)
30622 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
30623
30624 /* Work out if we support NX */
30625- x86_configure_nx();
30626+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
30627+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
30628+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
30629+ unsigned l, h;
30630+
30631+ __supported_pte_mask |= _PAGE_NX;
30632+ rdmsr(MSR_EFER, l, h);
30633+ l |= EFER_NX;
30634+ wrmsr(MSR_EFER, l, h);
30635+ }
30636+#endif
30637
30638 xen_setup_features();
30639
30640@@ -1398,14 +1404,7 @@ asmlinkage void __init xen_start_kernel(void)
30641 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
30642 }
30643
30644- machine_ops = xen_machine_ops;
30645-
30646- /*
30647- * The only reliable way to retain the initial address of the
30648- * percpu gdt_page is to remember it here, so we can go and
30649- * mark it RW later, when the initial percpu area is freed.
30650- */
30651- xen_initial_gdt = &per_cpu(gdt_page, 0);
30652+ memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
30653
30654 xen_smp_init();
30655
30656@@ -1590,7 +1589,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
30657 return NOTIFY_OK;
30658 }
30659
30660-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
30661+static struct notifier_block xen_hvm_cpu_notifier = {
30662 .notifier_call = xen_hvm_cpu_notify,
30663 };
30664
30665diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
30666index 01de35c..0bda07b 100644
30667--- a/arch/x86/xen/mmu.c
30668+++ b/arch/x86/xen/mmu.c
30669@@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
30670 /* L3_k[510] -> level2_kernel_pgt
30671 * L3_i[511] -> level2_fixmap_pgt */
30672 convert_pfn_mfn(level3_kernel_pgt);
30673+ convert_pfn_mfn(level3_vmalloc_start_pgt);
30674+ convert_pfn_mfn(level3_vmalloc_end_pgt);
30675+ convert_pfn_mfn(level3_vmemmap_pgt);
30676
30677 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
30678 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
30679@@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
30680 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
30681 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
30682 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
30683+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
30684+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
30685+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
30686 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
30687 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
30688+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
30689 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
30690 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
30691
30692@@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
30693 pv_mmu_ops.set_pud = xen_set_pud;
30694 #if PAGETABLE_LEVELS == 4
30695 pv_mmu_ops.set_pgd = xen_set_pgd;
30696+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
30697 #endif
30698
30699 /* This will work as long as patching hasn't happened yet
30700@@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
30701 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
30702 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
30703 .set_pgd = xen_set_pgd_hyper,
30704+ .set_pgd_batched = xen_set_pgd_hyper,
30705
30706 .alloc_pud = xen_alloc_pmd_init,
30707 .release_pud = xen_release_pmd_init,
30708diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
30709index 34bc4ce..c34aa24 100644
30710--- a/arch/x86/xen/smp.c
30711+++ b/arch/x86/xen/smp.c
30712@@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
30713 {
30714 BUG_ON(smp_processor_id() != 0);
30715 native_smp_prepare_boot_cpu();
30716-
30717- /* We've switched to the "real" per-cpu gdt, so make sure the
30718- old memory can be recycled */
30719- make_lowmem_page_readwrite(xen_initial_gdt);
30720-
30721 xen_filter_cpu_maps();
30722 xen_setup_vcpu_info_placement();
30723 }
30724@@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
30725 gdt = get_cpu_gdt_table(cpu);
30726
30727 ctxt->flags = VGCF_IN_KERNEL;
30728- ctxt->user_regs.ds = __USER_DS;
30729- ctxt->user_regs.es = __USER_DS;
30730+ ctxt->user_regs.ds = __KERNEL_DS;
30731+ ctxt->user_regs.es = __KERNEL_DS;
30732 ctxt->user_regs.ss = __KERNEL_DS;
30733 #ifdef CONFIG_X86_32
30734 ctxt->user_regs.fs = __KERNEL_PERCPU;
30735- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
30736+ savesegment(gs, ctxt->user_regs.gs);
30737 #else
30738 ctxt->gs_base_kernel = per_cpu_offset(cpu);
30739 #endif
30740@@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
30741 int rc;
30742
30743 per_cpu(current_task, cpu) = idle;
30744+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
30745 #ifdef CONFIG_X86_32
30746 irq_ctx_init(cpu);
30747 #else
30748 clear_tsk_thread_flag(idle, TIF_FORK);
30749- per_cpu(kernel_stack, cpu) =
30750- (unsigned long)task_stack_page(idle) -
30751- KERNEL_STACK_OFFSET + THREAD_SIZE;
30752+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
30753 #endif
30754 xen_setup_runstate_info(cpu);
30755 xen_setup_timer(cpu);
30756@@ -630,7 +624,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
30757
30758 void __init xen_smp_init(void)
30759 {
30760- smp_ops = xen_smp_ops;
30761+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
30762 xen_fill_possible_map();
30763 xen_init_spinlocks();
30764 }
30765diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
30766index 33ca6e4..0ded929 100644
30767--- a/arch/x86/xen/xen-asm_32.S
30768+++ b/arch/x86/xen/xen-asm_32.S
30769@@ -84,14 +84,14 @@ ENTRY(xen_iret)
30770 ESP_OFFSET=4 # bytes pushed onto stack
30771
30772 /*
30773- * Store vcpu_info pointer for easy access. Do it this way to
30774- * avoid having to reload %fs
30775+ * Store vcpu_info pointer for easy access.
30776 */
30777 #ifdef CONFIG_SMP
30778- GET_THREAD_INFO(%eax)
30779- movl %ss:TI_cpu(%eax), %eax
30780- movl %ss:__per_cpu_offset(,%eax,4), %eax
30781- mov %ss:xen_vcpu(%eax), %eax
30782+ push %fs
30783+ mov $(__KERNEL_PERCPU), %eax
30784+ mov %eax, %fs
30785+ mov PER_CPU_VAR(xen_vcpu), %eax
30786+ pop %fs
30787 #else
30788 movl %ss:xen_vcpu, %eax
30789 #endif
30790diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
30791index 7faed58..ba4427c 100644
30792--- a/arch/x86/xen/xen-head.S
30793+++ b/arch/x86/xen/xen-head.S
30794@@ -19,6 +19,17 @@ ENTRY(startup_xen)
30795 #ifdef CONFIG_X86_32
30796 mov %esi,xen_start_info
30797 mov $init_thread_union+THREAD_SIZE,%esp
30798+#ifdef CONFIG_SMP
30799+ movl $cpu_gdt_table,%edi
30800+ movl $__per_cpu_load,%eax
30801+ movw %ax,__KERNEL_PERCPU + 2(%edi)
30802+ rorl $16,%eax
30803+ movb %al,__KERNEL_PERCPU + 4(%edi)
30804+ movb %ah,__KERNEL_PERCPU + 7(%edi)
30805+ movl $__per_cpu_end - 1,%eax
30806+ subl $__per_cpu_start,%eax
30807+ movw %ax,__KERNEL_PERCPU + 0(%edi)
30808+#endif
30809 #else
30810 mov %rsi,xen_start_info
30811 mov $init_thread_union+THREAD_SIZE,%rsp
30812diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
30813index a95b417..b6dbd0b 100644
30814--- a/arch/x86/xen/xen-ops.h
30815+++ b/arch/x86/xen/xen-ops.h
30816@@ -10,8 +10,6 @@
30817 extern const char xen_hypervisor_callback[];
30818 extern const char xen_failsafe_callback[];
30819
30820-extern void *xen_initial_gdt;
30821-
30822 struct trap_info;
30823 void xen_copy_trap_info(struct trap_info *traps);
30824
30825diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
30826index 525bd3d..ef888b1 100644
30827--- a/arch/xtensa/variants/dc232b/include/variant/core.h
30828+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
30829@@ -119,9 +119,9 @@
30830 ----------------------------------------------------------------------*/
30831
30832 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
30833-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
30834 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
30835 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
30836+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
30837
30838 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
30839 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
30840diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
30841index 2f33760..835e50a 100644
30842--- a/arch/xtensa/variants/fsf/include/variant/core.h
30843+++ b/arch/xtensa/variants/fsf/include/variant/core.h
30844@@ -11,6 +11,7 @@
30845 #ifndef _XTENSA_CORE_H
30846 #define _XTENSA_CORE_H
30847
30848+#include <linux/const.h>
30849
30850 /****************************************************************************
30851 Parameters Useful for Any Code, USER or PRIVILEGED
30852@@ -112,9 +113,9 @@
30853 ----------------------------------------------------------------------*/
30854
30855 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
30856-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
30857 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
30858 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
30859+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
30860
30861 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
30862 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
30863diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
30864index af00795..2bb8105 100644
30865--- a/arch/xtensa/variants/s6000/include/variant/core.h
30866+++ b/arch/xtensa/variants/s6000/include/variant/core.h
30867@@ -11,6 +11,7 @@
30868 #ifndef _XTENSA_CORE_CONFIGURATION_H
30869 #define _XTENSA_CORE_CONFIGURATION_H
30870
30871+#include <linux/const.h>
30872
30873 /****************************************************************************
30874 Parameters Useful for Any Code, USER or PRIVILEGED
30875@@ -118,9 +119,9 @@
30876 ----------------------------------------------------------------------*/
30877
30878 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
30879-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
30880 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
30881 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
30882+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
30883
30884 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
30885 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
30886diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
30887index 58916af..eb9dbcf6 100644
30888--- a/block/blk-iopoll.c
30889+++ b/block/blk-iopoll.c
30890@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
30891 }
30892 EXPORT_SYMBOL(blk_iopoll_complete);
30893
30894-static void blk_iopoll_softirq(struct softirq_action *h)
30895+static void blk_iopoll_softirq(void)
30896 {
30897 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
30898 int rearm = 0, budget = blk_iopoll_budget;
30899@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
30900 return NOTIFY_OK;
30901 }
30902
30903-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
30904+static struct notifier_block blk_iopoll_cpu_notifier = {
30905 .notifier_call = blk_iopoll_cpu_notify,
30906 };
30907
30908diff --git a/block/blk-map.c b/block/blk-map.c
30909index 623e1cd..ca1e109 100644
30910--- a/block/blk-map.c
30911+++ b/block/blk-map.c
30912@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
30913 if (!len || !kbuf)
30914 return -EINVAL;
30915
30916- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
30917+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
30918 if (do_copy)
30919 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
30920 else
30921diff --git a/block/blk-softirq.c b/block/blk-softirq.c
30922index 467c8de..f3628c5 100644
30923--- a/block/blk-softirq.c
30924+++ b/block/blk-softirq.c
30925@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
30926 * Softirq action handler - move entries to local list and loop over them
30927 * while passing them to the queue registered handler.
30928 */
30929-static void blk_done_softirq(struct softirq_action *h)
30930+static void blk_done_softirq(void)
30931 {
30932 struct list_head *cpu_list, local_list;
30933
30934@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
30935 return NOTIFY_OK;
30936 }
30937
30938-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
30939+static struct notifier_block blk_cpu_notifier = {
30940 .notifier_call = blk_cpu_notify,
30941 };
30942
30943diff --git a/block/bsg.c b/block/bsg.c
30944index ff64ae3..593560c 100644
30945--- a/block/bsg.c
30946+++ b/block/bsg.c
30947@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
30948 struct sg_io_v4 *hdr, struct bsg_device *bd,
30949 fmode_t has_write_perm)
30950 {
30951+ unsigned char tmpcmd[sizeof(rq->__cmd)];
30952+ unsigned char *cmdptr;
30953+
30954 if (hdr->request_len > BLK_MAX_CDB) {
30955 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
30956 if (!rq->cmd)
30957 return -ENOMEM;
30958- }
30959+ cmdptr = rq->cmd;
30960+ } else
30961+ cmdptr = tmpcmd;
30962
30963- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
30964+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
30965 hdr->request_len))
30966 return -EFAULT;
30967
30968+ if (cmdptr != rq->cmd)
30969+ memcpy(rq->cmd, cmdptr, hdr->request_len);
30970+
30971 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
30972 if (blk_verify_command(rq->cmd, has_write_perm))
30973 return -EPERM;
30974diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
30975index 7c668c8..db3521c 100644
30976--- a/block/compat_ioctl.c
30977+++ b/block/compat_ioctl.c
30978@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
30979 err |= __get_user(f->spec1, &uf->spec1);
30980 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
30981 err |= __get_user(name, &uf->name);
30982- f->name = compat_ptr(name);
30983+ f->name = (void __force_kernel *)compat_ptr(name);
30984 if (err) {
30985 err = -EFAULT;
30986 goto out;
30987diff --git a/block/partitions/efi.c b/block/partitions/efi.c
30988index b62fb88..bdab4c4 100644
30989--- a/block/partitions/efi.c
30990+++ b/block/partitions/efi.c
30991@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
30992 if (!gpt)
30993 return NULL;
30994
30995+ if (!le32_to_cpu(gpt->num_partition_entries))
30996+ return NULL;
30997+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
30998+ if (!pte)
30999+ return NULL;
31000+
31001 count = le32_to_cpu(gpt->num_partition_entries) *
31002 le32_to_cpu(gpt->sizeof_partition_entry);
31003- if (!count)
31004- return NULL;
31005- pte = kzalloc(count, GFP_KERNEL);
31006- if (!pte)
31007- return NULL;
31008-
31009 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
31010 (u8 *) pte,
31011 count) < count) {
31012diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
31013index 9a87daa..fb17486 100644
31014--- a/block/scsi_ioctl.c
31015+++ b/block/scsi_ioctl.c
31016@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
31017 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
31018 struct sg_io_hdr *hdr, fmode_t mode)
31019 {
31020- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
31021+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31022+ unsigned char *cmdptr;
31023+
31024+ if (rq->cmd != rq->__cmd)
31025+ cmdptr = rq->cmd;
31026+ else
31027+ cmdptr = tmpcmd;
31028+
31029+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
31030 return -EFAULT;
31031+
31032+ if (cmdptr != rq->cmd)
31033+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
31034+
31035 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
31036 return -EPERM;
31037
31038@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31039 int err;
31040 unsigned int in_len, out_len, bytes, opcode, cmdlen;
31041 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
31042+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31043+ unsigned char *cmdptr;
31044
31045 if (!sic)
31046 return -EINVAL;
31047@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31048 */
31049 err = -EFAULT;
31050 rq->cmd_len = cmdlen;
31051- if (copy_from_user(rq->cmd, sic->data, cmdlen))
31052+
31053+ if (rq->cmd != rq->__cmd)
31054+ cmdptr = rq->cmd;
31055+ else
31056+ cmdptr = tmpcmd;
31057+
31058+ if (copy_from_user(cmdptr, sic->data, cmdlen))
31059 goto error;
31060
31061+ if (rq->cmd != cmdptr)
31062+ memcpy(rq->cmd, cmdptr, cmdlen);
31063+
31064 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
31065 goto error;
31066
31067diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
31068index 533de95..7d4a8d2 100644
31069--- a/crypto/ablkcipher.c
31070+++ b/crypto/ablkcipher.c
31071@@ -388,9 +388,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
31072 {
31073 struct crypto_report_blkcipher rblkcipher;
31074
31075- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher");
31076- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
31077- alg->cra_ablkcipher.geniv ?: "<default>");
31078+ strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
31079+ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
31080+ sizeof(rblkcipher.geniv));
31081
31082 rblkcipher.blocksize = alg->cra_blocksize;
31083 rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
31084@@ -469,9 +469,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
31085 {
31086 struct crypto_report_blkcipher rblkcipher;
31087
31088- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher");
31089- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
31090- alg->cra_ablkcipher.geniv ?: "<built-in>");
31091+ strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
31092+ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
31093+ sizeof(rblkcipher.geniv));
31094
31095 rblkcipher.blocksize = alg->cra_blocksize;
31096 rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
31097diff --git a/crypto/aead.c b/crypto/aead.c
31098index 0b8121e..27bc487 100644
31099--- a/crypto/aead.c
31100+++ b/crypto/aead.c
31101@@ -117,9 +117,8 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
31102 struct crypto_report_aead raead;
31103 struct aead_alg *aead = &alg->cra_aead;
31104
31105- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead");
31106- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s",
31107- aead->geniv ?: "<built-in>");
31108+ strncpy(raead.type, "aead", sizeof(raead.type));
31109+ strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
31110
31111 raead.blocksize = alg->cra_blocksize;
31112 raead.maxauthsize = aead->maxauthsize;
31113@@ -203,8 +202,8 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
31114 struct crypto_report_aead raead;
31115 struct aead_alg *aead = &alg->cra_aead;
31116
31117- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead");
31118- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv);
31119+ strncpy(raead.type, "nivaead", sizeof(raead.type));
31120+ strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
31121
31122 raead.blocksize = alg->cra_blocksize;
31123 raead.maxauthsize = aead->maxauthsize;
31124diff --git a/crypto/ahash.c b/crypto/ahash.c
31125index 3887856..793a27f 100644
31126--- a/crypto/ahash.c
31127+++ b/crypto/ahash.c
31128@@ -404,7 +404,7 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
31129 {
31130 struct crypto_report_hash rhash;
31131
31132- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
31133+ strncpy(rhash.type, "ahash", sizeof(rhash.type));
31134
31135 rhash.blocksize = alg->cra_blocksize;
31136 rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
31137diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
31138index a8d85a1..c44e014 100644
31139--- a/crypto/blkcipher.c
31140+++ b/crypto/blkcipher.c
31141@@ -499,9 +499,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
31142 {
31143 struct crypto_report_blkcipher rblkcipher;
31144
31145- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
31146- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
31147- alg->cra_blkcipher.geniv ?: "<default>");
31148+ strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
31149+ strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
31150+ sizeof(rblkcipher.geniv));
31151
31152 rblkcipher.blocksize = alg->cra_blocksize;
31153 rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
31154diff --git a/crypto/cryptd.c b/crypto/cryptd.c
31155index 7bdd61b..afec999 100644
31156--- a/crypto/cryptd.c
31157+++ b/crypto/cryptd.c
31158@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
31159
31160 struct cryptd_blkcipher_request_ctx {
31161 crypto_completion_t complete;
31162-};
31163+} __no_const;
31164
31165 struct cryptd_hash_ctx {
31166 struct crypto_shash *child;
31167@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
31168
31169 struct cryptd_aead_request_ctx {
31170 crypto_completion_t complete;
31171-};
31172+} __no_const;
31173
31174 static void cryptd_queue_worker(struct work_struct *work);
31175
31176diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
31177index 35d700a..dfd511f 100644
31178--- a/crypto/crypto_user.c
31179+++ b/crypto/crypto_user.c
31180@@ -30,6 +30,8 @@
31181
31182 #include "internal.h"
31183
31184+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
31185+
31186 static DEFINE_MUTEX(crypto_cfg_mutex);
31187
31188 /* The crypto netlink socket */
31189@@ -75,7 +77,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
31190 {
31191 struct crypto_report_cipher rcipher;
31192
31193- snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher");
31194+ strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
31195
31196 rcipher.blocksize = alg->cra_blocksize;
31197 rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
31198@@ -94,8 +96,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
31199 {
31200 struct crypto_report_comp rcomp;
31201
31202- snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
31203-
31204+ strncpy(rcomp.type, "compression", sizeof(rcomp.type));
31205 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
31206 sizeof(struct crypto_report_comp), &rcomp))
31207 goto nla_put_failure;
31208@@ -108,12 +109,14 @@ nla_put_failure:
31209 static int crypto_report_one(struct crypto_alg *alg,
31210 struct crypto_user_alg *ualg, struct sk_buff *skb)
31211 {
31212- memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
31213- memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
31214- sizeof(ualg->cru_driver_name));
31215- memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
31216- CRYPTO_MAX_ALG_NAME);
31217+ strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
31218+ strncpy(ualg->cru_driver_name, alg->cra_driver_name,
31219+ sizeof(ualg->cru_driver_name));
31220+ strncpy(ualg->cru_module_name, module_name(alg->cra_module),
31221+ sizeof(ualg->cru_module_name));
31222
31223+ ualg->cru_type = 0;
31224+ ualg->cru_mask = 0;
31225 ualg->cru_flags = alg->cra_flags;
31226 ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
31227
31228@@ -122,8 +125,7 @@ static int crypto_report_one(struct crypto_alg *alg,
31229 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
31230 struct crypto_report_larval rl;
31231
31232- snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
31233-
31234+ strncpy(rl.type, "larval", sizeof(rl.type));
31235 if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
31236 sizeof(struct crypto_report_larval), &rl))
31237 goto nla_put_failure;
31238@@ -196,7 +198,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
31239 struct crypto_dump_info info;
31240 int err;
31241
31242- if (!p->cru_driver_name)
31243+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31244+ return -EINVAL;
31245+
31246+ if (!p->cru_driver_name[0])
31247 return -EINVAL;
31248
31249 alg = crypto_alg_match(p, 1);
31250@@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31251 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
31252 LIST_HEAD(list);
31253
31254+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31255+ return -EINVAL;
31256+
31257 if (priority && !strlen(p->cru_driver_name))
31258 return -EINVAL;
31259
31260@@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31261 struct crypto_alg *alg;
31262 struct crypto_user_alg *p = nlmsg_data(nlh);
31263
31264+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31265+ return -EINVAL;
31266+
31267 alg = crypto_alg_match(p, 1);
31268 if (!alg)
31269 return -ENOENT;
31270@@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31271 struct crypto_user_alg *p = nlmsg_data(nlh);
31272 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
31273
31274+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31275+ return -EINVAL;
31276+
31277 if (strlen(p->cru_driver_name))
31278 exact = 1;
31279
31280diff --git a/crypto/pcompress.c b/crypto/pcompress.c
31281index 04e083f..7140fe7 100644
31282--- a/crypto/pcompress.c
31283+++ b/crypto/pcompress.c
31284@@ -53,8 +53,7 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
31285 {
31286 struct crypto_report_comp rpcomp;
31287
31288- snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
31289-
31290+ strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
31291 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
31292 sizeof(struct crypto_report_comp), &rpcomp))
31293 goto nla_put_failure;
31294diff --git a/crypto/rng.c b/crypto/rng.c
31295index f3b7894..e0a25c2 100644
31296--- a/crypto/rng.c
31297+++ b/crypto/rng.c
31298@@ -65,7 +65,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
31299 {
31300 struct crypto_report_rng rrng;
31301
31302- snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng");
31303+ strncpy(rrng.type, "rng", sizeof(rrng.type));
31304
31305 rrng.seedsize = alg->cra_rng.seedsize;
31306
31307diff --git a/crypto/shash.c b/crypto/shash.c
31308index f426330f..929058a 100644
31309--- a/crypto/shash.c
31310+++ b/crypto/shash.c
31311@@ -530,7 +530,8 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
31312 struct crypto_report_hash rhash;
31313 struct shash_alg *salg = __crypto_shash_alg(alg);
31314
31315- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash");
31316+ strncpy(rhash.type, "shash", sizeof(rhash.type));
31317+
31318 rhash.blocksize = alg->cra_blocksize;
31319 rhash.digestsize = salg->digestsize;
31320
31321diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
31322index f220d64..d359ad6 100644
31323--- a/drivers/acpi/apei/apei-internal.h
31324+++ b/drivers/acpi/apei/apei-internal.h
31325@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
31326 struct apei_exec_ins_type {
31327 u32 flags;
31328 apei_exec_ins_func_t run;
31329-};
31330+} __do_const;
31331
31332 struct apei_exec_context {
31333 u32 ip;
31334diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
31335index e6defd8..c26a225 100644
31336--- a/drivers/acpi/apei/cper.c
31337+++ b/drivers/acpi/apei/cper.c
31338@@ -38,12 +38,12 @@
31339 */
31340 u64 cper_next_record_id(void)
31341 {
31342- static atomic64_t seq;
31343+ static atomic64_unchecked_t seq;
31344
31345- if (!atomic64_read(&seq))
31346- atomic64_set(&seq, ((u64)get_seconds()) << 32);
31347+ if (!atomic64_read_unchecked(&seq))
31348+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
31349
31350- return atomic64_inc_return(&seq);
31351+ return atomic64_inc_return_unchecked(&seq);
31352 }
31353 EXPORT_SYMBOL_GPL(cper_next_record_id);
31354
31355diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
31356index be60399..778b33e8 100644
31357--- a/drivers/acpi/bgrt.c
31358+++ b/drivers/acpi/bgrt.c
31359@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
31360 return -ENODEV;
31361
31362 sysfs_bin_attr_init(&image_attr);
31363- image_attr.private = bgrt_image;
31364- image_attr.size = bgrt_image_size;
31365+ pax_open_kernel();
31366+ *(void **)&image_attr.private = bgrt_image;
31367+ *(size_t *)&image_attr.size = bgrt_image_size;
31368+ pax_close_kernel();
31369
31370 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
31371 if (!bgrt_kobj)
31372diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
31373index cb96296..2d6082b 100644
31374--- a/drivers/acpi/blacklist.c
31375+++ b/drivers/acpi/blacklist.c
31376@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
31377 return 0;
31378 }
31379
31380-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
31381+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
31382 {
31383 .callback = dmi_disable_osi_vista,
31384 .ident = "Fujitsu Siemens",
31385diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
31386index 7586544..636a2f0 100644
31387--- a/drivers/acpi/ec_sys.c
31388+++ b/drivers/acpi/ec_sys.c
31389@@ -12,6 +12,7 @@
31390 #include <linux/acpi.h>
31391 #include <linux/debugfs.h>
31392 #include <linux/module.h>
31393+#include <linux/uaccess.h>
31394 #include "internal.h"
31395
31396 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
31397@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31398 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
31399 */
31400 unsigned int size = EC_SPACE_SIZE;
31401- u8 *data = (u8 *) buf;
31402+ u8 data;
31403 loff_t init_off = *off;
31404 int err = 0;
31405
31406@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31407 size = count;
31408
31409 while (size) {
31410- err = ec_read(*off, &data[*off - init_off]);
31411+ err = ec_read(*off, &data);
31412 if (err)
31413 return err;
31414+ if (put_user(data, &buf[*off - init_off]))
31415+ return -EFAULT;
31416 *off += 1;
31417 size--;
31418 }
31419@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31420
31421 unsigned int size = count;
31422 loff_t init_off = *off;
31423- u8 *data = (u8 *) buf;
31424 int err = 0;
31425
31426 if (*off >= EC_SPACE_SIZE)
31427@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31428 }
31429
31430 while (size) {
31431- u8 byte_write = data[*off - init_off];
31432+ u8 byte_write;
31433+ if (get_user(byte_write, &buf[*off - init_off]))
31434+ return -EFAULT;
31435 err = ec_write(*off, byte_write);
31436 if (err)
31437 return err;
31438diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
31439index e83311b..142b5cc 100644
31440--- a/drivers/acpi/processor_driver.c
31441+++ b/drivers/acpi/processor_driver.c
31442@@ -558,7 +558,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
31443 return 0;
31444 #endif
31445
31446- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
31447+ BUG_ON(pr->id >= nr_cpu_ids);
31448
31449 /*
31450 * Buggy BIOS check
31451diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
31452index ed9a1cc..f4a354c 100644
31453--- a/drivers/acpi/processor_idle.c
31454+++ b/drivers/acpi/processor_idle.c
31455@@ -1005,7 +1005,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
31456 {
31457 int i, count = CPUIDLE_DRIVER_STATE_START;
31458 struct acpi_processor_cx *cx;
31459- struct cpuidle_state *state;
31460+ cpuidle_state_no_const *state;
31461 struct cpuidle_driver *drv = &acpi_idle_driver;
31462
31463 if (!pr->flags.power_setup_done)
31464diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
31465index ea61ca9..3fdd70d 100644
31466--- a/drivers/acpi/sysfs.c
31467+++ b/drivers/acpi/sysfs.c
31468@@ -420,11 +420,11 @@ static u32 num_counters;
31469 static struct attribute **all_attrs;
31470 static u32 acpi_gpe_count;
31471
31472-static struct attribute_group interrupt_stats_attr_group = {
31473+static attribute_group_no_const interrupt_stats_attr_group = {
31474 .name = "interrupts",
31475 };
31476
31477-static struct kobj_attribute *counter_attrs;
31478+static kobj_attribute_no_const *counter_attrs;
31479
31480 static void delete_gpe_attr_array(void)
31481 {
31482diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
31483index 46cd3f4..0871ad0 100644
31484--- a/drivers/ata/libata-core.c
31485+++ b/drivers/ata/libata-core.c
31486@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
31487 struct ata_port *ap;
31488 unsigned int tag;
31489
31490- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31491+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31492 ap = qc->ap;
31493
31494 qc->flags = 0;
31495@@ -4796,7 +4796,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
31496 struct ata_port *ap;
31497 struct ata_link *link;
31498
31499- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31500+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31501 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
31502 ap = qc->ap;
31503 link = qc->dev->link;
31504@@ -5892,6 +5892,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
31505 return;
31506
31507 spin_lock(&lock);
31508+ pax_open_kernel();
31509
31510 for (cur = ops->inherits; cur; cur = cur->inherits) {
31511 void **inherit = (void **)cur;
31512@@ -5905,8 +5906,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
31513 if (IS_ERR(*pp))
31514 *pp = NULL;
31515
31516- ops->inherits = NULL;
31517+ *(struct ata_port_operations **)&ops->inherits = NULL;
31518
31519+ pax_close_kernel();
31520 spin_unlock(&lock);
31521 }
31522
31523diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
31524index 405022d..fb70e53 100644
31525--- a/drivers/ata/pata_arasan_cf.c
31526+++ b/drivers/ata/pata_arasan_cf.c
31527@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
31528 /* Handle platform specific quirks */
31529 if (pdata->quirk) {
31530 if (pdata->quirk & CF_BROKEN_PIO) {
31531- ap->ops->set_piomode = NULL;
31532+ pax_open_kernel();
31533+ *(void **)&ap->ops->set_piomode = NULL;
31534+ pax_close_kernel();
31535 ap->pio_mask = 0;
31536 }
31537 if (pdata->quirk & CF_BROKEN_MWDMA)
31538diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
31539index f9b983a..887b9d8 100644
31540--- a/drivers/atm/adummy.c
31541+++ b/drivers/atm/adummy.c
31542@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
31543 vcc->pop(vcc, skb);
31544 else
31545 dev_kfree_skb_any(skb);
31546- atomic_inc(&vcc->stats->tx);
31547+ atomic_inc_unchecked(&vcc->stats->tx);
31548
31549 return 0;
31550 }
31551diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
31552index 77a7480..05cde58 100644
31553--- a/drivers/atm/ambassador.c
31554+++ b/drivers/atm/ambassador.c
31555@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
31556 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
31557
31558 // VC layer stats
31559- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31560+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31561
31562 // free the descriptor
31563 kfree (tx_descr);
31564@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31565 dump_skb ("<<<", vc, skb);
31566
31567 // VC layer stats
31568- atomic_inc(&atm_vcc->stats->rx);
31569+ atomic_inc_unchecked(&atm_vcc->stats->rx);
31570 __net_timestamp(skb);
31571 // end of our responsibility
31572 atm_vcc->push (atm_vcc, skb);
31573@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31574 } else {
31575 PRINTK (KERN_INFO, "dropped over-size frame");
31576 // should we count this?
31577- atomic_inc(&atm_vcc->stats->rx_drop);
31578+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31579 }
31580
31581 } else {
31582@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
31583 }
31584
31585 if (check_area (skb->data, skb->len)) {
31586- atomic_inc(&atm_vcc->stats->tx_err);
31587+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
31588 return -ENOMEM; // ?
31589 }
31590
31591diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
31592index b22d71c..d6e1049 100644
31593--- a/drivers/atm/atmtcp.c
31594+++ b/drivers/atm/atmtcp.c
31595@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31596 if (vcc->pop) vcc->pop(vcc,skb);
31597 else dev_kfree_skb(skb);
31598 if (dev_data) return 0;
31599- atomic_inc(&vcc->stats->tx_err);
31600+ atomic_inc_unchecked(&vcc->stats->tx_err);
31601 return -ENOLINK;
31602 }
31603 size = skb->len+sizeof(struct atmtcp_hdr);
31604@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31605 if (!new_skb) {
31606 if (vcc->pop) vcc->pop(vcc,skb);
31607 else dev_kfree_skb(skb);
31608- atomic_inc(&vcc->stats->tx_err);
31609+ atomic_inc_unchecked(&vcc->stats->tx_err);
31610 return -ENOBUFS;
31611 }
31612 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
31613@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31614 if (vcc->pop) vcc->pop(vcc,skb);
31615 else dev_kfree_skb(skb);
31616 out_vcc->push(out_vcc,new_skb);
31617- atomic_inc(&vcc->stats->tx);
31618- atomic_inc(&out_vcc->stats->rx);
31619+ atomic_inc_unchecked(&vcc->stats->tx);
31620+ atomic_inc_unchecked(&out_vcc->stats->rx);
31621 return 0;
31622 }
31623
31624@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31625 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
31626 read_unlock(&vcc_sklist_lock);
31627 if (!out_vcc) {
31628- atomic_inc(&vcc->stats->tx_err);
31629+ atomic_inc_unchecked(&vcc->stats->tx_err);
31630 goto done;
31631 }
31632 skb_pull(skb,sizeof(struct atmtcp_hdr));
31633@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31634 __net_timestamp(new_skb);
31635 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
31636 out_vcc->push(out_vcc,new_skb);
31637- atomic_inc(&vcc->stats->tx);
31638- atomic_inc(&out_vcc->stats->rx);
31639+ atomic_inc_unchecked(&vcc->stats->tx);
31640+ atomic_inc_unchecked(&out_vcc->stats->rx);
31641 done:
31642 if (vcc->pop) vcc->pop(vcc,skb);
31643 else dev_kfree_skb(skb);
31644diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
31645index c1eb6fa..4c71be9 100644
31646--- a/drivers/atm/eni.c
31647+++ b/drivers/atm/eni.c
31648@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
31649 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
31650 vcc->dev->number);
31651 length = 0;
31652- atomic_inc(&vcc->stats->rx_err);
31653+ atomic_inc_unchecked(&vcc->stats->rx_err);
31654 }
31655 else {
31656 length = ATM_CELL_SIZE-1; /* no HEC */
31657@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31658 size);
31659 }
31660 eff = length = 0;
31661- atomic_inc(&vcc->stats->rx_err);
31662+ atomic_inc_unchecked(&vcc->stats->rx_err);
31663 }
31664 else {
31665 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
31666@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31667 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
31668 vcc->dev->number,vcc->vci,length,size << 2,descr);
31669 length = eff = 0;
31670- atomic_inc(&vcc->stats->rx_err);
31671+ atomic_inc_unchecked(&vcc->stats->rx_err);
31672 }
31673 }
31674 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
31675@@ -767,7 +767,7 @@ rx_dequeued++;
31676 vcc->push(vcc,skb);
31677 pushed++;
31678 }
31679- atomic_inc(&vcc->stats->rx);
31680+ atomic_inc_unchecked(&vcc->stats->rx);
31681 }
31682 wake_up(&eni_dev->rx_wait);
31683 }
31684@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
31685 PCI_DMA_TODEVICE);
31686 if (vcc->pop) vcc->pop(vcc,skb);
31687 else dev_kfree_skb_irq(skb);
31688- atomic_inc(&vcc->stats->tx);
31689+ atomic_inc_unchecked(&vcc->stats->tx);
31690 wake_up(&eni_dev->tx_wait);
31691 dma_complete++;
31692 }
31693diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
31694index b41c948..a002b17 100644
31695--- a/drivers/atm/firestream.c
31696+++ b/drivers/atm/firestream.c
31697@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
31698 }
31699 }
31700
31701- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31702+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31703
31704 fs_dprintk (FS_DEBUG_TXMEM, "i");
31705 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
31706@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31707 #endif
31708 skb_put (skb, qe->p1 & 0xffff);
31709 ATM_SKB(skb)->vcc = atm_vcc;
31710- atomic_inc(&atm_vcc->stats->rx);
31711+ atomic_inc_unchecked(&atm_vcc->stats->rx);
31712 __net_timestamp(skb);
31713 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
31714 atm_vcc->push (atm_vcc, skb);
31715@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31716 kfree (pe);
31717 }
31718 if (atm_vcc)
31719- atomic_inc(&atm_vcc->stats->rx_drop);
31720+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31721 break;
31722 case 0x1f: /* Reassembly abort: no buffers. */
31723 /* Silently increment error counter. */
31724 if (atm_vcc)
31725- atomic_inc(&atm_vcc->stats->rx_drop);
31726+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31727 break;
31728 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
31729 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
31730diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
31731index 204814e..cede831 100644
31732--- a/drivers/atm/fore200e.c
31733+++ b/drivers/atm/fore200e.c
31734@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
31735 #endif
31736 /* check error condition */
31737 if (*entry->status & STATUS_ERROR)
31738- atomic_inc(&vcc->stats->tx_err);
31739+ atomic_inc_unchecked(&vcc->stats->tx_err);
31740 else
31741- atomic_inc(&vcc->stats->tx);
31742+ atomic_inc_unchecked(&vcc->stats->tx);
31743 }
31744 }
31745
31746@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31747 if (skb == NULL) {
31748 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
31749
31750- atomic_inc(&vcc->stats->rx_drop);
31751+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31752 return -ENOMEM;
31753 }
31754
31755@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31756
31757 dev_kfree_skb_any(skb);
31758
31759- atomic_inc(&vcc->stats->rx_drop);
31760+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31761 return -ENOMEM;
31762 }
31763
31764 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31765
31766 vcc->push(vcc, skb);
31767- atomic_inc(&vcc->stats->rx);
31768+ atomic_inc_unchecked(&vcc->stats->rx);
31769
31770 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31771
31772@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
31773 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
31774 fore200e->atm_dev->number,
31775 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
31776- atomic_inc(&vcc->stats->rx_err);
31777+ atomic_inc_unchecked(&vcc->stats->rx_err);
31778 }
31779 }
31780
31781@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
31782 goto retry_here;
31783 }
31784
31785- atomic_inc(&vcc->stats->tx_err);
31786+ atomic_inc_unchecked(&vcc->stats->tx_err);
31787
31788 fore200e->tx_sat++;
31789 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
31790diff --git a/drivers/atm/he.c b/drivers/atm/he.c
31791index 72b6960..cf9167a 100644
31792--- a/drivers/atm/he.c
31793+++ b/drivers/atm/he.c
31794@@ -1699,7 +1699,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31795
31796 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
31797 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
31798- atomic_inc(&vcc->stats->rx_drop);
31799+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31800 goto return_host_buffers;
31801 }
31802
31803@@ -1726,7 +1726,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31804 RBRQ_LEN_ERR(he_dev->rbrq_head)
31805 ? "LEN_ERR" : "",
31806 vcc->vpi, vcc->vci);
31807- atomic_inc(&vcc->stats->rx_err);
31808+ atomic_inc_unchecked(&vcc->stats->rx_err);
31809 goto return_host_buffers;
31810 }
31811
31812@@ -1778,7 +1778,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31813 vcc->push(vcc, skb);
31814 spin_lock(&he_dev->global_lock);
31815
31816- atomic_inc(&vcc->stats->rx);
31817+ atomic_inc_unchecked(&vcc->stats->rx);
31818
31819 return_host_buffers:
31820 ++pdus_assembled;
31821@@ -2104,7 +2104,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
31822 tpd->vcc->pop(tpd->vcc, tpd->skb);
31823 else
31824 dev_kfree_skb_any(tpd->skb);
31825- atomic_inc(&tpd->vcc->stats->tx_err);
31826+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
31827 }
31828 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
31829 return;
31830@@ -2516,7 +2516,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31831 vcc->pop(vcc, skb);
31832 else
31833 dev_kfree_skb_any(skb);
31834- atomic_inc(&vcc->stats->tx_err);
31835+ atomic_inc_unchecked(&vcc->stats->tx_err);
31836 return -EINVAL;
31837 }
31838
31839@@ -2527,7 +2527,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31840 vcc->pop(vcc, skb);
31841 else
31842 dev_kfree_skb_any(skb);
31843- atomic_inc(&vcc->stats->tx_err);
31844+ atomic_inc_unchecked(&vcc->stats->tx_err);
31845 return -EINVAL;
31846 }
31847 #endif
31848@@ -2539,7 +2539,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31849 vcc->pop(vcc, skb);
31850 else
31851 dev_kfree_skb_any(skb);
31852- atomic_inc(&vcc->stats->tx_err);
31853+ atomic_inc_unchecked(&vcc->stats->tx_err);
31854 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31855 return -ENOMEM;
31856 }
31857@@ -2581,7 +2581,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31858 vcc->pop(vcc, skb);
31859 else
31860 dev_kfree_skb_any(skb);
31861- atomic_inc(&vcc->stats->tx_err);
31862+ atomic_inc_unchecked(&vcc->stats->tx_err);
31863 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31864 return -ENOMEM;
31865 }
31866@@ -2612,7 +2612,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31867 __enqueue_tpd(he_dev, tpd, cid);
31868 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31869
31870- atomic_inc(&vcc->stats->tx);
31871+ atomic_inc_unchecked(&vcc->stats->tx);
31872
31873 return 0;
31874 }
31875diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
31876index 1dc0519..1aadaf7 100644
31877--- a/drivers/atm/horizon.c
31878+++ b/drivers/atm/horizon.c
31879@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
31880 {
31881 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
31882 // VC layer stats
31883- atomic_inc(&vcc->stats->rx);
31884+ atomic_inc_unchecked(&vcc->stats->rx);
31885 __net_timestamp(skb);
31886 // end of our responsibility
31887 vcc->push (vcc, skb);
31888@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
31889 dev->tx_iovec = NULL;
31890
31891 // VC layer stats
31892- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31893+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31894
31895 // free the skb
31896 hrz_kfree_skb (skb);
31897diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
31898index 272f009..a18ba55 100644
31899--- a/drivers/atm/idt77252.c
31900+++ b/drivers/atm/idt77252.c
31901@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
31902 else
31903 dev_kfree_skb(skb);
31904
31905- atomic_inc(&vcc->stats->tx);
31906+ atomic_inc_unchecked(&vcc->stats->tx);
31907 }
31908
31909 atomic_dec(&scq->used);
31910@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31911 if ((sb = dev_alloc_skb(64)) == NULL) {
31912 printk("%s: Can't allocate buffers for aal0.\n",
31913 card->name);
31914- atomic_add(i, &vcc->stats->rx_drop);
31915+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
31916 break;
31917 }
31918 if (!atm_charge(vcc, sb->truesize)) {
31919 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
31920 card->name);
31921- atomic_add(i - 1, &vcc->stats->rx_drop);
31922+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
31923 dev_kfree_skb(sb);
31924 break;
31925 }
31926@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31927 ATM_SKB(sb)->vcc = vcc;
31928 __net_timestamp(sb);
31929 vcc->push(vcc, sb);
31930- atomic_inc(&vcc->stats->rx);
31931+ atomic_inc_unchecked(&vcc->stats->rx);
31932
31933 cell += ATM_CELL_PAYLOAD;
31934 }
31935@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31936 "(CDC: %08x)\n",
31937 card->name, len, rpp->len, readl(SAR_REG_CDC));
31938 recycle_rx_pool_skb(card, rpp);
31939- atomic_inc(&vcc->stats->rx_err);
31940+ atomic_inc_unchecked(&vcc->stats->rx_err);
31941 return;
31942 }
31943 if (stat & SAR_RSQE_CRC) {
31944 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
31945 recycle_rx_pool_skb(card, rpp);
31946- atomic_inc(&vcc->stats->rx_err);
31947+ atomic_inc_unchecked(&vcc->stats->rx_err);
31948 return;
31949 }
31950 if (skb_queue_len(&rpp->queue) > 1) {
31951@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31952 RXPRINTK("%s: Can't alloc RX skb.\n",
31953 card->name);
31954 recycle_rx_pool_skb(card, rpp);
31955- atomic_inc(&vcc->stats->rx_err);
31956+ atomic_inc_unchecked(&vcc->stats->rx_err);
31957 return;
31958 }
31959 if (!atm_charge(vcc, skb->truesize)) {
31960@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31961 __net_timestamp(skb);
31962
31963 vcc->push(vcc, skb);
31964- atomic_inc(&vcc->stats->rx);
31965+ atomic_inc_unchecked(&vcc->stats->rx);
31966
31967 return;
31968 }
31969@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31970 __net_timestamp(skb);
31971
31972 vcc->push(vcc, skb);
31973- atomic_inc(&vcc->stats->rx);
31974+ atomic_inc_unchecked(&vcc->stats->rx);
31975
31976 if (skb->truesize > SAR_FB_SIZE_3)
31977 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
31978@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
31979 if (vcc->qos.aal != ATM_AAL0) {
31980 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
31981 card->name, vpi, vci);
31982- atomic_inc(&vcc->stats->rx_drop);
31983+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31984 goto drop;
31985 }
31986
31987 if ((sb = dev_alloc_skb(64)) == NULL) {
31988 printk("%s: Can't allocate buffers for AAL0.\n",
31989 card->name);
31990- atomic_inc(&vcc->stats->rx_err);
31991+ atomic_inc_unchecked(&vcc->stats->rx_err);
31992 goto drop;
31993 }
31994
31995@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
31996 ATM_SKB(sb)->vcc = vcc;
31997 __net_timestamp(sb);
31998 vcc->push(vcc, sb);
31999- atomic_inc(&vcc->stats->rx);
32000+ atomic_inc_unchecked(&vcc->stats->rx);
32001
32002 drop:
32003 skb_pull(queue, 64);
32004@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32005
32006 if (vc == NULL) {
32007 printk("%s: NULL connection in send().\n", card->name);
32008- atomic_inc(&vcc->stats->tx_err);
32009+ atomic_inc_unchecked(&vcc->stats->tx_err);
32010 dev_kfree_skb(skb);
32011 return -EINVAL;
32012 }
32013 if (!test_bit(VCF_TX, &vc->flags)) {
32014 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
32015- atomic_inc(&vcc->stats->tx_err);
32016+ atomic_inc_unchecked(&vcc->stats->tx_err);
32017 dev_kfree_skb(skb);
32018 return -EINVAL;
32019 }
32020@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32021 break;
32022 default:
32023 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
32024- atomic_inc(&vcc->stats->tx_err);
32025+ atomic_inc_unchecked(&vcc->stats->tx_err);
32026 dev_kfree_skb(skb);
32027 return -EINVAL;
32028 }
32029
32030 if (skb_shinfo(skb)->nr_frags != 0) {
32031 printk("%s: No scatter-gather yet.\n", card->name);
32032- atomic_inc(&vcc->stats->tx_err);
32033+ atomic_inc_unchecked(&vcc->stats->tx_err);
32034 dev_kfree_skb(skb);
32035 return -EINVAL;
32036 }
32037@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32038
32039 err = queue_skb(card, vc, skb, oam);
32040 if (err) {
32041- atomic_inc(&vcc->stats->tx_err);
32042+ atomic_inc_unchecked(&vcc->stats->tx_err);
32043 dev_kfree_skb(skb);
32044 return err;
32045 }
32046@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
32047 skb = dev_alloc_skb(64);
32048 if (!skb) {
32049 printk("%s: Out of memory in send_oam().\n", card->name);
32050- atomic_inc(&vcc->stats->tx_err);
32051+ atomic_inc_unchecked(&vcc->stats->tx_err);
32052 return -ENOMEM;
32053 }
32054 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
32055diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
32056index 4217f29..88f547a 100644
32057--- a/drivers/atm/iphase.c
32058+++ b/drivers/atm/iphase.c
32059@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
32060 status = (u_short) (buf_desc_ptr->desc_mode);
32061 if (status & (RX_CER | RX_PTE | RX_OFL))
32062 {
32063- atomic_inc(&vcc->stats->rx_err);
32064+ atomic_inc_unchecked(&vcc->stats->rx_err);
32065 IF_ERR(printk("IA: bad packet, dropping it");)
32066 if (status & RX_CER) {
32067 IF_ERR(printk(" cause: packet CRC error\n");)
32068@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
32069 len = dma_addr - buf_addr;
32070 if (len > iadev->rx_buf_sz) {
32071 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
32072- atomic_inc(&vcc->stats->rx_err);
32073+ atomic_inc_unchecked(&vcc->stats->rx_err);
32074 goto out_free_desc;
32075 }
32076
32077@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32078 ia_vcc = INPH_IA_VCC(vcc);
32079 if (ia_vcc == NULL)
32080 {
32081- atomic_inc(&vcc->stats->rx_err);
32082+ atomic_inc_unchecked(&vcc->stats->rx_err);
32083 atm_return(vcc, skb->truesize);
32084 dev_kfree_skb_any(skb);
32085 goto INCR_DLE;
32086@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32087 if ((length > iadev->rx_buf_sz) || (length >
32088 (skb->len - sizeof(struct cpcs_trailer))))
32089 {
32090- atomic_inc(&vcc->stats->rx_err);
32091+ atomic_inc_unchecked(&vcc->stats->rx_err);
32092 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
32093 length, skb->len);)
32094 atm_return(vcc, skb->truesize);
32095@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32096
32097 IF_RX(printk("rx_dle_intr: skb push");)
32098 vcc->push(vcc,skb);
32099- atomic_inc(&vcc->stats->rx);
32100+ atomic_inc_unchecked(&vcc->stats->rx);
32101 iadev->rx_pkt_cnt++;
32102 }
32103 INCR_DLE:
32104@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
32105 {
32106 struct k_sonet_stats *stats;
32107 stats = &PRIV(_ia_dev[board])->sonet_stats;
32108- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
32109- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
32110- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
32111- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
32112- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
32113- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
32114- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
32115- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
32116- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
32117+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
32118+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
32119+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
32120+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
32121+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
32122+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
32123+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
32124+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
32125+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
32126 }
32127 ia_cmds.status = 0;
32128 break;
32129@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32130 if ((desc == 0) || (desc > iadev->num_tx_desc))
32131 {
32132 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
32133- atomic_inc(&vcc->stats->tx);
32134+ atomic_inc_unchecked(&vcc->stats->tx);
32135 if (vcc->pop)
32136 vcc->pop(vcc, skb);
32137 else
32138@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32139 ATM_DESC(skb) = vcc->vci;
32140 skb_queue_tail(&iadev->tx_dma_q, skb);
32141
32142- atomic_inc(&vcc->stats->tx);
32143+ atomic_inc_unchecked(&vcc->stats->tx);
32144 iadev->tx_pkt_cnt++;
32145 /* Increment transaction counter */
32146 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
32147
32148 #if 0
32149 /* add flow control logic */
32150- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
32151+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
32152 if (iavcc->vc_desc_cnt > 10) {
32153 vcc->tx_quota = vcc->tx_quota * 3 / 4;
32154 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
32155diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
32156index fa7d701..1e404c7 100644
32157--- a/drivers/atm/lanai.c
32158+++ b/drivers/atm/lanai.c
32159@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
32160 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
32161 lanai_endtx(lanai, lvcc);
32162 lanai_free_skb(lvcc->tx.atmvcc, skb);
32163- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
32164+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
32165 }
32166
32167 /* Try to fill the buffer - don't call unless there is backlog */
32168@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
32169 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
32170 __net_timestamp(skb);
32171 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
32172- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
32173+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
32174 out:
32175 lvcc->rx.buf.ptr = end;
32176 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
32177@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32178 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
32179 "vcc %d\n", lanai->number, (unsigned int) s, vci);
32180 lanai->stats.service_rxnotaal5++;
32181- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32182+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32183 return 0;
32184 }
32185 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
32186@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32187 int bytes;
32188 read_unlock(&vcc_sklist_lock);
32189 DPRINTK("got trashed rx pdu on vci %d\n", vci);
32190- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32191+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32192 lvcc->stats.x.aal5.service_trash++;
32193 bytes = (SERVICE_GET_END(s) * 16) -
32194 (((unsigned long) lvcc->rx.buf.ptr) -
32195@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32196 }
32197 if (s & SERVICE_STREAM) {
32198 read_unlock(&vcc_sklist_lock);
32199- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32200+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32201 lvcc->stats.x.aal5.service_stream++;
32202 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
32203 "PDU on VCI %d!\n", lanai->number, vci);
32204@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32205 return 0;
32206 }
32207 DPRINTK("got rx crc error on vci %d\n", vci);
32208- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32209+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32210 lvcc->stats.x.aal5.service_rxcrc++;
32211 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
32212 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
32213diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
32214index ed1d2b7..8cffc1f 100644
32215--- a/drivers/atm/nicstar.c
32216+++ b/drivers/atm/nicstar.c
32217@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32218 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
32219 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
32220 card->index);
32221- atomic_inc(&vcc->stats->tx_err);
32222+ atomic_inc_unchecked(&vcc->stats->tx_err);
32223 dev_kfree_skb_any(skb);
32224 return -EINVAL;
32225 }
32226@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32227 if (!vc->tx) {
32228 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
32229 card->index);
32230- atomic_inc(&vcc->stats->tx_err);
32231+ atomic_inc_unchecked(&vcc->stats->tx_err);
32232 dev_kfree_skb_any(skb);
32233 return -EINVAL;
32234 }
32235@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32236 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
32237 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
32238 card->index);
32239- atomic_inc(&vcc->stats->tx_err);
32240+ atomic_inc_unchecked(&vcc->stats->tx_err);
32241 dev_kfree_skb_any(skb);
32242 return -EINVAL;
32243 }
32244
32245 if (skb_shinfo(skb)->nr_frags != 0) {
32246 printk("nicstar%d: No scatter-gather yet.\n", card->index);
32247- atomic_inc(&vcc->stats->tx_err);
32248+ atomic_inc_unchecked(&vcc->stats->tx_err);
32249 dev_kfree_skb_any(skb);
32250 return -EINVAL;
32251 }
32252@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32253 }
32254
32255 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
32256- atomic_inc(&vcc->stats->tx_err);
32257+ atomic_inc_unchecked(&vcc->stats->tx_err);
32258 dev_kfree_skb_any(skb);
32259 return -EIO;
32260 }
32261- atomic_inc(&vcc->stats->tx);
32262+ atomic_inc_unchecked(&vcc->stats->tx);
32263
32264 return 0;
32265 }
32266@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32267 printk
32268 ("nicstar%d: Can't allocate buffers for aal0.\n",
32269 card->index);
32270- atomic_add(i, &vcc->stats->rx_drop);
32271+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32272 break;
32273 }
32274 if (!atm_charge(vcc, sb->truesize)) {
32275 RXPRINTK
32276 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
32277 card->index);
32278- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32279+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32280 dev_kfree_skb_any(sb);
32281 break;
32282 }
32283@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32284 ATM_SKB(sb)->vcc = vcc;
32285 __net_timestamp(sb);
32286 vcc->push(vcc, sb);
32287- atomic_inc(&vcc->stats->rx);
32288+ atomic_inc_unchecked(&vcc->stats->rx);
32289 cell += ATM_CELL_PAYLOAD;
32290 }
32291
32292@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32293 if (iovb == NULL) {
32294 printk("nicstar%d: Out of iovec buffers.\n",
32295 card->index);
32296- atomic_inc(&vcc->stats->rx_drop);
32297+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32298 recycle_rx_buf(card, skb);
32299 return;
32300 }
32301@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32302 small or large buffer itself. */
32303 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
32304 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
32305- atomic_inc(&vcc->stats->rx_err);
32306+ atomic_inc_unchecked(&vcc->stats->rx_err);
32307 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32308 NS_MAX_IOVECS);
32309 NS_PRV_IOVCNT(iovb) = 0;
32310@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32311 ("nicstar%d: Expected a small buffer, and this is not one.\n",
32312 card->index);
32313 which_list(card, skb);
32314- atomic_inc(&vcc->stats->rx_err);
32315+ atomic_inc_unchecked(&vcc->stats->rx_err);
32316 recycle_rx_buf(card, skb);
32317 vc->rx_iov = NULL;
32318 recycle_iov_buf(card, iovb);
32319@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32320 ("nicstar%d: Expected a large buffer, and this is not one.\n",
32321 card->index);
32322 which_list(card, skb);
32323- atomic_inc(&vcc->stats->rx_err);
32324+ atomic_inc_unchecked(&vcc->stats->rx_err);
32325 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32326 NS_PRV_IOVCNT(iovb));
32327 vc->rx_iov = NULL;
32328@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32329 printk(" - PDU size mismatch.\n");
32330 else
32331 printk(".\n");
32332- atomic_inc(&vcc->stats->rx_err);
32333+ atomic_inc_unchecked(&vcc->stats->rx_err);
32334 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32335 NS_PRV_IOVCNT(iovb));
32336 vc->rx_iov = NULL;
32337@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32338 /* skb points to a small buffer */
32339 if (!atm_charge(vcc, skb->truesize)) {
32340 push_rxbufs(card, skb);
32341- atomic_inc(&vcc->stats->rx_drop);
32342+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32343 } else {
32344 skb_put(skb, len);
32345 dequeue_sm_buf(card, skb);
32346@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32347 ATM_SKB(skb)->vcc = vcc;
32348 __net_timestamp(skb);
32349 vcc->push(vcc, skb);
32350- atomic_inc(&vcc->stats->rx);
32351+ atomic_inc_unchecked(&vcc->stats->rx);
32352 }
32353 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
32354 struct sk_buff *sb;
32355@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32356 if (len <= NS_SMBUFSIZE) {
32357 if (!atm_charge(vcc, sb->truesize)) {
32358 push_rxbufs(card, sb);
32359- atomic_inc(&vcc->stats->rx_drop);
32360+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32361 } else {
32362 skb_put(sb, len);
32363 dequeue_sm_buf(card, sb);
32364@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32365 ATM_SKB(sb)->vcc = vcc;
32366 __net_timestamp(sb);
32367 vcc->push(vcc, sb);
32368- atomic_inc(&vcc->stats->rx);
32369+ atomic_inc_unchecked(&vcc->stats->rx);
32370 }
32371
32372 push_rxbufs(card, skb);
32373@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32374
32375 if (!atm_charge(vcc, skb->truesize)) {
32376 push_rxbufs(card, skb);
32377- atomic_inc(&vcc->stats->rx_drop);
32378+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32379 } else {
32380 dequeue_lg_buf(card, skb);
32381 #ifdef NS_USE_DESTRUCTORS
32382@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32383 ATM_SKB(skb)->vcc = vcc;
32384 __net_timestamp(skb);
32385 vcc->push(vcc, skb);
32386- atomic_inc(&vcc->stats->rx);
32387+ atomic_inc_unchecked(&vcc->stats->rx);
32388 }
32389
32390 push_rxbufs(card, sb);
32391@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32392 printk
32393 ("nicstar%d: Out of huge buffers.\n",
32394 card->index);
32395- atomic_inc(&vcc->stats->rx_drop);
32396+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32397 recycle_iovec_rx_bufs(card,
32398 (struct iovec *)
32399 iovb->data,
32400@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32401 card->hbpool.count++;
32402 } else
32403 dev_kfree_skb_any(hb);
32404- atomic_inc(&vcc->stats->rx_drop);
32405+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32406 } else {
32407 /* Copy the small buffer to the huge buffer */
32408 sb = (struct sk_buff *)iov->iov_base;
32409@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32410 #endif /* NS_USE_DESTRUCTORS */
32411 __net_timestamp(hb);
32412 vcc->push(vcc, hb);
32413- atomic_inc(&vcc->stats->rx);
32414+ atomic_inc_unchecked(&vcc->stats->rx);
32415 }
32416 }
32417
32418diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
32419index 0474a89..06ea4a1 100644
32420--- a/drivers/atm/solos-pci.c
32421+++ b/drivers/atm/solos-pci.c
32422@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
32423 }
32424 atm_charge(vcc, skb->truesize);
32425 vcc->push(vcc, skb);
32426- atomic_inc(&vcc->stats->rx);
32427+ atomic_inc_unchecked(&vcc->stats->rx);
32428 break;
32429
32430 case PKT_STATUS:
32431@@ -1117,7 +1117,7 @@ static uint32_t fpga_tx(struct solos_card *card)
32432 vcc = SKB_CB(oldskb)->vcc;
32433
32434 if (vcc) {
32435- atomic_inc(&vcc->stats->tx);
32436+ atomic_inc_unchecked(&vcc->stats->tx);
32437 solos_pop(vcc, oldskb);
32438 } else {
32439 dev_kfree_skb_irq(oldskb);
32440diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
32441index 0215934..ce9f5b1 100644
32442--- a/drivers/atm/suni.c
32443+++ b/drivers/atm/suni.c
32444@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
32445
32446
32447 #define ADD_LIMITED(s,v) \
32448- atomic_add((v),&stats->s); \
32449- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
32450+ atomic_add_unchecked((v),&stats->s); \
32451+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
32452
32453
32454 static void suni_hz(unsigned long from_timer)
32455diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
32456index 5120a96..e2572bd 100644
32457--- a/drivers/atm/uPD98402.c
32458+++ b/drivers/atm/uPD98402.c
32459@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
32460 struct sonet_stats tmp;
32461 int error = 0;
32462
32463- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
32464+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
32465 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
32466 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
32467 if (zero && !error) {
32468@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
32469
32470
32471 #define ADD_LIMITED(s,v) \
32472- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
32473- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
32474- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
32475+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
32476+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
32477+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
32478
32479
32480 static void stat_event(struct atm_dev *dev)
32481@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
32482 if (reason & uPD98402_INT_PFM) stat_event(dev);
32483 if (reason & uPD98402_INT_PCO) {
32484 (void) GET(PCOCR); /* clear interrupt cause */
32485- atomic_add(GET(HECCT),
32486+ atomic_add_unchecked(GET(HECCT),
32487 &PRIV(dev)->sonet_stats.uncorr_hcs);
32488 }
32489 if ((reason & uPD98402_INT_RFO) &&
32490@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
32491 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
32492 uPD98402_INT_LOS),PIMR); /* enable them */
32493 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
32494- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
32495- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
32496- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
32497+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
32498+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
32499+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
32500 return 0;
32501 }
32502
32503diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
32504index 969c3c2..9b72956 100644
32505--- a/drivers/atm/zatm.c
32506+++ b/drivers/atm/zatm.c
32507@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32508 }
32509 if (!size) {
32510 dev_kfree_skb_irq(skb);
32511- if (vcc) atomic_inc(&vcc->stats->rx_err);
32512+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
32513 continue;
32514 }
32515 if (!atm_charge(vcc,skb->truesize)) {
32516@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32517 skb->len = size;
32518 ATM_SKB(skb)->vcc = vcc;
32519 vcc->push(vcc,skb);
32520- atomic_inc(&vcc->stats->rx);
32521+ atomic_inc_unchecked(&vcc->stats->rx);
32522 }
32523 zout(pos & 0xffff,MTA(mbx));
32524 #if 0 /* probably a stupid idea */
32525@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
32526 skb_queue_head(&zatm_vcc->backlog,skb);
32527 break;
32528 }
32529- atomic_inc(&vcc->stats->tx);
32530+ atomic_inc_unchecked(&vcc->stats->tx);
32531 wake_up(&zatm_vcc->tx_wait);
32532 }
32533
32534diff --git a/drivers/base/bus.c b/drivers/base/bus.c
32535index 6856303..0602d70 100644
32536--- a/drivers/base/bus.c
32537+++ b/drivers/base/bus.c
32538@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
32539 return -EINVAL;
32540
32541 mutex_lock(&subsys->p->mutex);
32542- list_add_tail(&sif->node, &subsys->p->interfaces);
32543+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
32544 if (sif->add_dev) {
32545 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
32546 while ((dev = subsys_dev_iter_next(&iter)))
32547@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
32548 subsys = sif->subsys;
32549
32550 mutex_lock(&subsys->p->mutex);
32551- list_del_init(&sif->node);
32552+ pax_list_del_init((struct list_head *)&sif->node);
32553 if (sif->remove_dev) {
32554 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
32555 while ((dev = subsys_dev_iter_next(&iter)))
32556diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
32557index 17cf7ca..7e553e1 100644
32558--- a/drivers/base/devtmpfs.c
32559+++ b/drivers/base/devtmpfs.c
32560@@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
32561 if (!thread)
32562 return 0;
32563
32564- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
32565+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
32566 if (err)
32567 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
32568 else
32569diff --git a/drivers/base/node.c b/drivers/base/node.c
32570index fac124a..66bd4ab 100644
32571--- a/drivers/base/node.c
32572+++ b/drivers/base/node.c
32573@@ -625,7 +625,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
32574 struct node_attr {
32575 struct device_attribute attr;
32576 enum node_states state;
32577-};
32578+} __do_const;
32579
32580 static ssize_t show_node_state(struct device *dev,
32581 struct device_attribute *attr, char *buf)
32582diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
32583index acc3a8d..981c236 100644
32584--- a/drivers/base/power/domain.c
32585+++ b/drivers/base/power/domain.c
32586@@ -1851,7 +1851,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
32587 {
32588 struct cpuidle_driver *cpuidle_drv;
32589 struct gpd_cpu_data *cpu_data;
32590- struct cpuidle_state *idle_state;
32591+ cpuidle_state_no_const *idle_state;
32592 int ret = 0;
32593
32594 if (IS_ERR_OR_NULL(genpd) || state < 0)
32595@@ -1919,7 +1919,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
32596 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
32597 {
32598 struct gpd_cpu_data *cpu_data;
32599- struct cpuidle_state *idle_state;
32600+ cpuidle_state_no_const *idle_state;
32601 int ret = 0;
32602
32603 if (IS_ERR_OR_NULL(genpd))
32604diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
32605index e6ee5e8..98ad7fc 100644
32606--- a/drivers/base/power/wakeup.c
32607+++ b/drivers/base/power/wakeup.c
32608@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
32609 * They need to be modified together atomically, so it's better to use one
32610 * atomic variable to hold them both.
32611 */
32612-static atomic_t combined_event_count = ATOMIC_INIT(0);
32613+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
32614
32615 #define IN_PROGRESS_BITS (sizeof(int) * 4)
32616 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
32617
32618 static void split_counters(unsigned int *cnt, unsigned int *inpr)
32619 {
32620- unsigned int comb = atomic_read(&combined_event_count);
32621+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
32622
32623 *cnt = (comb >> IN_PROGRESS_BITS);
32624 *inpr = comb & MAX_IN_PROGRESS;
32625@@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
32626 ws->start_prevent_time = ws->last_time;
32627
32628 /* Increment the counter of events in progress. */
32629- cec = atomic_inc_return(&combined_event_count);
32630+ cec = atomic_inc_return_unchecked(&combined_event_count);
32631
32632 trace_wakeup_source_activate(ws->name, cec);
32633 }
32634@@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
32635 * Increment the counter of registered wakeup events and decrement the
32636 * couter of wakeup events in progress simultaneously.
32637 */
32638- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
32639+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
32640 trace_wakeup_source_deactivate(ws->name, cec);
32641
32642 split_counters(&cnt, &inpr);
32643diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
32644index e8d11b6..7b1b36f 100644
32645--- a/drivers/base/syscore.c
32646+++ b/drivers/base/syscore.c
32647@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
32648 void register_syscore_ops(struct syscore_ops *ops)
32649 {
32650 mutex_lock(&syscore_ops_lock);
32651- list_add_tail(&ops->node, &syscore_ops_list);
32652+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
32653 mutex_unlock(&syscore_ops_lock);
32654 }
32655 EXPORT_SYMBOL_GPL(register_syscore_ops);
32656@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
32657 void unregister_syscore_ops(struct syscore_ops *ops)
32658 {
32659 mutex_lock(&syscore_ops_lock);
32660- list_del(&ops->node);
32661+ pax_list_del((struct list_head *)&ops->node);
32662 mutex_unlock(&syscore_ops_lock);
32663 }
32664 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
32665diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
32666index ade58bc..867143d 100644
32667--- a/drivers/block/cciss.c
32668+++ b/drivers/block/cciss.c
32669@@ -1196,6 +1196,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
32670 int err;
32671 u32 cp;
32672
32673+ memset(&arg64, 0, sizeof(arg64));
32674+
32675 err = 0;
32676 err |=
32677 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
32678@@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
32679 while (!list_empty(&h->reqQ)) {
32680 c = list_entry(h->reqQ.next, CommandList_struct, list);
32681 /* can't do anything if fifo is full */
32682- if ((h->access.fifo_full(h))) {
32683+ if ((h->access->fifo_full(h))) {
32684 dev_warn(&h->pdev->dev, "fifo full\n");
32685 break;
32686 }
32687@@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
32688 h->Qdepth--;
32689
32690 /* Tell the controller execute command */
32691- h->access.submit_command(h, c);
32692+ h->access->submit_command(h, c);
32693
32694 /* Put job onto the completed Q */
32695 addQ(&h->cmpQ, c);
32696@@ -3441,17 +3443,17 @@ startio:
32697
32698 static inline unsigned long get_next_completion(ctlr_info_t *h)
32699 {
32700- return h->access.command_completed(h);
32701+ return h->access->command_completed(h);
32702 }
32703
32704 static inline int interrupt_pending(ctlr_info_t *h)
32705 {
32706- return h->access.intr_pending(h);
32707+ return h->access->intr_pending(h);
32708 }
32709
32710 static inline long interrupt_not_for_us(ctlr_info_t *h)
32711 {
32712- return ((h->access.intr_pending(h) == 0) ||
32713+ return ((h->access->intr_pending(h) == 0) ||
32714 (h->interrupts_enabled == 0));
32715 }
32716
32717@@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
32718 u32 a;
32719
32720 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
32721- return h->access.command_completed(h);
32722+ return h->access->command_completed(h);
32723
32724 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
32725 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
32726@@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
32727 trans_support & CFGTBL_Trans_use_short_tags);
32728
32729 /* Change the access methods to the performant access methods */
32730- h->access = SA5_performant_access;
32731+ h->access = &SA5_performant_access;
32732 h->transMethod = CFGTBL_Trans_Performant;
32733
32734 return;
32735@@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
32736 if (prod_index < 0)
32737 return -ENODEV;
32738 h->product_name = products[prod_index].product_name;
32739- h->access = *(products[prod_index].access);
32740+ h->access = products[prod_index].access;
32741
32742 if (cciss_board_disabled(h)) {
32743 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
32744@@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
32745 }
32746
32747 /* make sure the board interrupts are off */
32748- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32749+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32750 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
32751 if (rc)
32752 goto clean2;
32753@@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
32754 * fake ones to scoop up any residual completions.
32755 */
32756 spin_lock_irqsave(&h->lock, flags);
32757- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32758+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32759 spin_unlock_irqrestore(&h->lock, flags);
32760 free_irq(h->intr[h->intr_mode], h);
32761 rc = cciss_request_irq(h, cciss_msix_discard_completions,
32762@@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
32763 dev_info(&h->pdev->dev, "Board READY.\n");
32764 dev_info(&h->pdev->dev,
32765 "Waiting for stale completions to drain.\n");
32766- h->access.set_intr_mask(h, CCISS_INTR_ON);
32767+ h->access->set_intr_mask(h, CCISS_INTR_ON);
32768 msleep(10000);
32769- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32770+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32771
32772 rc = controller_reset_failed(h->cfgtable);
32773 if (rc)
32774@@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
32775 cciss_scsi_setup(h);
32776
32777 /* Turn the interrupts on so we can service requests */
32778- h->access.set_intr_mask(h, CCISS_INTR_ON);
32779+ h->access->set_intr_mask(h, CCISS_INTR_ON);
32780
32781 /* Get the firmware version */
32782 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
32783@@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
32784 kfree(flush_buf);
32785 if (return_code != IO_OK)
32786 dev_warn(&h->pdev->dev, "Error flushing cache\n");
32787- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32788+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32789 free_irq(h->intr[h->intr_mode], h);
32790 }
32791
32792diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
32793index 7fda30e..eb5dfe0 100644
32794--- a/drivers/block/cciss.h
32795+++ b/drivers/block/cciss.h
32796@@ -101,7 +101,7 @@ struct ctlr_info
32797 /* information about each logical volume */
32798 drive_info_struct *drv[CISS_MAX_LUN];
32799
32800- struct access_method access;
32801+ struct access_method *access;
32802
32803 /* queue and queue Info */
32804 struct list_head reqQ;
32805diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
32806index 3f08713..56a586a 100644
32807--- a/drivers/block/cpqarray.c
32808+++ b/drivers/block/cpqarray.c
32809@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
32810 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
32811 goto Enomem4;
32812 }
32813- hba[i]->access.set_intr_mask(hba[i], 0);
32814+ hba[i]->access->set_intr_mask(hba[i], 0);
32815 if (request_irq(hba[i]->intr, do_ida_intr,
32816 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
32817 {
32818@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
32819 add_timer(&hba[i]->timer);
32820
32821 /* Enable IRQ now that spinlock and rate limit timer are set up */
32822- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32823+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32824
32825 for(j=0; j<NWD; j++) {
32826 struct gendisk *disk = ida_gendisk[i][j];
32827@@ -694,7 +694,7 @@ DBGINFO(
32828 for(i=0; i<NR_PRODUCTS; i++) {
32829 if (board_id == products[i].board_id) {
32830 c->product_name = products[i].product_name;
32831- c->access = *(products[i].access);
32832+ c->access = products[i].access;
32833 break;
32834 }
32835 }
32836@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
32837 hba[ctlr]->intr = intr;
32838 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
32839 hba[ctlr]->product_name = products[j].product_name;
32840- hba[ctlr]->access = *(products[j].access);
32841+ hba[ctlr]->access = products[j].access;
32842 hba[ctlr]->ctlr = ctlr;
32843 hba[ctlr]->board_id = board_id;
32844 hba[ctlr]->pci_dev = NULL; /* not PCI */
32845@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
32846
32847 while((c = h->reqQ) != NULL) {
32848 /* Can't do anything if we're busy */
32849- if (h->access.fifo_full(h) == 0)
32850+ if (h->access->fifo_full(h) == 0)
32851 return;
32852
32853 /* Get the first entry from the request Q */
32854@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
32855 h->Qdepth--;
32856
32857 /* Tell the controller to do our bidding */
32858- h->access.submit_command(h, c);
32859+ h->access->submit_command(h, c);
32860
32861 /* Get onto the completion Q */
32862 addQ(&h->cmpQ, c);
32863@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32864 unsigned long flags;
32865 __u32 a,a1;
32866
32867- istat = h->access.intr_pending(h);
32868+ istat = h->access->intr_pending(h);
32869 /* Is this interrupt for us? */
32870 if (istat == 0)
32871 return IRQ_NONE;
32872@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32873 */
32874 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
32875 if (istat & FIFO_NOT_EMPTY) {
32876- while((a = h->access.command_completed(h))) {
32877+ while((a = h->access->command_completed(h))) {
32878 a1 = a; a &= ~3;
32879 if ((c = h->cmpQ) == NULL)
32880 {
32881@@ -1449,11 +1449,11 @@ static int sendcmd(
32882 /*
32883 * Disable interrupt
32884 */
32885- info_p->access.set_intr_mask(info_p, 0);
32886+ info_p->access->set_intr_mask(info_p, 0);
32887 /* Make sure there is room in the command FIFO */
32888 /* Actually it should be completely empty at this time. */
32889 for (i = 200000; i > 0; i--) {
32890- temp = info_p->access.fifo_full(info_p);
32891+ temp = info_p->access->fifo_full(info_p);
32892 if (temp != 0) {
32893 break;
32894 }
32895@@ -1466,7 +1466,7 @@ DBG(
32896 /*
32897 * Send the cmd
32898 */
32899- info_p->access.submit_command(info_p, c);
32900+ info_p->access->submit_command(info_p, c);
32901 complete = pollcomplete(ctlr);
32902
32903 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
32904@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
32905 * we check the new geometry. Then turn interrupts back on when
32906 * we're done.
32907 */
32908- host->access.set_intr_mask(host, 0);
32909+ host->access->set_intr_mask(host, 0);
32910 getgeometry(ctlr);
32911- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
32912+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
32913
32914 for(i=0; i<NWD; i++) {
32915 struct gendisk *disk = ida_gendisk[ctlr][i];
32916@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
32917 /* Wait (up to 2 seconds) for a command to complete */
32918
32919 for (i = 200000; i > 0; i--) {
32920- done = hba[ctlr]->access.command_completed(hba[ctlr]);
32921+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
32922 if (done == 0) {
32923 udelay(10); /* a short fixed delay */
32924 } else
32925diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
32926index be73e9d..7fbf140 100644
32927--- a/drivers/block/cpqarray.h
32928+++ b/drivers/block/cpqarray.h
32929@@ -99,7 +99,7 @@ struct ctlr_info {
32930 drv_info_t drv[NWD];
32931 struct proc_dir_entry *proc;
32932
32933- struct access_method access;
32934+ struct access_method *access;
32935
32936 cmdlist_t *reqQ;
32937 cmdlist_t *cmpQ;
32938diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
32939index 6b51afa..17e1191 100644
32940--- a/drivers/block/drbd/drbd_int.h
32941+++ b/drivers/block/drbd/drbd_int.h
32942@@ -582,7 +582,7 @@ struct drbd_epoch {
32943 struct drbd_tconn *tconn;
32944 struct list_head list;
32945 unsigned int barrier_nr;
32946- atomic_t epoch_size; /* increased on every request added. */
32947+ atomic_unchecked_t epoch_size; /* increased on every request added. */
32948 atomic_t active; /* increased on every req. added, and dec on every finished. */
32949 unsigned long flags;
32950 };
32951@@ -1011,7 +1011,7 @@ struct drbd_conf {
32952 int al_tr_cycle;
32953 int al_tr_pos; /* position of the next transaction in the journal */
32954 wait_queue_head_t seq_wait;
32955- atomic_t packet_seq;
32956+ atomic_unchecked_t packet_seq;
32957 unsigned int peer_seq;
32958 spinlock_t peer_seq_lock;
32959 unsigned int minor;
32960@@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
32961 char __user *uoptval;
32962 int err;
32963
32964- uoptval = (char __user __force *)optval;
32965+ uoptval = (char __force_user *)optval;
32966
32967 set_fs(KERNEL_DS);
32968 if (level == SOL_SOCKET)
32969diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
32970index 8c13eeb..217adee 100644
32971--- a/drivers/block/drbd/drbd_main.c
32972+++ b/drivers/block/drbd/drbd_main.c
32973@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
32974 p->sector = sector;
32975 p->block_id = block_id;
32976 p->blksize = blksize;
32977- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
32978+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
32979 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
32980 }
32981
32982@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
32983 return -EIO;
32984 p->sector = cpu_to_be64(req->i.sector);
32985 p->block_id = (unsigned long)req;
32986- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
32987+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
32988 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
32989 if (mdev->state.conn >= C_SYNC_SOURCE &&
32990 mdev->state.conn <= C_PAUSED_SYNC_T)
32991@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
32992 {
32993 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
32994
32995- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
32996- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
32997+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
32998+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
32999 kfree(tconn->current_epoch);
33000
33001 idr_destroy(&tconn->volumes);
33002diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
33003index a9eccfc..f5efe87 100644
33004--- a/drivers/block/drbd/drbd_receiver.c
33005+++ b/drivers/block/drbd/drbd_receiver.c
33006@@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
33007 {
33008 int err;
33009
33010- atomic_set(&mdev->packet_seq, 0);
33011+ atomic_set_unchecked(&mdev->packet_seq, 0);
33012 mdev->peer_seq = 0;
33013
33014 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
33015@@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33016 do {
33017 next_epoch = NULL;
33018
33019- epoch_size = atomic_read(&epoch->epoch_size);
33020+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
33021
33022 switch (ev & ~EV_CLEANUP) {
33023 case EV_PUT:
33024@@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33025 rv = FE_DESTROYED;
33026 } else {
33027 epoch->flags = 0;
33028- atomic_set(&epoch->epoch_size, 0);
33029+ atomic_set_unchecked(&epoch->epoch_size, 0);
33030 /* atomic_set(&epoch->active, 0); is already zero */
33031 if (rv == FE_STILL_LIVE)
33032 rv = FE_RECYCLED;
33033@@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33034 conn_wait_active_ee_empty(tconn);
33035 drbd_flush(tconn);
33036
33037- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33038+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33039 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
33040 if (epoch)
33041 break;
33042@@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33043 }
33044
33045 epoch->flags = 0;
33046- atomic_set(&epoch->epoch_size, 0);
33047+ atomic_set_unchecked(&epoch->epoch_size, 0);
33048 atomic_set(&epoch->active, 0);
33049
33050 spin_lock(&tconn->epoch_lock);
33051- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33052+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33053 list_add(&epoch->list, &tconn->current_epoch->list);
33054 tconn->current_epoch = epoch;
33055 tconn->epochs++;
33056@@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33057
33058 err = wait_for_and_update_peer_seq(mdev, peer_seq);
33059 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
33060- atomic_inc(&tconn->current_epoch->epoch_size);
33061+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
33062 err2 = drbd_drain_block(mdev, pi->size);
33063 if (!err)
33064 err = err2;
33065@@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33066
33067 spin_lock(&tconn->epoch_lock);
33068 peer_req->epoch = tconn->current_epoch;
33069- atomic_inc(&peer_req->epoch->epoch_size);
33070+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
33071 atomic_inc(&peer_req->epoch->active);
33072 spin_unlock(&tconn->epoch_lock);
33073
33074@@ -4346,7 +4346,7 @@ struct data_cmd {
33075 int expect_payload;
33076 size_t pkt_size;
33077 int (*fn)(struct drbd_tconn *, struct packet_info *);
33078-};
33079+} __do_const;
33080
33081 static struct data_cmd drbd_cmd_handler[] = {
33082 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
33083@@ -4466,7 +4466,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
33084 if (!list_empty(&tconn->current_epoch->list))
33085 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
33086 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
33087- atomic_set(&tconn->current_epoch->epoch_size, 0);
33088+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
33089 tconn->send.seen_any_write_yet = false;
33090
33091 conn_info(tconn, "Connection closed\n");
33092@@ -5222,7 +5222,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
33093 struct asender_cmd {
33094 size_t pkt_size;
33095 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
33096-};
33097+} __do_const;
33098
33099 static struct asender_cmd asender_tbl[] = {
33100 [P_PING] = { 0, got_Ping },
33101diff --git a/drivers/block/loop.c b/drivers/block/loop.c
33102index ae12512..37fa397 100644
33103--- a/drivers/block/loop.c
33104+++ b/drivers/block/loop.c
33105@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
33106 mm_segment_t old_fs = get_fs();
33107
33108 set_fs(get_ds());
33109- bw = file->f_op->write(file, buf, len, &pos);
33110+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
33111 set_fs(old_fs);
33112 if (likely(bw == len))
33113 return 0;
33114diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
33115index d620b44..587561e 100644
33116--- a/drivers/cdrom/cdrom.c
33117+++ b/drivers/cdrom/cdrom.c
33118@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
33119 ENSURE(reset, CDC_RESET);
33120 ENSURE(generic_packet, CDC_GENERIC_PACKET);
33121 cdi->mc_flags = 0;
33122- cdo->n_minors = 0;
33123 cdi->options = CDO_USE_FFLAGS;
33124
33125 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
33126@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
33127 else
33128 cdi->cdda_method = CDDA_OLD;
33129
33130- if (!cdo->generic_packet)
33131- cdo->generic_packet = cdrom_dummy_generic_packet;
33132+ if (!cdo->generic_packet) {
33133+ pax_open_kernel();
33134+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
33135+ pax_close_kernel();
33136+ }
33137
33138 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
33139 mutex_lock(&cdrom_mutex);
33140@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
33141 if (cdi->exit)
33142 cdi->exit(cdi);
33143
33144- cdi->ops->n_minors--;
33145 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
33146 }
33147
33148diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
33149index d59cdcb..11afddf 100644
33150--- a/drivers/cdrom/gdrom.c
33151+++ b/drivers/cdrom/gdrom.c
33152@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
33153 .audio_ioctl = gdrom_audio_ioctl,
33154 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
33155 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
33156- .n_minors = 1,
33157 };
33158
33159 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
33160diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
33161index 72bedad..8181ce1 100644
33162--- a/drivers/char/Kconfig
33163+++ b/drivers/char/Kconfig
33164@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
33165
33166 config DEVKMEM
33167 bool "/dev/kmem virtual device support"
33168- default y
33169+ default n
33170+ depends on !GRKERNSEC_KMEM
33171 help
33172 Say Y here if you want to support the /dev/kmem device. The
33173 /dev/kmem device is rarely used, but can be used for certain
33174@@ -581,6 +582,7 @@ config DEVPORT
33175 bool
33176 depends on !M68K
33177 depends on ISA || PCI
33178+ depends on !GRKERNSEC_KMEM
33179 default y
33180
33181 source "drivers/s390/char/Kconfig"
33182diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
33183index 2e04433..22afc64 100644
33184--- a/drivers/char/agp/frontend.c
33185+++ b/drivers/char/agp/frontend.c
33186@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33187 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
33188 return -EFAULT;
33189
33190- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
33191+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
33192 return -EFAULT;
33193
33194 client = agp_find_client_by_pid(reserve.pid);
33195diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
33196index 21cb980..f15107c 100644
33197--- a/drivers/char/genrtc.c
33198+++ b/drivers/char/genrtc.c
33199@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
33200 switch (cmd) {
33201
33202 case RTC_PLL_GET:
33203+ memset(&pll, 0, sizeof(pll));
33204 if (get_rtc_pll(&pll))
33205 return -EINVAL;
33206 else
33207diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
33208index fe6d4be..89f32100 100644
33209--- a/drivers/char/hpet.c
33210+++ b/drivers/char/hpet.c
33211@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
33212 }
33213
33214 static int
33215-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
33216+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
33217 struct hpet_info *info)
33218 {
33219 struct hpet_timer __iomem *timer;
33220diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
33221index 053201b0..8335cce 100644
33222--- a/drivers/char/ipmi/ipmi_msghandler.c
33223+++ b/drivers/char/ipmi/ipmi_msghandler.c
33224@@ -420,7 +420,7 @@ struct ipmi_smi {
33225 struct proc_dir_entry *proc_dir;
33226 char proc_dir_name[10];
33227
33228- atomic_t stats[IPMI_NUM_STATS];
33229+ atomic_unchecked_t stats[IPMI_NUM_STATS];
33230
33231 /*
33232 * run_to_completion duplicate of smb_info, smi_info
33233@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
33234
33235
33236 #define ipmi_inc_stat(intf, stat) \
33237- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
33238+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
33239 #define ipmi_get_stat(intf, stat) \
33240- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
33241+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
33242
33243 static int is_lan_addr(struct ipmi_addr *addr)
33244 {
33245@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
33246 INIT_LIST_HEAD(&intf->cmd_rcvrs);
33247 init_waitqueue_head(&intf->waitq);
33248 for (i = 0; i < IPMI_NUM_STATS; i++)
33249- atomic_set(&intf->stats[i], 0);
33250+ atomic_set_unchecked(&intf->stats[i], 0);
33251
33252 intf->proc_dir = NULL;
33253
33254diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
33255index 1c7fdcd..4899100 100644
33256--- a/drivers/char/ipmi/ipmi_si_intf.c
33257+++ b/drivers/char/ipmi/ipmi_si_intf.c
33258@@ -275,7 +275,7 @@ struct smi_info {
33259 unsigned char slave_addr;
33260
33261 /* Counters and things for the proc filesystem. */
33262- atomic_t stats[SI_NUM_STATS];
33263+ atomic_unchecked_t stats[SI_NUM_STATS];
33264
33265 struct task_struct *thread;
33266
33267@@ -284,9 +284,9 @@ struct smi_info {
33268 };
33269
33270 #define smi_inc_stat(smi, stat) \
33271- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
33272+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
33273 #define smi_get_stat(smi, stat) \
33274- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
33275+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
33276
33277 #define SI_MAX_PARMS 4
33278
33279@@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
33280 atomic_set(&new_smi->req_events, 0);
33281 new_smi->run_to_completion = 0;
33282 for (i = 0; i < SI_NUM_STATS; i++)
33283- atomic_set(&new_smi->stats[i], 0);
33284+ atomic_set_unchecked(&new_smi->stats[i], 0);
33285
33286 new_smi->interrupt_disabled = 1;
33287 atomic_set(&new_smi->stop_operation, 0);
33288diff --git a/drivers/char/mem.c b/drivers/char/mem.c
33289index c6fa3bc..4ca3e42 100644
33290--- a/drivers/char/mem.c
33291+++ b/drivers/char/mem.c
33292@@ -18,6 +18,7 @@
33293 #include <linux/raw.h>
33294 #include <linux/tty.h>
33295 #include <linux/capability.h>
33296+#include <linux/security.h>
33297 #include <linux/ptrace.h>
33298 #include <linux/device.h>
33299 #include <linux/highmem.h>
33300@@ -37,6 +38,10 @@
33301
33302 #define DEVPORT_MINOR 4
33303
33304+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33305+extern const struct file_operations grsec_fops;
33306+#endif
33307+
33308 static inline unsigned long size_inside_page(unsigned long start,
33309 unsigned long size)
33310 {
33311@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33312
33313 while (cursor < to) {
33314 if (!devmem_is_allowed(pfn)) {
33315+#ifdef CONFIG_GRKERNSEC_KMEM
33316+ gr_handle_mem_readwrite(from, to);
33317+#else
33318 printk(KERN_INFO
33319 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
33320 current->comm, from, to);
33321+#endif
33322 return 0;
33323 }
33324 cursor += PAGE_SIZE;
33325@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33326 }
33327 return 1;
33328 }
33329+#elif defined(CONFIG_GRKERNSEC_KMEM)
33330+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33331+{
33332+ return 0;
33333+}
33334 #else
33335 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33336 {
33337@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
33338
33339 while (count > 0) {
33340 unsigned long remaining;
33341+ char *temp;
33342
33343 sz = size_inside_page(p, count);
33344
33345@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
33346 if (!ptr)
33347 return -EFAULT;
33348
33349- remaining = copy_to_user(buf, ptr, sz);
33350+#ifdef CONFIG_PAX_USERCOPY
33351+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
33352+ if (!temp) {
33353+ unxlate_dev_mem_ptr(p, ptr);
33354+ return -ENOMEM;
33355+ }
33356+ memcpy(temp, ptr, sz);
33357+#else
33358+ temp = ptr;
33359+#endif
33360+
33361+ remaining = copy_to_user(buf, temp, sz);
33362+
33363+#ifdef CONFIG_PAX_USERCOPY
33364+ kfree(temp);
33365+#endif
33366+
33367 unxlate_dev_mem_ptr(p, ptr);
33368 if (remaining)
33369 return -EFAULT;
33370@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33371 size_t count, loff_t *ppos)
33372 {
33373 unsigned long p = *ppos;
33374- ssize_t low_count, read, sz;
33375+ ssize_t low_count, read, sz, err = 0;
33376 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
33377- int err = 0;
33378
33379 read = 0;
33380 if (p < (unsigned long) high_memory) {
33381@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33382 }
33383 #endif
33384 while (low_count > 0) {
33385+ char *temp;
33386+
33387 sz = size_inside_page(p, low_count);
33388
33389 /*
33390@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33391 */
33392 kbuf = xlate_dev_kmem_ptr((char *)p);
33393
33394- if (copy_to_user(buf, kbuf, sz))
33395+#ifdef CONFIG_PAX_USERCOPY
33396+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
33397+ if (!temp)
33398+ return -ENOMEM;
33399+ memcpy(temp, kbuf, sz);
33400+#else
33401+ temp = kbuf;
33402+#endif
33403+
33404+ err = copy_to_user(buf, temp, sz);
33405+
33406+#ifdef CONFIG_PAX_USERCOPY
33407+ kfree(temp);
33408+#endif
33409+
33410+ if (err)
33411 return -EFAULT;
33412 buf += sz;
33413 p += sz;
33414@@ -833,6 +880,9 @@ static const struct memdev {
33415 #ifdef CONFIG_CRASH_DUMP
33416 [12] = { "oldmem", 0, &oldmem_fops, NULL },
33417 #endif
33418+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33419+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
33420+#endif
33421 };
33422
33423 static int memory_open(struct inode *inode, struct file *filp)
33424diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
33425index 9df78e2..01ba9ae 100644
33426--- a/drivers/char/nvram.c
33427+++ b/drivers/char/nvram.c
33428@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
33429
33430 spin_unlock_irq(&rtc_lock);
33431
33432- if (copy_to_user(buf, contents, tmp - contents))
33433+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
33434 return -EFAULT;
33435
33436 *ppos = i;
33437diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
33438index b66eaa0..2619d1b 100644
33439--- a/drivers/char/pcmcia/synclink_cs.c
33440+++ b/drivers/char/pcmcia/synclink_cs.c
33441@@ -2348,9 +2348,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
33442
33443 if (debug_level >= DEBUG_LEVEL_INFO)
33444 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
33445- __FILE__,__LINE__, info->device_name, port->count);
33446+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
33447
33448- WARN_ON(!port->count);
33449+ WARN_ON(!atomic_read(&port->count));
33450
33451 if (tty_port_close_start(port, tty, filp) == 0)
33452 goto cleanup;
33453@@ -2368,7 +2368,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
33454 cleanup:
33455 if (debug_level >= DEBUG_LEVEL_INFO)
33456 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
33457- tty->driver->name, port->count);
33458+ tty->driver->name, atomic_read(&port->count));
33459 }
33460
33461 /* Wait until the transmitter is empty.
33462@@ -2510,7 +2510,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
33463
33464 if (debug_level >= DEBUG_LEVEL_INFO)
33465 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
33466- __FILE__,__LINE__,tty->driver->name, port->count);
33467+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
33468
33469 /* If port is closing, signal caller to try again */
33470 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
33471@@ -2530,11 +2530,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
33472 goto cleanup;
33473 }
33474 spin_lock(&port->lock);
33475- port->count++;
33476+ atomic_inc(&port->count);
33477 spin_unlock(&port->lock);
33478 spin_unlock_irqrestore(&info->netlock, flags);
33479
33480- if (port->count == 1) {
33481+ if (atomic_read(&port->count) == 1) {
33482 /* 1st open on this device, init hardware */
33483 retval = startup(info, tty);
33484 if (retval < 0)
33485@@ -3889,7 +3889,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
33486 unsigned short new_crctype;
33487
33488 /* return error if TTY interface open */
33489- if (info->port.count)
33490+ if (atomic_read(&info->port.count))
33491 return -EBUSY;
33492
33493 switch (encoding)
33494@@ -3992,7 +3992,7 @@ static int hdlcdev_open(struct net_device *dev)
33495
33496 /* arbitrate between network and tty opens */
33497 spin_lock_irqsave(&info->netlock, flags);
33498- if (info->port.count != 0 || info->netcount != 0) {
33499+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
33500 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
33501 spin_unlock_irqrestore(&info->netlock, flags);
33502 return -EBUSY;
33503@@ -4081,7 +4081,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33504 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
33505
33506 /* return error if TTY interface open */
33507- if (info->port.count)
33508+ if (atomic_read(&info->port.count))
33509 return -EBUSY;
33510
33511 if (cmd != SIOCWANDEV)
33512diff --git a/drivers/char/random.c b/drivers/char/random.c
33513index 85e81ec..a129a39 100644
33514--- a/drivers/char/random.c
33515+++ b/drivers/char/random.c
33516@@ -272,8 +272,13 @@
33517 /*
33518 * Configuration information
33519 */
33520+#ifdef CONFIG_GRKERNSEC_RANDNET
33521+#define INPUT_POOL_WORDS 512
33522+#define OUTPUT_POOL_WORDS 128
33523+#else
33524 #define INPUT_POOL_WORDS 128
33525 #define OUTPUT_POOL_WORDS 32
33526+#endif
33527 #define SEC_XFER_SIZE 512
33528 #define EXTRACT_SIZE 10
33529
33530@@ -313,10 +318,17 @@ static struct poolinfo {
33531 int poolwords;
33532 int tap1, tap2, tap3, tap4, tap5;
33533 } poolinfo_table[] = {
33534+#ifdef CONFIG_GRKERNSEC_RANDNET
33535+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
33536+ { 512, 411, 308, 208, 104, 1 },
33537+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
33538+ { 128, 103, 76, 51, 25, 1 },
33539+#else
33540 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
33541 { 128, 103, 76, 51, 25, 1 },
33542 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
33543 { 32, 26, 20, 14, 7, 1 },
33544+#endif
33545 #if 0
33546 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
33547 { 2048, 1638, 1231, 819, 411, 1 },
33548@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
33549 input_rotate += i ? 7 : 14;
33550 }
33551
33552- ACCESS_ONCE(r->input_rotate) = input_rotate;
33553- ACCESS_ONCE(r->add_ptr) = i;
33554+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
33555+ ACCESS_ONCE_RW(r->add_ptr) = i;
33556 smp_wmb();
33557
33558 if (out)
33559@@ -1020,7 +1032,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
33560
33561 extract_buf(r, tmp);
33562 i = min_t(int, nbytes, EXTRACT_SIZE);
33563- if (copy_to_user(buf, tmp, i)) {
33564+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
33565 ret = -EFAULT;
33566 break;
33567 }
33568@@ -1356,7 +1368,7 @@ EXPORT_SYMBOL(generate_random_uuid);
33569 #include <linux/sysctl.h>
33570
33571 static int min_read_thresh = 8, min_write_thresh;
33572-static int max_read_thresh = INPUT_POOL_WORDS * 32;
33573+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
33574 static int max_write_thresh = INPUT_POOL_WORDS * 32;
33575 static char sysctl_bootid[16];
33576
33577@@ -1372,7 +1384,7 @@ static char sysctl_bootid[16];
33578 static int proc_do_uuid(ctl_table *table, int write,
33579 void __user *buffer, size_t *lenp, loff_t *ppos)
33580 {
33581- ctl_table fake_table;
33582+ ctl_table_no_const fake_table;
33583 unsigned char buf[64], tmp_uuid[16], *uuid;
33584
33585 uuid = table->data;
33586diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
33587index d780295..b29f3a8 100644
33588--- a/drivers/char/sonypi.c
33589+++ b/drivers/char/sonypi.c
33590@@ -54,6 +54,7 @@
33591
33592 #include <asm/uaccess.h>
33593 #include <asm/io.h>
33594+#include <asm/local.h>
33595
33596 #include <linux/sonypi.h>
33597
33598@@ -490,7 +491,7 @@ static struct sonypi_device {
33599 spinlock_t fifo_lock;
33600 wait_queue_head_t fifo_proc_list;
33601 struct fasync_struct *fifo_async;
33602- int open_count;
33603+ local_t open_count;
33604 int model;
33605 struct input_dev *input_jog_dev;
33606 struct input_dev *input_key_dev;
33607@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
33608 static int sonypi_misc_release(struct inode *inode, struct file *file)
33609 {
33610 mutex_lock(&sonypi_device.lock);
33611- sonypi_device.open_count--;
33612+ local_dec(&sonypi_device.open_count);
33613 mutex_unlock(&sonypi_device.lock);
33614 return 0;
33615 }
33616@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
33617 {
33618 mutex_lock(&sonypi_device.lock);
33619 /* Flush input queue on first open */
33620- if (!sonypi_device.open_count)
33621+ if (!local_read(&sonypi_device.open_count))
33622 kfifo_reset(&sonypi_device.fifo);
33623- sonypi_device.open_count++;
33624+ local_inc(&sonypi_device.open_count);
33625 mutex_unlock(&sonypi_device.lock);
33626
33627 return 0;
33628diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
33629index 93211df..c7805f7 100644
33630--- a/drivers/char/tpm/tpm.c
33631+++ b/drivers/char/tpm/tpm.c
33632@@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
33633 chip->vendor.req_complete_val)
33634 goto out_recv;
33635
33636- if ((status == chip->vendor.req_canceled)) {
33637+ if (status == chip->vendor.req_canceled) {
33638 dev_err(chip->dev, "Operation Canceled\n");
33639 rc = -ECANCELED;
33640 goto out;
33641diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
33642index 56051d0..11cf3b7 100644
33643--- a/drivers/char/tpm/tpm_acpi.c
33644+++ b/drivers/char/tpm/tpm_acpi.c
33645@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
33646 virt = acpi_os_map_memory(start, len);
33647 if (!virt) {
33648 kfree(log->bios_event_log);
33649+ log->bios_event_log = NULL;
33650 printk("%s: ERROR - Unable to map memory\n", __func__);
33651 return -EIO;
33652 }
33653
33654- memcpy_fromio(log->bios_event_log, virt, len);
33655+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
33656
33657 acpi_os_unmap_memory(virt, len);
33658 return 0;
33659diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
33660index 84ddc55..1d32f1e 100644
33661--- a/drivers/char/tpm/tpm_eventlog.c
33662+++ b/drivers/char/tpm/tpm_eventlog.c
33663@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
33664 event = addr;
33665
33666 if ((event->event_type == 0 && event->event_size == 0) ||
33667- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
33668+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
33669 return NULL;
33670
33671 return addr;
33672@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
33673 return NULL;
33674
33675 if ((event->event_type == 0 && event->event_size == 0) ||
33676- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
33677+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
33678 return NULL;
33679
33680 (*pos)++;
33681@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
33682 int i;
33683
33684 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
33685- seq_putc(m, data[i]);
33686+ if (!seq_putc(m, data[i]))
33687+ return -EFAULT;
33688
33689 return 0;
33690 }
33691diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
33692index ee4dbea..69c817b 100644
33693--- a/drivers/char/virtio_console.c
33694+++ b/drivers/char/virtio_console.c
33695@@ -681,7 +681,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
33696 if (to_user) {
33697 ssize_t ret;
33698
33699- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
33700+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
33701 if (ret)
33702 return -EFAULT;
33703 } else {
33704@@ -780,7 +780,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
33705 if (!port_has_data(port) && !port->host_connected)
33706 return 0;
33707
33708- return fill_readbuf(port, ubuf, count, true);
33709+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
33710 }
33711
33712 static int wait_port_writable(struct port *port, bool nonblock)
33713diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
33714index 8ae1a61..9c00613 100644
33715--- a/drivers/clocksource/arm_generic.c
33716+++ b/drivers/clocksource/arm_generic.c
33717@@ -181,7 +181,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
33718 return NOTIFY_OK;
33719 }
33720
33721-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
33722+static struct notifier_block arch_timer_cpu_nb = {
33723 .notifier_call = arch_timer_cpu_notify,
33724 };
33725
33726diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
33727index fce2000..1110478 100644
33728--- a/drivers/connector/cn_proc.c
33729+++ b/drivers/connector/cn_proc.c
33730@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
33731 (task_active_pid_ns(current) != &init_pid_ns))
33732 return;
33733
33734+ /* Can only change if privileged. */
33735+ if (!capable(CAP_NET_ADMIN)) {
33736+ err = EPERM;
33737+ goto out;
33738+ }
33739+
33740 mc_op = (enum proc_cn_mcast_op *)msg->data;
33741 switch (*mc_op) {
33742 case PROC_CN_MCAST_LISTEN:
33743@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
33744 err = EINVAL;
33745 break;
33746 }
33747+
33748+out:
33749 cn_proc_ack(err, msg->seq, msg->ack);
33750 }
33751
33752diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
33753index 7b0d49d..134fac9 100644
33754--- a/drivers/cpufreq/acpi-cpufreq.c
33755+++ b/drivers/cpufreq/acpi-cpufreq.c
33756@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
33757 return sprintf(buf, "%u\n", boost_enabled);
33758 }
33759
33760-static struct global_attr global_boost = __ATTR(boost, 0644,
33761+static global_attr_no_const global_boost = __ATTR(boost, 0644,
33762 show_global_boost,
33763 store_global_boost);
33764
33765@@ -712,8 +712,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
33766 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
33767 per_cpu(acfreq_data, cpu) = data;
33768
33769- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
33770- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
33771+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
33772+ pax_open_kernel();
33773+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
33774+ pax_close_kernel();
33775+ }
33776
33777 result = acpi_processor_register_performance(data->acpi_data, cpu);
33778 if (result)
33779@@ -835,7 +838,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
33780 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
33781 break;
33782 case ACPI_ADR_SPACE_FIXED_HARDWARE:
33783- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
33784+ pax_open_kernel();
33785+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
33786+ pax_close_kernel();
33787 policy->cur = get_cur_freq_on_cpu(cpu);
33788 break;
33789 default:
33790@@ -846,8 +851,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
33791 acpi_processor_notify_smm(THIS_MODULE);
33792
33793 /* Check for APERF/MPERF support in hardware */
33794- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
33795- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
33796+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
33797+ pax_open_kernel();
33798+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
33799+ pax_close_kernel();
33800+ }
33801
33802 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
33803 for (i = 0; i < perf->state_count; i++)
33804diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
33805index 1f93dbd..305cef1 100644
33806--- a/drivers/cpufreq/cpufreq.c
33807+++ b/drivers/cpufreq/cpufreq.c
33808@@ -1843,7 +1843,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
33809 return NOTIFY_OK;
33810 }
33811
33812-static struct notifier_block __refdata cpufreq_cpu_notifier = {
33813+static struct notifier_block cpufreq_cpu_notifier = {
33814 .notifier_call = cpufreq_cpu_callback,
33815 };
33816
33817@@ -1875,8 +1875,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
33818
33819 pr_debug("trying to register driver %s\n", driver_data->name);
33820
33821- if (driver_data->setpolicy)
33822- driver_data->flags |= CPUFREQ_CONST_LOOPS;
33823+ if (driver_data->setpolicy) {
33824+ pax_open_kernel();
33825+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
33826+ pax_close_kernel();
33827+ }
33828
33829 spin_lock_irqsave(&cpufreq_driver_lock, flags);
33830 if (cpufreq_driver) {
33831diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
33832index 6c5f1d3..c7e2f35e 100644
33833--- a/drivers/cpufreq/cpufreq_governor.c
33834+++ b/drivers/cpufreq/cpufreq_governor.c
33835@@ -243,7 +243,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
33836 * governor, thus we are bound to jiffes/HZ
33837 */
33838 if (dbs_data->governor == GOV_CONSERVATIVE) {
33839- struct cs_ops *ops = dbs_data->gov_ops;
33840+ const struct cs_ops *ops = dbs_data->gov_ops;
33841
33842 cpufreq_register_notifier(ops->notifier_block,
33843 CPUFREQ_TRANSITION_NOTIFIER);
33844@@ -251,7 +251,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
33845 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
33846 jiffies_to_usecs(10);
33847 } else {
33848- struct od_ops *ops = dbs_data->gov_ops;
33849+ const struct od_ops *ops = dbs_data->gov_ops;
33850
33851 od_tuners->io_is_busy = ops->io_busy();
33852 }
33853@@ -268,7 +268,7 @@ second_time:
33854 cs_dbs_info->enable = 1;
33855 cs_dbs_info->requested_freq = policy->cur;
33856 } else {
33857- struct od_ops *ops = dbs_data->gov_ops;
33858+ const struct od_ops *ops = dbs_data->gov_ops;
33859 od_dbs_info->rate_mult = 1;
33860 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
33861 ops->powersave_bias_init_cpu(cpu);
33862@@ -289,7 +289,7 @@ second_time:
33863 mutex_destroy(&cpu_cdbs->timer_mutex);
33864 dbs_data->enable--;
33865 if (!dbs_data->enable) {
33866- struct cs_ops *ops = dbs_data->gov_ops;
33867+ const struct cs_ops *ops = dbs_data->gov_ops;
33868
33869 sysfs_remove_group(cpufreq_global_kobject,
33870 dbs_data->attr_group);
33871diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
33872index f661654..6c8e638 100644
33873--- a/drivers/cpufreq/cpufreq_governor.h
33874+++ b/drivers/cpufreq/cpufreq_governor.h
33875@@ -142,7 +142,7 @@ struct dbs_data {
33876 void (*gov_check_cpu)(int cpu, unsigned int load);
33877
33878 /* Governor specific ops, see below */
33879- void *gov_ops;
33880+ const void *gov_ops;
33881 };
33882
33883 /* Governor specific ops, will be passed to dbs_data->gov_ops */
33884diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
33885index 9d7732b..0b1a793 100644
33886--- a/drivers/cpufreq/cpufreq_stats.c
33887+++ b/drivers/cpufreq/cpufreq_stats.c
33888@@ -340,7 +340,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
33889 }
33890
33891 /* priority=1 so this will get called before cpufreq_remove_dev */
33892-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
33893+static struct notifier_block cpufreq_stat_cpu_notifier = {
33894 .notifier_call = cpufreq_stat_cpu_callback,
33895 .priority = 1,
33896 };
33897diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
33898index 827629c9..0bc6a03 100644
33899--- a/drivers/cpufreq/p4-clockmod.c
33900+++ b/drivers/cpufreq/p4-clockmod.c
33901@@ -167,10 +167,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
33902 case 0x0F: /* Core Duo */
33903 case 0x16: /* Celeron Core */
33904 case 0x1C: /* Atom */
33905- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33906+ pax_open_kernel();
33907+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33908+ pax_close_kernel();
33909 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
33910 case 0x0D: /* Pentium M (Dothan) */
33911- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33912+ pax_open_kernel();
33913+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33914+ pax_close_kernel();
33915 /* fall through */
33916 case 0x09: /* Pentium M (Banias) */
33917 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
33918@@ -182,7 +186,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
33919
33920 /* on P-4s, the TSC runs with constant frequency independent whether
33921 * throttling is active or not. */
33922- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33923+ pax_open_kernel();
33924+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33925+ pax_close_kernel();
33926
33927 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
33928 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
33929diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
33930index 3a953d5..f5993f6 100644
33931--- a/drivers/cpufreq/speedstep-centrino.c
33932+++ b/drivers/cpufreq/speedstep-centrino.c
33933@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
33934 !cpu_has(cpu, X86_FEATURE_EST))
33935 return -ENODEV;
33936
33937- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
33938- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
33939+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
33940+ pax_open_kernel();
33941+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
33942+ pax_close_kernel();
33943+ }
33944
33945 if (policy->cpu != 0)
33946 return -ENODEV;
33947diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
33948index e1f6860..f8de20b 100644
33949--- a/drivers/cpuidle/cpuidle.c
33950+++ b/drivers/cpuidle/cpuidle.c
33951@@ -279,7 +279,7 @@ static int poll_idle(struct cpuidle_device *dev,
33952
33953 static void poll_idle_init(struct cpuidle_driver *drv)
33954 {
33955- struct cpuidle_state *state = &drv->states[0];
33956+ cpuidle_state_no_const *state = &drv->states[0];
33957
33958 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
33959 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
33960diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
33961index ea2f8e7..70ac501 100644
33962--- a/drivers/cpuidle/governor.c
33963+++ b/drivers/cpuidle/governor.c
33964@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
33965 mutex_lock(&cpuidle_lock);
33966 if (__cpuidle_find_governor(gov->name) == NULL) {
33967 ret = 0;
33968- list_add_tail(&gov->governor_list, &cpuidle_governors);
33969+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
33970 if (!cpuidle_curr_governor ||
33971 cpuidle_curr_governor->rating < gov->rating)
33972 cpuidle_switch_governor(gov);
33973@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
33974 new_gov = cpuidle_replace_governor(gov->rating);
33975 cpuidle_switch_governor(new_gov);
33976 }
33977- list_del(&gov->governor_list);
33978+ pax_list_del((struct list_head *)&gov->governor_list);
33979 mutex_unlock(&cpuidle_lock);
33980 }
33981
33982diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
33983index 428754a..8bdf9cc 100644
33984--- a/drivers/cpuidle/sysfs.c
33985+++ b/drivers/cpuidle/sysfs.c
33986@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
33987 NULL
33988 };
33989
33990-static struct attribute_group cpuidle_attr_group = {
33991+static attribute_group_no_const cpuidle_attr_group = {
33992 .attrs = cpuidle_default_attrs,
33993 .name = "cpuidle",
33994 };
33995diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
33996index 3b36797..289c16a 100644
33997--- a/drivers/devfreq/devfreq.c
33998+++ b/drivers/devfreq/devfreq.c
33999@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
34000 goto err_out;
34001 }
34002
34003- list_add(&governor->node, &devfreq_governor_list);
34004+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
34005
34006 list_for_each_entry(devfreq, &devfreq_list, node) {
34007 int ret = 0;
34008@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
34009 }
34010 }
34011
34012- list_del(&governor->node);
34013+ pax_list_del((struct list_head *)&governor->node);
34014 err_out:
34015 mutex_unlock(&devfreq_list_lock);
34016
34017diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
34018index b70709b..1d8d02a 100644
34019--- a/drivers/dma/sh/shdma.c
34020+++ b/drivers/dma/sh/shdma.c
34021@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
34022 return ret;
34023 }
34024
34025-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
34026+static struct notifier_block sh_dmae_nmi_notifier = {
34027 .notifier_call = sh_dmae_nmi_handler,
34028
34029 /* Run before NMI debug handler and KGDB */
34030diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
34031index 0ca1ca7..6e6f454 100644
34032--- a/drivers/edac/edac_mc_sysfs.c
34033+++ b/drivers/edac/edac_mc_sysfs.c
34034@@ -148,7 +148,7 @@ static const char *edac_caps[] = {
34035 struct dev_ch_attribute {
34036 struct device_attribute attr;
34037 int channel;
34038-};
34039+} __do_const;
34040
34041 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
34042 struct dev_ch_attribute dev_attr_legacy_##_name = \
34043diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
34044index 0056c4d..23b54d9 100644
34045--- a/drivers/edac/edac_pci_sysfs.c
34046+++ b/drivers/edac/edac_pci_sysfs.c
34047@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
34048 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
34049 static int edac_pci_poll_msec = 1000; /* one second workq period */
34050
34051-static atomic_t pci_parity_count = ATOMIC_INIT(0);
34052-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
34053+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
34054+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
34055
34056 static struct kobject *edac_pci_top_main_kobj;
34057 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
34058@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
34059 void *value;
34060 ssize_t(*show) (void *, char *);
34061 ssize_t(*store) (void *, const char *, size_t);
34062-};
34063+} __do_const;
34064
34065 /* Set of show/store abstract level functions for PCI Parity object */
34066 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
34067@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34068 edac_printk(KERN_CRIT, EDAC_PCI,
34069 "Signaled System Error on %s\n",
34070 pci_name(dev));
34071- atomic_inc(&pci_nonparity_count);
34072+ atomic_inc_unchecked(&pci_nonparity_count);
34073 }
34074
34075 if (status & (PCI_STATUS_PARITY)) {
34076@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34077 "Master Data Parity Error on %s\n",
34078 pci_name(dev));
34079
34080- atomic_inc(&pci_parity_count);
34081+ atomic_inc_unchecked(&pci_parity_count);
34082 }
34083
34084 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34085@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34086 "Detected Parity Error on %s\n",
34087 pci_name(dev));
34088
34089- atomic_inc(&pci_parity_count);
34090+ atomic_inc_unchecked(&pci_parity_count);
34091 }
34092 }
34093
34094@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34095 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
34096 "Signaled System Error on %s\n",
34097 pci_name(dev));
34098- atomic_inc(&pci_nonparity_count);
34099+ atomic_inc_unchecked(&pci_nonparity_count);
34100 }
34101
34102 if (status & (PCI_STATUS_PARITY)) {
34103@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34104 "Master Data Parity Error on "
34105 "%s\n", pci_name(dev));
34106
34107- atomic_inc(&pci_parity_count);
34108+ atomic_inc_unchecked(&pci_parity_count);
34109 }
34110
34111 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34112@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34113 "Detected Parity Error on %s\n",
34114 pci_name(dev));
34115
34116- atomic_inc(&pci_parity_count);
34117+ atomic_inc_unchecked(&pci_parity_count);
34118 }
34119 }
34120 }
34121@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
34122 if (!check_pci_errors)
34123 return;
34124
34125- before_count = atomic_read(&pci_parity_count);
34126+ before_count = atomic_read_unchecked(&pci_parity_count);
34127
34128 /* scan all PCI devices looking for a Parity Error on devices and
34129 * bridges.
34130@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
34131 /* Only if operator has selected panic on PCI Error */
34132 if (edac_pci_get_panic_on_pe()) {
34133 /* If the count is different 'after' from 'before' */
34134- if (before_count != atomic_read(&pci_parity_count))
34135+ if (before_count != atomic_read_unchecked(&pci_parity_count))
34136 panic("EDAC: PCI Parity Error");
34137 }
34138 }
34139diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
34140index 6796799..99e8377 100644
34141--- a/drivers/edac/mce_amd.h
34142+++ b/drivers/edac/mce_amd.h
34143@@ -78,7 +78,7 @@ extern const char * const ii_msgs[];
34144 struct amd_decoder_ops {
34145 bool (*mc0_mce)(u16, u8);
34146 bool (*mc1_mce)(u16, u8);
34147-};
34148+} __no_const;
34149
34150 void amd_report_gart_errors(bool);
34151 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
34152diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34153index 57ea7f4..789e3c3 100644
34154--- a/drivers/firewire/core-card.c
34155+++ b/drivers/firewire/core-card.c
34156@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
34157
34158 void fw_core_remove_card(struct fw_card *card)
34159 {
34160- struct fw_card_driver dummy_driver = dummy_driver_template;
34161+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
34162
34163 card->driver->update_phy_reg(card, 4,
34164 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34165diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34166index f8d2287..5aaf4db 100644
34167--- a/drivers/firewire/core-cdev.c
34168+++ b/drivers/firewire/core-cdev.c
34169@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
34170 int ret;
34171
34172 if ((request->channels == 0 && request->bandwidth == 0) ||
34173- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34174- request->bandwidth < 0)
34175+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34176 return -EINVAL;
34177
34178 r = kmalloc(sizeof(*r), GFP_KERNEL);
34179diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
34180index af3e8aa..eb2f227 100644
34181--- a/drivers/firewire/core-device.c
34182+++ b/drivers/firewire/core-device.c
34183@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
34184 struct config_rom_attribute {
34185 struct device_attribute attr;
34186 u32 key;
34187-};
34188+} __do_const;
34189
34190 static ssize_t show_immediate(struct device *dev,
34191 struct device_attribute *dattr, char *buf)
34192diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34193index 28a94c7..58da63a 100644
34194--- a/drivers/firewire/core-transaction.c
34195+++ b/drivers/firewire/core-transaction.c
34196@@ -38,6 +38,7 @@
34197 #include <linux/timer.h>
34198 #include <linux/types.h>
34199 #include <linux/workqueue.h>
34200+#include <linux/sched.h>
34201
34202 #include <asm/byteorder.h>
34203
34204diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34205index 515a42c..5ecf3ba 100644
34206--- a/drivers/firewire/core.h
34207+++ b/drivers/firewire/core.h
34208@@ -111,6 +111,7 @@ struct fw_card_driver {
34209
34210 int (*stop_iso)(struct fw_iso_context *ctx);
34211 };
34212+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34213
34214 void fw_card_initialize(struct fw_card *card,
34215 const struct fw_card_driver *driver, struct device *device);
34216diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
34217index 94a58a0..f5eba42 100644
34218--- a/drivers/firmware/dmi-id.c
34219+++ b/drivers/firmware/dmi-id.c
34220@@ -16,7 +16,7 @@
34221 struct dmi_device_attribute{
34222 struct device_attribute dev_attr;
34223 int field;
34224-};
34225+} __do_const;
34226 #define to_dmi_dev_attr(_dev_attr) \
34227 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
34228
34229diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34230index 982f1f5..d21e5da 100644
34231--- a/drivers/firmware/dmi_scan.c
34232+++ b/drivers/firmware/dmi_scan.c
34233@@ -491,11 +491,6 @@ void __init dmi_scan_machine(void)
34234 }
34235 }
34236 else {
34237- /*
34238- * no iounmap() for that ioremap(); it would be a no-op, but
34239- * it's so early in setup that sucker gets confused into doing
34240- * what it shouldn't if we actually call it.
34241- */
34242 p = dmi_ioremap(0xF0000, 0x10000);
34243 if (p == NULL)
34244 goto error;
34245@@ -770,7 +765,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34246 if (buf == NULL)
34247 return -1;
34248
34249- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34250+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34251
34252 iounmap(buf);
34253 return 0;
34254diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34255index bcb201c..4fd34dd 100644
34256--- a/drivers/firmware/efivars.c
34257+++ b/drivers/firmware/efivars.c
34258@@ -133,7 +133,7 @@ struct efivar_attribute {
34259 };
34260
34261 static struct efivars __efivars;
34262-static struct efivar_operations ops;
34263+static efivar_operations_no_const ops __read_only;
34264
34265 #define PSTORE_EFI_ATTRIBUTES \
34266 (EFI_VARIABLE_NON_VOLATILE | \
34267@@ -1734,7 +1734,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
34268 static int
34269 create_efivars_bin_attributes(struct efivars *efivars)
34270 {
34271- struct bin_attribute *attr;
34272+ bin_attribute_no_const *attr;
34273 int error;
34274
34275 /* new_var */
34276diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
34277index 2a90ba6..07f3733 100644
34278--- a/drivers/firmware/google/memconsole.c
34279+++ b/drivers/firmware/google/memconsole.c
34280@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
34281 if (!found_memconsole())
34282 return -ENODEV;
34283
34284- memconsole_bin_attr.size = memconsole_length;
34285+ pax_open_kernel();
34286+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
34287+ pax_close_kernel();
34288
34289 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
34290
34291diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
34292index 6f2306d..af9476a 100644
34293--- a/drivers/gpio/gpio-ich.c
34294+++ b/drivers/gpio/gpio-ich.c
34295@@ -69,7 +69,7 @@ struct ichx_desc {
34296 /* Some chipsets have quirks, let these use their own request/get */
34297 int (*request)(struct gpio_chip *chip, unsigned offset);
34298 int (*get)(struct gpio_chip *chip, unsigned offset);
34299-};
34300+} __do_const;
34301
34302 static struct {
34303 spinlock_t lock;
34304diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
34305index 9902732..64b62dd 100644
34306--- a/drivers/gpio/gpio-vr41xx.c
34307+++ b/drivers/gpio/gpio-vr41xx.c
34308@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
34309 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
34310 maskl, pendl, maskh, pendh);
34311
34312- atomic_inc(&irq_err_count);
34313+ atomic_inc_unchecked(&irq_err_count);
34314
34315 return -EINVAL;
34316 }
34317diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
34318index 7b2d378..cc947ea 100644
34319--- a/drivers/gpu/drm/drm_crtc_helper.c
34320+++ b/drivers/gpu/drm/drm_crtc_helper.c
34321@@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
34322 struct drm_crtc *tmp;
34323 int crtc_mask = 1;
34324
34325- WARN(!crtc, "checking null crtc?\n");
34326+ BUG_ON(!crtc);
34327
34328 dev = crtc->dev;
34329
34330diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
34331index be174ca..7f38143 100644
34332--- a/drivers/gpu/drm/drm_drv.c
34333+++ b/drivers/gpu/drm/drm_drv.c
34334@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
34335 /**
34336 * Copy and IOCTL return string to user space
34337 */
34338-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
34339+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
34340 {
34341 int len;
34342
34343@@ -377,7 +377,7 @@ long drm_ioctl(struct file *filp,
34344 struct drm_file *file_priv = filp->private_data;
34345 struct drm_device *dev;
34346 struct drm_ioctl_desc *ioctl;
34347- drm_ioctl_t *func;
34348+ drm_ioctl_no_const_t func;
34349 unsigned int nr = DRM_IOCTL_NR(cmd);
34350 int retcode = -EINVAL;
34351 char stack_kdata[128];
34352@@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
34353 return -ENODEV;
34354
34355 atomic_inc(&dev->ioctl_count);
34356- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
34357+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
34358 ++file_priv->ioctl_count;
34359
34360 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
34361diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
34362index 133b413..fd68225 100644
34363--- a/drivers/gpu/drm/drm_fops.c
34364+++ b/drivers/gpu/drm/drm_fops.c
34365@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
34366 }
34367
34368 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
34369- atomic_set(&dev->counts[i], 0);
34370+ atomic_set_unchecked(&dev->counts[i], 0);
34371
34372 dev->sigdata.lock = NULL;
34373
34374@@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
34375 if (drm_device_is_unplugged(dev))
34376 return -ENODEV;
34377
34378- if (!dev->open_count++)
34379+ if (local_inc_return(&dev->open_count) == 1)
34380 need_setup = 1;
34381 mutex_lock(&dev->struct_mutex);
34382 old_mapping = dev->dev_mapping;
34383@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
34384 retcode = drm_open_helper(inode, filp, dev);
34385 if (retcode)
34386 goto err_undo;
34387- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
34388+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
34389 if (need_setup) {
34390 retcode = drm_setup(dev);
34391 if (retcode)
34392@@ -164,7 +164,7 @@ err_undo:
34393 iput(container_of(dev->dev_mapping, struct inode, i_data));
34394 dev->dev_mapping = old_mapping;
34395 mutex_unlock(&dev->struct_mutex);
34396- dev->open_count--;
34397+ local_dec(&dev->open_count);
34398 return retcode;
34399 }
34400 EXPORT_SYMBOL(drm_open);
34401@@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
34402
34403 mutex_lock(&drm_global_mutex);
34404
34405- DRM_DEBUG("open_count = %d\n", dev->open_count);
34406+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
34407
34408 if (dev->driver->preclose)
34409 dev->driver->preclose(dev, file_priv);
34410@@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
34411 * Begin inline drm_release
34412 */
34413
34414- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
34415+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
34416 task_pid_nr(current),
34417 (long)old_encode_dev(file_priv->minor->device),
34418- dev->open_count);
34419+ local_read(&dev->open_count));
34420
34421 /* Release any auth tokens that might point to this file_priv,
34422 (do that under the drm_global_mutex) */
34423@@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
34424 * End inline drm_release
34425 */
34426
34427- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
34428- if (!--dev->open_count) {
34429+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
34430+ if (local_dec_and_test(&dev->open_count)) {
34431 if (atomic_read(&dev->ioctl_count)) {
34432 DRM_ERROR("Device busy: %d\n",
34433 atomic_read(&dev->ioctl_count));
34434diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
34435index f731116..629842c 100644
34436--- a/drivers/gpu/drm/drm_global.c
34437+++ b/drivers/gpu/drm/drm_global.c
34438@@ -36,7 +36,7 @@
34439 struct drm_global_item {
34440 struct mutex mutex;
34441 void *object;
34442- int refcount;
34443+ atomic_t refcount;
34444 };
34445
34446 static struct drm_global_item glob[DRM_GLOBAL_NUM];
34447@@ -49,7 +49,7 @@ void drm_global_init(void)
34448 struct drm_global_item *item = &glob[i];
34449 mutex_init(&item->mutex);
34450 item->object = NULL;
34451- item->refcount = 0;
34452+ atomic_set(&item->refcount, 0);
34453 }
34454 }
34455
34456@@ -59,7 +59,7 @@ void drm_global_release(void)
34457 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
34458 struct drm_global_item *item = &glob[i];
34459 BUG_ON(item->object != NULL);
34460- BUG_ON(item->refcount != 0);
34461+ BUG_ON(atomic_read(&item->refcount) != 0);
34462 }
34463 }
34464
34465@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
34466 void *object;
34467
34468 mutex_lock(&item->mutex);
34469- if (item->refcount == 0) {
34470+ if (atomic_read(&item->refcount) == 0) {
34471 item->object = kzalloc(ref->size, GFP_KERNEL);
34472 if (unlikely(item->object == NULL)) {
34473 ret = -ENOMEM;
34474@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
34475 goto out_err;
34476
34477 }
34478- ++item->refcount;
34479+ atomic_inc(&item->refcount);
34480 ref->object = item->object;
34481 object = item->object;
34482 mutex_unlock(&item->mutex);
34483@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
34484 struct drm_global_item *item = &glob[ref->global_type];
34485
34486 mutex_lock(&item->mutex);
34487- BUG_ON(item->refcount == 0);
34488+ BUG_ON(atomic_read(&item->refcount) == 0);
34489 BUG_ON(ref->object != item->object);
34490- if (--item->refcount == 0) {
34491+ if (atomic_dec_and_test(&item->refcount)) {
34492 ref->release(ref);
34493 item->object = NULL;
34494 }
34495diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
34496index d4b20ce..77a8d41 100644
34497--- a/drivers/gpu/drm/drm_info.c
34498+++ b/drivers/gpu/drm/drm_info.c
34499@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
34500 struct drm_local_map *map;
34501 struct drm_map_list *r_list;
34502
34503- /* Hardcoded from _DRM_FRAME_BUFFER,
34504- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
34505- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
34506- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
34507+ static const char * const types[] = {
34508+ [_DRM_FRAME_BUFFER] = "FB",
34509+ [_DRM_REGISTERS] = "REG",
34510+ [_DRM_SHM] = "SHM",
34511+ [_DRM_AGP] = "AGP",
34512+ [_DRM_SCATTER_GATHER] = "SG",
34513+ [_DRM_CONSISTENT] = "PCI",
34514+ [_DRM_GEM] = "GEM" };
34515 const char *type;
34516 int i;
34517
34518@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
34519 map = r_list->map;
34520 if (!map)
34521 continue;
34522- if (map->type < 0 || map->type > 5)
34523+ if (map->type >= ARRAY_SIZE(types))
34524 type = "??";
34525 else
34526 type = types[map->type];
34527@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
34528 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
34529 vma->vm_flags & VM_LOCKED ? 'l' : '-',
34530 vma->vm_flags & VM_IO ? 'i' : '-',
34531+#ifdef CONFIG_GRKERNSEC_HIDESYM
34532+ 0);
34533+#else
34534 vma->vm_pgoff);
34535+#endif
34536
34537 #if defined(__i386__)
34538 pgprot = pgprot_val(vma->vm_page_prot);
34539diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
34540index 2f4c434..dd12cd2 100644
34541--- a/drivers/gpu/drm/drm_ioc32.c
34542+++ b/drivers/gpu/drm/drm_ioc32.c
34543@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
34544 request = compat_alloc_user_space(nbytes);
34545 if (!access_ok(VERIFY_WRITE, request, nbytes))
34546 return -EFAULT;
34547- list = (struct drm_buf_desc *) (request + 1);
34548+ list = (struct drm_buf_desc __user *) (request + 1);
34549
34550 if (__put_user(count, &request->count)
34551 || __put_user(list, &request->list))
34552@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
34553 request = compat_alloc_user_space(nbytes);
34554 if (!access_ok(VERIFY_WRITE, request, nbytes))
34555 return -EFAULT;
34556- list = (struct drm_buf_pub *) (request + 1);
34557+ list = (struct drm_buf_pub __user *) (request + 1);
34558
34559 if (__put_user(count, &request->count)
34560 || __put_user(list, &request->list))
34561@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
34562 return 0;
34563 }
34564
34565-drm_ioctl_compat_t *drm_compat_ioctls[] = {
34566+drm_ioctl_compat_t drm_compat_ioctls[] = {
34567 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
34568 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
34569 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
34570@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
34571 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34572 {
34573 unsigned int nr = DRM_IOCTL_NR(cmd);
34574- drm_ioctl_compat_t *fn;
34575 int ret;
34576
34577 /* Assume that ioctls without an explicit compat routine will just
34578@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34579 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
34580 return drm_ioctl(filp, cmd, arg);
34581
34582- fn = drm_compat_ioctls[nr];
34583-
34584- if (fn != NULL)
34585- ret = (*fn) (filp, cmd, arg);
34586+ if (drm_compat_ioctls[nr] != NULL)
34587+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
34588 else
34589 ret = drm_ioctl(filp, cmd, arg);
34590
34591diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
34592index e77bd8b..1571b85 100644
34593--- a/drivers/gpu/drm/drm_ioctl.c
34594+++ b/drivers/gpu/drm/drm_ioctl.c
34595@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
34596 stats->data[i].value =
34597 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
34598 else
34599- stats->data[i].value = atomic_read(&dev->counts[i]);
34600+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
34601 stats->data[i].type = dev->types[i];
34602 }
34603
34604diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
34605index d752c96..fe08455 100644
34606--- a/drivers/gpu/drm/drm_lock.c
34607+++ b/drivers/gpu/drm/drm_lock.c
34608@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34609 if (drm_lock_take(&master->lock, lock->context)) {
34610 master->lock.file_priv = file_priv;
34611 master->lock.lock_time = jiffies;
34612- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
34613+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
34614 break; /* Got lock */
34615 }
34616
34617@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34618 return -EINVAL;
34619 }
34620
34621- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
34622+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
34623
34624 if (drm_lock_free(&master->lock, lock->context)) {
34625 /* FIXME: Should really bail out here. */
34626diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
34627index 200e104..59facda 100644
34628--- a/drivers/gpu/drm/drm_stub.c
34629+++ b/drivers/gpu/drm/drm_stub.c
34630@@ -516,7 +516,7 @@ void drm_unplug_dev(struct drm_device *dev)
34631
34632 drm_device_set_unplugged(dev);
34633
34634- if (dev->open_count == 0) {
34635+ if (local_read(&dev->open_count) == 0) {
34636 drm_put_dev(dev);
34637 }
34638 mutex_unlock(&drm_global_mutex);
34639diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
34640index 004ecdf..db1f6e0 100644
34641--- a/drivers/gpu/drm/i810/i810_dma.c
34642+++ b/drivers/gpu/drm/i810/i810_dma.c
34643@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
34644 dma->buflist[vertex->idx],
34645 vertex->discard, vertex->used);
34646
34647- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34648- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34649+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34650+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34651 sarea_priv->last_enqueue = dev_priv->counter - 1;
34652 sarea_priv->last_dispatch = (int)hw_status[5];
34653
34654@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
34655 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
34656 mc->last_render);
34657
34658- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34659- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34660+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34661+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34662 sarea_priv->last_enqueue = dev_priv->counter - 1;
34663 sarea_priv->last_dispatch = (int)hw_status[5];
34664
34665diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
34666index 6e0acad..93c8289 100644
34667--- a/drivers/gpu/drm/i810/i810_drv.h
34668+++ b/drivers/gpu/drm/i810/i810_drv.h
34669@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
34670 int page_flipping;
34671
34672 wait_queue_head_t irq_queue;
34673- atomic_t irq_received;
34674- atomic_t irq_emitted;
34675+ atomic_unchecked_t irq_received;
34676+ atomic_unchecked_t irq_emitted;
34677
34678 int front_offset;
34679 } drm_i810_private_t;
34680diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
34681index 8a7c48b..72effc2 100644
34682--- a/drivers/gpu/drm/i915/i915_debugfs.c
34683+++ b/drivers/gpu/drm/i915/i915_debugfs.c
34684@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
34685 I915_READ(GTIMR));
34686 }
34687 seq_printf(m, "Interrupts received: %d\n",
34688- atomic_read(&dev_priv->irq_received));
34689+ atomic_read_unchecked(&dev_priv->irq_received));
34690 for_each_ring(ring, dev_priv, i) {
34691 if (IS_GEN6(dev) || IS_GEN7(dev)) {
34692 seq_printf(m,
34693diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
34694index 99daa89..84ebd44 100644
34695--- a/drivers/gpu/drm/i915/i915_dma.c
34696+++ b/drivers/gpu/drm/i915/i915_dma.c
34697@@ -1253,7 +1253,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
34698 bool can_switch;
34699
34700 spin_lock(&dev->count_lock);
34701- can_switch = (dev->open_count == 0);
34702+ can_switch = (local_read(&dev->open_count) == 0);
34703 spin_unlock(&dev->count_lock);
34704 return can_switch;
34705 }
34706diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
34707index 7339a4b..445aaba 100644
34708--- a/drivers/gpu/drm/i915/i915_drv.h
34709+++ b/drivers/gpu/drm/i915/i915_drv.h
34710@@ -656,7 +656,7 @@ typedef struct drm_i915_private {
34711 drm_dma_handle_t *status_page_dmah;
34712 struct resource mch_res;
34713
34714- atomic_t irq_received;
34715+ atomic_unchecked_t irq_received;
34716
34717 /* protects the irq masks */
34718 spinlock_t irq_lock;
34719@@ -1102,7 +1102,7 @@ struct drm_i915_gem_object {
34720 * will be page flipped away on the next vblank. When it
34721 * reaches 0, dev_priv->pending_flip_queue will be woken up.
34722 */
34723- atomic_t pending_flip;
34724+ atomic_unchecked_t pending_flip;
34725 };
34726 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
34727
34728@@ -1633,7 +1633,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
34729 struct drm_i915_private *dev_priv, unsigned port);
34730 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
34731 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
34732-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
34733+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
34734 {
34735 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
34736 }
34737diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
34738index 26d08bb..fccb984 100644
34739--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
34740+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
34741@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
34742 i915_gem_clflush_object(obj);
34743
34744 if (obj->base.pending_write_domain)
34745- flips |= atomic_read(&obj->pending_flip);
34746+ flips |= atomic_read_unchecked(&obj->pending_flip);
34747
34748 flush_domains |= obj->base.write_domain;
34749 }
34750@@ -703,9 +703,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
34751
34752 static int
34753 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
34754- int count)
34755+ unsigned int count)
34756 {
34757- int i;
34758+ unsigned int i;
34759
34760 for (i = 0; i < count; i++) {
34761 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
34762diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
34763index 3c59584..500f2e9 100644
34764--- a/drivers/gpu/drm/i915/i915_ioc32.c
34765+++ b/drivers/gpu/drm/i915/i915_ioc32.c
34766@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
34767 (unsigned long)request);
34768 }
34769
34770-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
34771+static drm_ioctl_compat_t i915_compat_ioctls[] = {
34772 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
34773 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
34774 [DRM_I915_GETPARAM] = compat_i915_getparam,
34775@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
34776 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34777 {
34778 unsigned int nr = DRM_IOCTL_NR(cmd);
34779- drm_ioctl_compat_t *fn = NULL;
34780 int ret;
34781
34782 if (nr < DRM_COMMAND_BASE)
34783 return drm_compat_ioctl(filp, cmd, arg);
34784
34785- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
34786- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
34787-
34788- if (fn != NULL)
34789+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
34790+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
34791 ret = (*fn) (filp, cmd, arg);
34792- else
34793+ } else
34794 ret = drm_ioctl(filp, cmd, arg);
34795
34796 return ret;
34797diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
34798index fe84338..a863190 100644
34799--- a/drivers/gpu/drm/i915/i915_irq.c
34800+++ b/drivers/gpu/drm/i915/i915_irq.c
34801@@ -535,7 +535,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
34802 u32 pipe_stats[I915_MAX_PIPES];
34803 bool blc_event;
34804
34805- atomic_inc(&dev_priv->irq_received);
34806+ atomic_inc_unchecked(&dev_priv->irq_received);
34807
34808 while (true) {
34809 iir = I915_READ(VLV_IIR);
34810@@ -688,7 +688,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
34811 irqreturn_t ret = IRQ_NONE;
34812 int i;
34813
34814- atomic_inc(&dev_priv->irq_received);
34815+ atomic_inc_unchecked(&dev_priv->irq_received);
34816
34817 /* disable master interrupt before clearing iir */
34818 de_ier = I915_READ(DEIER);
34819@@ -760,7 +760,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
34820 int ret = IRQ_NONE;
34821 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
34822
34823- atomic_inc(&dev_priv->irq_received);
34824+ atomic_inc_unchecked(&dev_priv->irq_received);
34825
34826 /* disable master interrupt before clearing iir */
34827 de_ier = I915_READ(DEIER);
34828@@ -1787,7 +1787,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
34829 {
34830 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34831
34832- atomic_set(&dev_priv->irq_received, 0);
34833+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34834
34835 I915_WRITE(HWSTAM, 0xeffe);
34836
34837@@ -1813,7 +1813,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
34838 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34839 int pipe;
34840
34841- atomic_set(&dev_priv->irq_received, 0);
34842+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34843
34844 /* VLV magic */
34845 I915_WRITE(VLV_IMR, 0);
34846@@ -2108,7 +2108,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
34847 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34848 int pipe;
34849
34850- atomic_set(&dev_priv->irq_received, 0);
34851+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34852
34853 for_each_pipe(pipe)
34854 I915_WRITE(PIPESTAT(pipe), 0);
34855@@ -2159,7 +2159,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
34856 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
34857 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
34858
34859- atomic_inc(&dev_priv->irq_received);
34860+ atomic_inc_unchecked(&dev_priv->irq_received);
34861
34862 iir = I915_READ16(IIR);
34863 if (iir == 0)
34864@@ -2244,7 +2244,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
34865 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34866 int pipe;
34867
34868- atomic_set(&dev_priv->irq_received, 0);
34869+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34870
34871 if (I915_HAS_HOTPLUG(dev)) {
34872 I915_WRITE(PORT_HOTPLUG_EN, 0);
34873@@ -2339,7 +2339,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
34874 };
34875 int pipe, ret = IRQ_NONE;
34876
34877- atomic_inc(&dev_priv->irq_received);
34878+ atomic_inc_unchecked(&dev_priv->irq_received);
34879
34880 iir = I915_READ(IIR);
34881 do {
34882@@ -2465,7 +2465,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
34883 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34884 int pipe;
34885
34886- atomic_set(&dev_priv->irq_received, 0);
34887+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34888
34889 I915_WRITE(PORT_HOTPLUG_EN, 0);
34890 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
34891@@ -2572,7 +2572,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
34892 int irq_received;
34893 int ret = IRQ_NONE, pipe;
34894
34895- atomic_inc(&dev_priv->irq_received);
34896+ atomic_inc_unchecked(&dev_priv->irq_received);
34897
34898 iir = I915_READ(IIR);
34899
34900diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
34901index 80aa1fc..85cfce3 100644
34902--- a/drivers/gpu/drm/i915/intel_display.c
34903+++ b/drivers/gpu/drm/i915/intel_display.c
34904@@ -2255,7 +2255,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
34905
34906 wait_event(dev_priv->pending_flip_queue,
34907 atomic_read(&dev_priv->mm.wedged) ||
34908- atomic_read(&obj->pending_flip) == 0);
34909+ atomic_read_unchecked(&obj->pending_flip) == 0);
34910
34911 /* Big Hammer, we also need to ensure that any pending
34912 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
34913@@ -7122,8 +7122,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
34914
34915 obj = work->old_fb_obj;
34916
34917- atomic_clear_mask(1 << intel_crtc->plane,
34918- &obj->pending_flip.counter);
34919+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
34920 wake_up(&dev_priv->pending_flip_queue);
34921
34922 queue_work(dev_priv->wq, &work->work);
34923@@ -7490,7 +7489,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
34924 /* Block clients from rendering to the new back buffer until
34925 * the flip occurs and the object is no longer visible.
34926 */
34927- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
34928+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
34929 atomic_inc(&intel_crtc->unpin_work_count);
34930
34931 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
34932@@ -7507,7 +7506,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
34933
34934 cleanup_pending:
34935 atomic_dec(&intel_crtc->unpin_work_count);
34936- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
34937+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
34938 drm_gem_object_unreference(&work->old_fb_obj->base);
34939 drm_gem_object_unreference(&obj->base);
34940 mutex_unlock(&dev->struct_mutex);
34941@@ -8849,13 +8848,13 @@ struct intel_quirk {
34942 int subsystem_vendor;
34943 int subsystem_device;
34944 void (*hook)(struct drm_device *dev);
34945-};
34946+} __do_const;
34947
34948 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
34949 struct intel_dmi_quirk {
34950 void (*hook)(struct drm_device *dev);
34951 const struct dmi_system_id (*dmi_id_list)[];
34952-};
34953+} __do_const;
34954
34955 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
34956 {
34957@@ -8863,18 +8862,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
34958 return 1;
34959 }
34960
34961+static const struct dmi_system_id intel_dmi_quirks_table[] = {
34962+ {
34963+ .callback = intel_dmi_reverse_brightness,
34964+ .ident = "NCR Corporation",
34965+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
34966+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
34967+ },
34968+ },
34969+ { } /* terminating entry */
34970+};
34971+
34972 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
34973 {
34974- .dmi_id_list = &(const struct dmi_system_id[]) {
34975- {
34976- .callback = intel_dmi_reverse_brightness,
34977- .ident = "NCR Corporation",
34978- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
34979- DMI_MATCH(DMI_PRODUCT_NAME, ""),
34980- },
34981- },
34982- { } /* terminating entry */
34983- },
34984+ .dmi_id_list = &intel_dmi_quirks_table,
34985 .hook = quirk_invert_brightness,
34986 },
34987 };
34988diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
34989index 54558a0..2d97005 100644
34990--- a/drivers/gpu/drm/mga/mga_drv.h
34991+++ b/drivers/gpu/drm/mga/mga_drv.h
34992@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
34993 u32 clear_cmd;
34994 u32 maccess;
34995
34996- atomic_t vbl_received; /**< Number of vblanks received. */
34997+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
34998 wait_queue_head_t fence_queue;
34999- atomic_t last_fence_retired;
35000+ atomic_unchecked_t last_fence_retired;
35001 u32 next_fence_to_post;
35002
35003 unsigned int fb_cpp;
35004diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
35005index 709e90d..89a1c0d 100644
35006--- a/drivers/gpu/drm/mga/mga_ioc32.c
35007+++ b/drivers/gpu/drm/mga/mga_ioc32.c
35008@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
35009 return 0;
35010 }
35011
35012-drm_ioctl_compat_t *mga_compat_ioctls[] = {
35013+drm_ioctl_compat_t mga_compat_ioctls[] = {
35014 [DRM_MGA_INIT] = compat_mga_init,
35015 [DRM_MGA_GETPARAM] = compat_mga_getparam,
35016 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
35017@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
35018 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35019 {
35020 unsigned int nr = DRM_IOCTL_NR(cmd);
35021- drm_ioctl_compat_t *fn = NULL;
35022 int ret;
35023
35024 if (nr < DRM_COMMAND_BASE)
35025 return drm_compat_ioctl(filp, cmd, arg);
35026
35027- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
35028- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35029-
35030- if (fn != NULL)
35031+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
35032+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35033 ret = (*fn) (filp, cmd, arg);
35034- else
35035+ } else
35036 ret = drm_ioctl(filp, cmd, arg);
35037
35038 return ret;
35039diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35040index 598c281..60d590e 100644
35041--- a/drivers/gpu/drm/mga/mga_irq.c
35042+++ b/drivers/gpu/drm/mga/mga_irq.c
35043@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35044 if (crtc != 0)
35045 return 0;
35046
35047- return atomic_read(&dev_priv->vbl_received);
35048+ return atomic_read_unchecked(&dev_priv->vbl_received);
35049 }
35050
35051
35052@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35053 /* VBLANK interrupt */
35054 if (status & MGA_VLINEPEN) {
35055 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35056- atomic_inc(&dev_priv->vbl_received);
35057+ atomic_inc_unchecked(&dev_priv->vbl_received);
35058 drm_handle_vblank(dev, 0);
35059 handled = 1;
35060 }
35061@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35062 if ((prim_start & ~0x03) != (prim_end & ~0x03))
35063 MGA_WRITE(MGA_PRIMEND, prim_end);
35064
35065- atomic_inc(&dev_priv->last_fence_retired);
35066+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35067 DRM_WAKEUP(&dev_priv->fence_queue);
35068 handled = 1;
35069 }
35070@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
35071 * using fences.
35072 */
35073 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35074- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35075+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35076 - *sequence) <= (1 << 23)));
35077
35078 *sequence = cur_fence;
35079diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
35080index 865eddf..62c4cc3 100644
35081--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
35082+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
35083@@ -1015,7 +1015,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
35084 struct bit_table {
35085 const char id;
35086 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
35087-};
35088+} __no_const;
35089
35090 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
35091
35092diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
35093index aa89eb9..d45d38b 100644
35094--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
35095+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
35096@@ -80,7 +80,7 @@ struct nouveau_drm {
35097 struct drm_global_reference mem_global_ref;
35098 struct ttm_bo_global_ref bo_global_ref;
35099 struct ttm_bo_device bdev;
35100- atomic_t validate_sequence;
35101+ atomic_unchecked_t validate_sequence;
35102 int (*move)(struct nouveau_channel *,
35103 struct ttm_buffer_object *,
35104 struct ttm_mem_reg *, struct ttm_mem_reg *);
35105diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
35106index cdb83ac..27f0a16 100644
35107--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
35108+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
35109@@ -43,7 +43,7 @@ struct nouveau_fence_priv {
35110 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
35111 struct nouveau_channel *);
35112 u32 (*read)(struct nouveau_channel *);
35113-};
35114+} __no_const;
35115
35116 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
35117
35118diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
35119index 8bf695c..9fbc90a 100644
35120--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
35121+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
35122@@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
35123 int trycnt = 0;
35124 int ret, i;
35125
35126- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35127+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35128 retry:
35129 if (++trycnt > 100000) {
35130 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
35131diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35132index 08214bc..9208577 100644
35133--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35134+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35135@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
35136 unsigned long arg)
35137 {
35138 unsigned int nr = DRM_IOCTL_NR(cmd);
35139- drm_ioctl_compat_t *fn = NULL;
35140+ drm_ioctl_compat_t fn = NULL;
35141 int ret;
35142
35143 if (nr < DRM_COMMAND_BASE)
35144diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
35145index 25d3495..d81aaf6 100644
35146--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
35147+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
35148@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
35149 bool can_switch;
35150
35151 spin_lock(&dev->count_lock);
35152- can_switch = (dev->open_count == 0);
35153+ can_switch = (local_read(&dev->open_count) == 0);
35154 spin_unlock(&dev->count_lock);
35155 return can_switch;
35156 }
35157diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35158index d4660cf..70dbe65 100644
35159--- a/drivers/gpu/drm/r128/r128_cce.c
35160+++ b/drivers/gpu/drm/r128/r128_cce.c
35161@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
35162
35163 /* GH: Simple idle check.
35164 */
35165- atomic_set(&dev_priv->idle_count, 0);
35166+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35167
35168 /* We don't support anything other than bus-mastering ring mode,
35169 * but the ring can be in either AGP or PCI space for the ring
35170diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35171index 930c71b..499aded 100644
35172--- a/drivers/gpu/drm/r128/r128_drv.h
35173+++ b/drivers/gpu/drm/r128/r128_drv.h
35174@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35175 int is_pci;
35176 unsigned long cce_buffers_offset;
35177
35178- atomic_t idle_count;
35179+ atomic_unchecked_t idle_count;
35180
35181 int page_flipping;
35182 int current_page;
35183 u32 crtc_offset;
35184 u32 crtc_offset_cntl;
35185
35186- atomic_t vbl_received;
35187+ atomic_unchecked_t vbl_received;
35188
35189 u32 color_fmt;
35190 unsigned int front_offset;
35191diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
35192index a954c54..9cc595c 100644
35193--- a/drivers/gpu/drm/r128/r128_ioc32.c
35194+++ b/drivers/gpu/drm/r128/r128_ioc32.c
35195@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
35196 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
35197 }
35198
35199-drm_ioctl_compat_t *r128_compat_ioctls[] = {
35200+drm_ioctl_compat_t r128_compat_ioctls[] = {
35201 [DRM_R128_INIT] = compat_r128_init,
35202 [DRM_R128_DEPTH] = compat_r128_depth,
35203 [DRM_R128_STIPPLE] = compat_r128_stipple,
35204@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
35205 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35206 {
35207 unsigned int nr = DRM_IOCTL_NR(cmd);
35208- drm_ioctl_compat_t *fn = NULL;
35209 int ret;
35210
35211 if (nr < DRM_COMMAND_BASE)
35212 return drm_compat_ioctl(filp, cmd, arg);
35213
35214- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
35215- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35216-
35217- if (fn != NULL)
35218+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
35219+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35220 ret = (*fn) (filp, cmd, arg);
35221- else
35222+ } else
35223 ret = drm_ioctl(filp, cmd, arg);
35224
35225 return ret;
35226diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35227index 2ea4f09..d391371 100644
35228--- a/drivers/gpu/drm/r128/r128_irq.c
35229+++ b/drivers/gpu/drm/r128/r128_irq.c
35230@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35231 if (crtc != 0)
35232 return 0;
35233
35234- return atomic_read(&dev_priv->vbl_received);
35235+ return atomic_read_unchecked(&dev_priv->vbl_received);
35236 }
35237
35238 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35239@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35240 /* VBLANK interrupt */
35241 if (status & R128_CRTC_VBLANK_INT) {
35242 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
35243- atomic_inc(&dev_priv->vbl_received);
35244+ atomic_inc_unchecked(&dev_priv->vbl_received);
35245 drm_handle_vblank(dev, 0);
35246 return IRQ_HANDLED;
35247 }
35248diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
35249index 19bb7e6..de7e2a2 100644
35250--- a/drivers/gpu/drm/r128/r128_state.c
35251+++ b/drivers/gpu/drm/r128/r128_state.c
35252@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
35253
35254 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
35255 {
35256- if (atomic_read(&dev_priv->idle_count) == 0)
35257+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
35258 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
35259 else
35260- atomic_set(&dev_priv->idle_count, 0);
35261+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35262 }
35263
35264 #endif
35265diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
35266index 5a82b6b..9e69c73 100644
35267--- a/drivers/gpu/drm/radeon/mkregtable.c
35268+++ b/drivers/gpu/drm/radeon/mkregtable.c
35269@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
35270 regex_t mask_rex;
35271 regmatch_t match[4];
35272 char buf[1024];
35273- size_t end;
35274+ long end;
35275 int len;
35276 int done = 0;
35277 int r;
35278 unsigned o;
35279 struct offset *offset;
35280 char last_reg_s[10];
35281- int last_reg;
35282+ unsigned long last_reg;
35283
35284 if (regcomp
35285 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
35286diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
35287index 0d6562b..a154330 100644
35288--- a/drivers/gpu/drm/radeon/radeon_device.c
35289+++ b/drivers/gpu/drm/radeon/radeon_device.c
35290@@ -969,7 +969,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
35291 bool can_switch;
35292
35293 spin_lock(&dev->count_lock);
35294- can_switch = (dev->open_count == 0);
35295+ can_switch = (local_read(&dev->open_count) == 0);
35296 spin_unlock(&dev->count_lock);
35297 return can_switch;
35298 }
35299diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
35300index e7fdf16..f4f6490 100644
35301--- a/drivers/gpu/drm/radeon/radeon_drv.h
35302+++ b/drivers/gpu/drm/radeon/radeon_drv.h
35303@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
35304
35305 /* SW interrupt */
35306 wait_queue_head_t swi_queue;
35307- atomic_t swi_emitted;
35308+ atomic_unchecked_t swi_emitted;
35309 int vblank_crtc;
35310 uint32_t irq_enable_reg;
35311 uint32_t r500_disp_irq_reg;
35312diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
35313index c180df8..5fd8186 100644
35314--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
35315+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
35316@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35317 request = compat_alloc_user_space(sizeof(*request));
35318 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
35319 || __put_user(req32.param, &request->param)
35320- || __put_user((void __user *)(unsigned long)req32.value,
35321+ || __put_user((unsigned long)req32.value,
35322 &request->value))
35323 return -EFAULT;
35324
35325@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35326 #define compat_radeon_cp_setparam NULL
35327 #endif /* X86_64 || IA64 */
35328
35329-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
35330+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
35331 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
35332 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
35333 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
35334@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
35335 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35336 {
35337 unsigned int nr = DRM_IOCTL_NR(cmd);
35338- drm_ioctl_compat_t *fn = NULL;
35339 int ret;
35340
35341 if (nr < DRM_COMMAND_BASE)
35342 return drm_compat_ioctl(filp, cmd, arg);
35343
35344- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
35345- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
35346-
35347- if (fn != NULL)
35348+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
35349+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
35350 ret = (*fn) (filp, cmd, arg);
35351- else
35352+ } else
35353 ret = drm_ioctl(filp, cmd, arg);
35354
35355 return ret;
35356diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
35357index e771033..a0bc6b3 100644
35358--- a/drivers/gpu/drm/radeon/radeon_irq.c
35359+++ b/drivers/gpu/drm/radeon/radeon_irq.c
35360@@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
35361 unsigned int ret;
35362 RING_LOCALS;
35363
35364- atomic_inc(&dev_priv->swi_emitted);
35365- ret = atomic_read(&dev_priv->swi_emitted);
35366+ atomic_inc_unchecked(&dev_priv->swi_emitted);
35367+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
35368
35369 BEGIN_RING(4);
35370 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
35371@@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
35372 drm_radeon_private_t *dev_priv =
35373 (drm_radeon_private_t *) dev->dev_private;
35374
35375- atomic_set(&dev_priv->swi_emitted, 0);
35376+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
35377 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
35378
35379 dev->max_vblank_count = 0x001fffff;
35380diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
35381index 8e9057b..af6dacb 100644
35382--- a/drivers/gpu/drm/radeon/radeon_state.c
35383+++ b/drivers/gpu/drm/radeon/radeon_state.c
35384@@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
35385 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
35386 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
35387
35388- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
35389+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
35390 sarea_priv->nbox * sizeof(depth_boxes[0])))
35391 return -EFAULT;
35392
35393@@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
35394 {
35395 drm_radeon_private_t *dev_priv = dev->dev_private;
35396 drm_radeon_getparam_t *param = data;
35397- int value;
35398+ int value = 0;
35399
35400 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
35401
35402diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
35403index 93f760e..8088227 100644
35404--- a/drivers/gpu/drm/radeon/radeon_ttm.c
35405+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
35406@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
35407 man->size = size >> PAGE_SHIFT;
35408 }
35409
35410-static struct vm_operations_struct radeon_ttm_vm_ops;
35411+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
35412 static const struct vm_operations_struct *ttm_vm_ops = NULL;
35413
35414 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35415@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35416 }
35417 if (unlikely(ttm_vm_ops == NULL)) {
35418 ttm_vm_ops = vma->vm_ops;
35419+ pax_open_kernel();
35420 radeon_ttm_vm_ops = *ttm_vm_ops;
35421 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
35422+ pax_close_kernel();
35423 }
35424 vma->vm_ops = &radeon_ttm_vm_ops;
35425 return 0;
35426@@ -862,28 +864,33 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
35427 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
35428 else
35429 sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
35430- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35431- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
35432- radeon_mem_types_list[i].driver_features = 0;
35433+ pax_open_kernel();
35434+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35435+ *(void **)&radeon_mem_types_list[i].show = &radeon_mm_dump_table;
35436+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35437 if (i == 0)
35438- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
35439+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
35440 else
35441- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
35442-
35443+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
35444+ pax_close_kernel();
35445 }
35446 /* Add ttm page pool to debugfs */
35447 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
35448- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35449- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
35450- radeon_mem_types_list[i].driver_features = 0;
35451- radeon_mem_types_list[i++].data = NULL;
35452+ pax_open_kernel();
35453+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35454+ *(void **)&radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
35455+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35456+ *(void **)&radeon_mem_types_list[i++].data = NULL;
35457+ pax_close_kernel();
35458 #ifdef CONFIG_SWIOTLB
35459 if (swiotlb_nr_tbl()) {
35460 sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
35461- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35462- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
35463- radeon_mem_types_list[i].driver_features = 0;
35464- radeon_mem_types_list[i++].data = NULL;
35465+ pax_open_kernel();
35466+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35467+ *(void **)&radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
35468+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35469+ *(void **)&radeon_mem_types_list[i++].data = NULL;
35470+ pax_close_kernel();
35471 }
35472 #endif
35473 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
35474diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
35475index 5706d2a..17aedaa 100644
35476--- a/drivers/gpu/drm/radeon/rs690.c
35477+++ b/drivers/gpu/drm/radeon/rs690.c
35478@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
35479 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
35480 rdev->pm.sideport_bandwidth.full)
35481 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
35482- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
35483+ read_delay_latency.full = dfixed_const(800 * 1000);
35484 read_delay_latency.full = dfixed_div(read_delay_latency,
35485 rdev->pm.igp_sideport_mclk);
35486+ a.full = dfixed_const(370);
35487+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
35488 } else {
35489 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
35490 rdev->pm.k8_bandwidth.full)
35491diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
35492index bd2a3b4..122d9ad 100644
35493--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
35494+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
35495@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
35496 static int ttm_pool_mm_shrink(struct shrinker *shrink,
35497 struct shrink_control *sc)
35498 {
35499- static atomic_t start_pool = ATOMIC_INIT(0);
35500+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
35501 unsigned i;
35502- unsigned pool_offset = atomic_add_return(1, &start_pool);
35503+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
35504 struct ttm_page_pool *pool;
35505 int shrink_pages = sc->nr_to_scan;
35506
35507diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
35508index 1eb060c..188b1fc 100644
35509--- a/drivers/gpu/drm/udl/udl_fb.c
35510+++ b/drivers/gpu/drm/udl/udl_fb.c
35511@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
35512 fb_deferred_io_cleanup(info);
35513 kfree(info->fbdefio);
35514 info->fbdefio = NULL;
35515- info->fbops->fb_mmap = udl_fb_mmap;
35516 }
35517
35518 pr_warn("released /dev/fb%d user=%d count=%d\n",
35519diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
35520index 893a650..6190d3b 100644
35521--- a/drivers/gpu/drm/via/via_drv.h
35522+++ b/drivers/gpu/drm/via/via_drv.h
35523@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
35524 typedef uint32_t maskarray_t[5];
35525
35526 typedef struct drm_via_irq {
35527- atomic_t irq_received;
35528+ atomic_unchecked_t irq_received;
35529 uint32_t pending_mask;
35530 uint32_t enable_mask;
35531 wait_queue_head_t irq_queue;
35532@@ -75,7 +75,7 @@ typedef struct drm_via_private {
35533 struct timeval last_vblank;
35534 int last_vblank_valid;
35535 unsigned usec_per_vblank;
35536- atomic_t vbl_received;
35537+ atomic_unchecked_t vbl_received;
35538 drm_via_state_t hc_state;
35539 char pci_buf[VIA_PCI_BUF_SIZE];
35540 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
35541diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
35542index ac98964..5dbf512 100644
35543--- a/drivers/gpu/drm/via/via_irq.c
35544+++ b/drivers/gpu/drm/via/via_irq.c
35545@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
35546 if (crtc != 0)
35547 return 0;
35548
35549- return atomic_read(&dev_priv->vbl_received);
35550+ return atomic_read_unchecked(&dev_priv->vbl_received);
35551 }
35552
35553 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35554@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35555
35556 status = VIA_READ(VIA_REG_INTERRUPT);
35557 if (status & VIA_IRQ_VBLANK_PENDING) {
35558- atomic_inc(&dev_priv->vbl_received);
35559- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
35560+ atomic_inc_unchecked(&dev_priv->vbl_received);
35561+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
35562 do_gettimeofday(&cur_vblank);
35563 if (dev_priv->last_vblank_valid) {
35564 dev_priv->usec_per_vblank =
35565@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35566 dev_priv->last_vblank = cur_vblank;
35567 dev_priv->last_vblank_valid = 1;
35568 }
35569- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
35570+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
35571 DRM_DEBUG("US per vblank is: %u\n",
35572 dev_priv->usec_per_vblank);
35573 }
35574@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35575
35576 for (i = 0; i < dev_priv->num_irqs; ++i) {
35577 if (status & cur_irq->pending_mask) {
35578- atomic_inc(&cur_irq->irq_received);
35579+ atomic_inc_unchecked(&cur_irq->irq_received);
35580 DRM_WAKEUP(&cur_irq->irq_queue);
35581 handled = 1;
35582 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
35583@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
35584 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35585 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
35586 masks[irq][4]));
35587- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
35588+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
35589 } else {
35590 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35591 (((cur_irq_sequence =
35592- atomic_read(&cur_irq->irq_received)) -
35593+ atomic_read_unchecked(&cur_irq->irq_received)) -
35594 *sequence) <= (1 << 23)));
35595 }
35596 *sequence = cur_irq_sequence;
35597@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
35598 }
35599
35600 for (i = 0; i < dev_priv->num_irqs; ++i) {
35601- atomic_set(&cur_irq->irq_received, 0);
35602+ atomic_set_unchecked(&cur_irq->irq_received, 0);
35603 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
35604 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
35605 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
35606@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
35607 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
35608 case VIA_IRQ_RELATIVE:
35609 irqwait->request.sequence +=
35610- atomic_read(&cur_irq->irq_received);
35611+ atomic_read_unchecked(&cur_irq->irq_received);
35612 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
35613 case VIA_IRQ_ABSOLUTE:
35614 break;
35615diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35616index 13aeda7..4a952d1 100644
35617--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35618+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35619@@ -290,7 +290,7 @@ struct vmw_private {
35620 * Fencing and IRQs.
35621 */
35622
35623- atomic_t marker_seq;
35624+ atomic_unchecked_t marker_seq;
35625 wait_queue_head_t fence_queue;
35626 wait_queue_head_t fifo_queue;
35627 int fence_queue_waiters; /* Protected by hw_mutex */
35628diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35629index 3eb1486..0a47ee9 100644
35630--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35631+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35632@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
35633 (unsigned int) min,
35634 (unsigned int) fifo->capabilities);
35635
35636- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
35637+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
35638 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
35639 vmw_marker_queue_init(&fifo->marker_queue);
35640 return vmw_fifo_send_fence(dev_priv, &dummy);
35641@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
35642 if (reserveable)
35643 iowrite32(bytes, fifo_mem +
35644 SVGA_FIFO_RESERVED);
35645- return fifo_mem + (next_cmd >> 2);
35646+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
35647 } else {
35648 need_bounce = true;
35649 }
35650@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
35651
35652 fm = vmw_fifo_reserve(dev_priv, bytes);
35653 if (unlikely(fm == NULL)) {
35654- *seqno = atomic_read(&dev_priv->marker_seq);
35655+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
35656 ret = -ENOMEM;
35657 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
35658 false, 3*HZ);
35659@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
35660 }
35661
35662 do {
35663- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
35664+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
35665 } while (*seqno == 0);
35666
35667 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
35668diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
35669index 4640adb..e1384ed 100644
35670--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
35671+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
35672@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
35673 * emitted. Then the fence is stale and signaled.
35674 */
35675
35676- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
35677+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
35678 > VMW_FENCE_WRAP);
35679
35680 return ret;
35681@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
35682
35683 if (fifo_idle)
35684 down_read(&fifo_state->rwsem);
35685- signal_seq = atomic_read(&dev_priv->marker_seq);
35686+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
35687 ret = 0;
35688
35689 for (;;) {
35690diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
35691index 8a8725c..afed796 100644
35692--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
35693+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
35694@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
35695 while (!vmw_lag_lt(queue, us)) {
35696 spin_lock(&queue->lock);
35697 if (list_empty(&queue->head))
35698- seqno = atomic_read(&dev_priv->marker_seq);
35699+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
35700 else {
35701 marker = list_first_entry(&queue->head,
35702 struct vmw_marker, head);
35703diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
35704index ceb3040..6160c5c 100644
35705--- a/drivers/hid/hid-core.c
35706+++ b/drivers/hid/hid-core.c
35707@@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
35708
35709 int hid_add_device(struct hid_device *hdev)
35710 {
35711- static atomic_t id = ATOMIC_INIT(0);
35712+ static atomic_unchecked_t id = ATOMIC_INIT(0);
35713 int ret;
35714
35715 if (WARN_ON(hdev->status & HID_STAT_ADDED))
35716@@ -2276,7 +2276,7 @@ int hid_add_device(struct hid_device *hdev)
35717 /* XXX hack, any other cleaner solution after the driver core
35718 * is converted to allow more than 20 bytes as the device name? */
35719 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
35720- hdev->vendor, hdev->product, atomic_inc_return(&id));
35721+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
35722
35723 hid_debug_register(hdev, dev_name(&hdev->dev));
35724 ret = device_add(&hdev->dev);
35725diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
35726index eec3291..8ed706b 100644
35727--- a/drivers/hid/hid-wiimote-debug.c
35728+++ b/drivers/hid/hid-wiimote-debug.c
35729@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
35730 else if (size == 0)
35731 return -EIO;
35732
35733- if (copy_to_user(u, buf, size))
35734+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
35735 return -EFAULT;
35736
35737 *off += size;
35738diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
35739index 773a2f2..7ce08bc 100644
35740--- a/drivers/hv/channel.c
35741+++ b/drivers/hv/channel.c
35742@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
35743 int ret = 0;
35744 int t;
35745
35746- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
35747- atomic_inc(&vmbus_connection.next_gpadl_handle);
35748+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
35749+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
35750
35751 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
35752 if (ret)
35753diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
35754index 3648f8f..30ef30d 100644
35755--- a/drivers/hv/hv.c
35756+++ b/drivers/hv/hv.c
35757@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
35758 u64 output_address = (output) ? virt_to_phys(output) : 0;
35759 u32 output_address_hi = output_address >> 32;
35760 u32 output_address_lo = output_address & 0xFFFFFFFF;
35761- void *hypercall_page = hv_context.hypercall_page;
35762+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
35763
35764 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
35765 "=a"(hv_status_lo) : "d" (control_hi),
35766diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
35767index d8d1fad..b91caf7 100644
35768--- a/drivers/hv/hyperv_vmbus.h
35769+++ b/drivers/hv/hyperv_vmbus.h
35770@@ -594,7 +594,7 @@ enum vmbus_connect_state {
35771 struct vmbus_connection {
35772 enum vmbus_connect_state conn_state;
35773
35774- atomic_t next_gpadl_handle;
35775+ atomic_unchecked_t next_gpadl_handle;
35776
35777 /*
35778 * Represents channel interrupts. Each bit position represents a
35779diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
35780index 8e1a9ec..4687821 100644
35781--- a/drivers/hv/vmbus_drv.c
35782+++ b/drivers/hv/vmbus_drv.c
35783@@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
35784 {
35785 int ret = 0;
35786
35787- static atomic_t device_num = ATOMIC_INIT(0);
35788+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
35789
35790 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
35791- atomic_inc_return(&device_num));
35792+ atomic_inc_return_unchecked(&device_num));
35793
35794 child_device_obj->device.bus = &hv_bus;
35795 child_device_obj->device.parent = &hv_acpi_dev->dev;
35796diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
35797index 1672e2a..4a6297c 100644
35798--- a/drivers/hwmon/acpi_power_meter.c
35799+++ b/drivers/hwmon/acpi_power_meter.c
35800@@ -117,7 +117,7 @@ struct sensor_template {
35801 struct device_attribute *devattr,
35802 const char *buf, size_t count);
35803 int index;
35804-};
35805+} __do_const;
35806
35807 /* Averaging interval */
35808 static int update_avg_interval(struct acpi_power_meter_resource *resource)
35809@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
35810 struct sensor_template *attrs)
35811 {
35812 struct device *dev = &resource->acpi_dev->dev;
35813- struct sensor_device_attribute *sensors =
35814+ sensor_device_attribute_no_const *sensors =
35815 &resource->sensors[resource->num_sensors];
35816 int res = 0;
35817
35818diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
35819index b41baff..4953e4d 100644
35820--- a/drivers/hwmon/applesmc.c
35821+++ b/drivers/hwmon/applesmc.c
35822@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
35823 {
35824 struct applesmc_node_group *grp;
35825 struct applesmc_dev_attr *node;
35826- struct attribute *attr;
35827+ attribute_no_const *attr;
35828 int ret, i;
35829
35830 for (grp = groups; grp->format; grp++) {
35831diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
35832index 56dbcfb..9874bf1 100644
35833--- a/drivers/hwmon/asus_atk0110.c
35834+++ b/drivers/hwmon/asus_atk0110.c
35835@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
35836 struct atk_sensor_data {
35837 struct list_head list;
35838 struct atk_data *data;
35839- struct device_attribute label_attr;
35840- struct device_attribute input_attr;
35841- struct device_attribute limit1_attr;
35842- struct device_attribute limit2_attr;
35843+ device_attribute_no_const label_attr;
35844+ device_attribute_no_const input_attr;
35845+ device_attribute_no_const limit1_attr;
35846+ device_attribute_no_const limit2_attr;
35847 char label_attr_name[ATTR_NAME_SIZE];
35848 char input_attr_name[ATTR_NAME_SIZE];
35849 char limit1_attr_name[ATTR_NAME_SIZE];
35850@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
35851 static struct device_attribute atk_name_attr =
35852 __ATTR(name, 0444, atk_name_show, NULL);
35853
35854-static void atk_init_attribute(struct device_attribute *attr, char *name,
35855+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
35856 sysfs_show_func show)
35857 {
35858 sysfs_attr_init(&attr->attr);
35859diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
35860index d64923d..72591e8 100644
35861--- a/drivers/hwmon/coretemp.c
35862+++ b/drivers/hwmon/coretemp.c
35863@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
35864 return NOTIFY_OK;
35865 }
35866
35867-static struct notifier_block coretemp_cpu_notifier __refdata = {
35868+static struct notifier_block coretemp_cpu_notifier = {
35869 .notifier_call = coretemp_cpu_callback,
35870 };
35871
35872diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
35873index a14f634..2916ee2 100644
35874--- a/drivers/hwmon/ibmaem.c
35875+++ b/drivers/hwmon/ibmaem.c
35876@@ -925,7 +925,7 @@ static int aem_register_sensors(struct aem_data *data,
35877 struct aem_rw_sensor_template *rw)
35878 {
35879 struct device *dev = &data->pdev->dev;
35880- struct sensor_device_attribute *sensors = data->sensors;
35881+ sensor_device_attribute_no_const *sensors = data->sensors;
35882 int err;
35883
35884 /* Set up read-only sensors */
35885diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
35886index 7d19b1b..8fdaaac 100644
35887--- a/drivers/hwmon/pmbus/pmbus_core.c
35888+++ b/drivers/hwmon/pmbus/pmbus_core.c
35889@@ -811,7 +811,7 @@ static ssize_t pmbus_show_label(struct device *dev,
35890
35891 #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
35892 do { \
35893- struct sensor_device_attribute *a \
35894+ sensor_device_attribute_no_const *a \
35895 = &data->_type##s[data->num_##_type##s].attribute; \
35896 BUG_ON(data->num_attributes >= data->max_attributes); \
35897 sysfs_attr_init(&a->dev_attr.attr); \
35898diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
35899index 1c85d39..55ed3cf 100644
35900--- a/drivers/hwmon/sht15.c
35901+++ b/drivers/hwmon/sht15.c
35902@@ -169,7 +169,7 @@ struct sht15_data {
35903 int supply_uV;
35904 bool supply_uV_valid;
35905 struct work_struct update_supply_work;
35906- atomic_t interrupt_handled;
35907+ atomic_unchecked_t interrupt_handled;
35908 };
35909
35910 /**
35911@@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
35912 return ret;
35913
35914 gpio_direction_input(data->pdata->gpio_data);
35915- atomic_set(&data->interrupt_handled, 0);
35916+ atomic_set_unchecked(&data->interrupt_handled, 0);
35917
35918 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35919 if (gpio_get_value(data->pdata->gpio_data) == 0) {
35920 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
35921 /* Only relevant if the interrupt hasn't occurred. */
35922- if (!atomic_read(&data->interrupt_handled))
35923+ if (!atomic_read_unchecked(&data->interrupt_handled))
35924 schedule_work(&data->read_work);
35925 }
35926 ret = wait_event_timeout(data->wait_queue,
35927@@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
35928
35929 /* First disable the interrupt */
35930 disable_irq_nosync(irq);
35931- atomic_inc(&data->interrupt_handled);
35932+ atomic_inc_unchecked(&data->interrupt_handled);
35933 /* Then schedule a reading work struct */
35934 if (data->state != SHT15_READING_NOTHING)
35935 schedule_work(&data->read_work);
35936@@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
35937 * If not, then start the interrupt again - care here as could
35938 * have gone low in meantime so verify it hasn't!
35939 */
35940- atomic_set(&data->interrupt_handled, 0);
35941+ atomic_set_unchecked(&data->interrupt_handled, 0);
35942 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35943 /* If still not occurred or another handler was scheduled */
35944 if (gpio_get_value(data->pdata->gpio_data)
35945- || atomic_read(&data->interrupt_handled))
35946+ || atomic_read_unchecked(&data->interrupt_handled))
35947 return;
35948 }
35949
35950diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
35951index 76f157b..9c0db1b 100644
35952--- a/drivers/hwmon/via-cputemp.c
35953+++ b/drivers/hwmon/via-cputemp.c
35954@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
35955 return NOTIFY_OK;
35956 }
35957
35958-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
35959+static struct notifier_block via_cputemp_cpu_notifier = {
35960 .notifier_call = via_cputemp_cpu_callback,
35961 };
35962
35963diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
35964index 378fcb5..5e91fa8 100644
35965--- a/drivers/i2c/busses/i2c-amd756-s4882.c
35966+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
35967@@ -43,7 +43,7 @@
35968 extern struct i2c_adapter amd756_smbus;
35969
35970 static struct i2c_adapter *s4882_adapter;
35971-static struct i2c_algorithm *s4882_algo;
35972+static i2c_algorithm_no_const *s4882_algo;
35973
35974 /* Wrapper access functions for multiplexed SMBus */
35975 static DEFINE_MUTEX(amd756_lock);
35976diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
35977index 29015eb..af2d8e9 100644
35978--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
35979+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
35980@@ -41,7 +41,7 @@
35981 extern struct i2c_adapter *nforce2_smbus;
35982
35983 static struct i2c_adapter *s4985_adapter;
35984-static struct i2c_algorithm *s4985_algo;
35985+static i2c_algorithm_no_const *s4985_algo;
35986
35987 /* Wrapper access functions for multiplexed SMBus */
35988 static DEFINE_MUTEX(nforce2_lock);
35989diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
35990index 8126824..55a2798 100644
35991--- a/drivers/ide/ide-cd.c
35992+++ b/drivers/ide/ide-cd.c
35993@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
35994 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
35995 if ((unsigned long)buf & alignment
35996 || blk_rq_bytes(rq) & q->dma_pad_mask
35997- || object_is_on_stack(buf))
35998+ || object_starts_on_stack(buf))
35999 drive->dma = 0;
36000 }
36001 }
36002diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
36003index 8848f16..f8e6dd8 100644
36004--- a/drivers/iio/industrialio-core.c
36005+++ b/drivers/iio/industrialio-core.c
36006@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
36007 }
36008
36009 static
36010-int __iio_device_attr_init(struct device_attribute *dev_attr,
36011+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
36012 const char *postfix,
36013 struct iio_chan_spec const *chan,
36014 ssize_t (*readfunc)(struct device *dev,
36015diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36016index 394fea2..c833880 100644
36017--- a/drivers/infiniband/core/cm.c
36018+++ b/drivers/infiniband/core/cm.c
36019@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36020
36021 struct cm_counter_group {
36022 struct kobject obj;
36023- atomic_long_t counter[CM_ATTR_COUNT];
36024+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36025 };
36026
36027 struct cm_counter_attribute {
36028@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36029 struct ib_mad_send_buf *msg = NULL;
36030 int ret;
36031
36032- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36033+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36034 counter[CM_REQ_COUNTER]);
36035
36036 /* Quick state check to discard duplicate REQs. */
36037@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36038 if (!cm_id_priv)
36039 return;
36040
36041- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36042+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36043 counter[CM_REP_COUNTER]);
36044 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36045 if (ret)
36046@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
36047 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36048 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36049 spin_unlock_irq(&cm_id_priv->lock);
36050- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36051+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36052 counter[CM_RTU_COUNTER]);
36053 goto out;
36054 }
36055@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
36056 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36057 dreq_msg->local_comm_id);
36058 if (!cm_id_priv) {
36059- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36060+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36061 counter[CM_DREQ_COUNTER]);
36062 cm_issue_drep(work->port, work->mad_recv_wc);
36063 return -EINVAL;
36064@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
36065 case IB_CM_MRA_REP_RCVD:
36066 break;
36067 case IB_CM_TIMEWAIT:
36068- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36069+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36070 counter[CM_DREQ_COUNTER]);
36071 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36072 goto unlock;
36073@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
36074 cm_free_msg(msg);
36075 goto deref;
36076 case IB_CM_DREQ_RCVD:
36077- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36078+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36079 counter[CM_DREQ_COUNTER]);
36080 goto unlock;
36081 default:
36082@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
36083 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36084 cm_id_priv->msg, timeout)) {
36085 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36086- atomic_long_inc(&work->port->
36087+ atomic_long_inc_unchecked(&work->port->
36088 counter_group[CM_RECV_DUPLICATES].
36089 counter[CM_MRA_COUNTER]);
36090 goto out;
36091@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
36092 break;
36093 case IB_CM_MRA_REQ_RCVD:
36094 case IB_CM_MRA_REP_RCVD:
36095- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36096+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36097 counter[CM_MRA_COUNTER]);
36098 /* fall through */
36099 default:
36100@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
36101 case IB_CM_LAP_IDLE:
36102 break;
36103 case IB_CM_MRA_LAP_SENT:
36104- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36105+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36106 counter[CM_LAP_COUNTER]);
36107 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36108 goto unlock;
36109@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
36110 cm_free_msg(msg);
36111 goto deref;
36112 case IB_CM_LAP_RCVD:
36113- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36114+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36115 counter[CM_LAP_COUNTER]);
36116 goto unlock;
36117 default:
36118@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36119 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36120 if (cur_cm_id_priv) {
36121 spin_unlock_irq(&cm.lock);
36122- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36123+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36124 counter[CM_SIDR_REQ_COUNTER]);
36125 goto out; /* Duplicate message. */
36126 }
36127@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36128 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36129 msg->retries = 1;
36130
36131- atomic_long_add(1 + msg->retries,
36132+ atomic_long_add_unchecked(1 + msg->retries,
36133 &port->counter_group[CM_XMIT].counter[attr_index]);
36134 if (msg->retries)
36135- atomic_long_add(msg->retries,
36136+ atomic_long_add_unchecked(msg->retries,
36137 &port->counter_group[CM_XMIT_RETRIES].
36138 counter[attr_index]);
36139
36140@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36141 }
36142
36143 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36144- atomic_long_inc(&port->counter_group[CM_RECV].
36145+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36146 counter[attr_id - CM_ATTR_ID_OFFSET]);
36147
36148 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36149@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36150 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36151
36152 return sprintf(buf, "%ld\n",
36153- atomic_long_read(&group->counter[cm_attr->index]));
36154+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36155 }
36156
36157 static const struct sysfs_ops cm_counter_ops = {
36158diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36159index 176c8f9..2627b62 100644
36160--- a/drivers/infiniband/core/fmr_pool.c
36161+++ b/drivers/infiniband/core/fmr_pool.c
36162@@ -98,8 +98,8 @@ struct ib_fmr_pool {
36163
36164 struct task_struct *thread;
36165
36166- atomic_t req_ser;
36167- atomic_t flush_ser;
36168+ atomic_unchecked_t req_ser;
36169+ atomic_unchecked_t flush_ser;
36170
36171 wait_queue_head_t force_wait;
36172 };
36173@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36174 struct ib_fmr_pool *pool = pool_ptr;
36175
36176 do {
36177- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36178+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36179 ib_fmr_batch_release(pool);
36180
36181- atomic_inc(&pool->flush_ser);
36182+ atomic_inc_unchecked(&pool->flush_ser);
36183 wake_up_interruptible(&pool->force_wait);
36184
36185 if (pool->flush_function)
36186@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36187 }
36188
36189 set_current_state(TASK_INTERRUPTIBLE);
36190- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36191+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36192 !kthread_should_stop())
36193 schedule();
36194 __set_current_state(TASK_RUNNING);
36195@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36196 pool->dirty_watermark = params->dirty_watermark;
36197 pool->dirty_len = 0;
36198 spin_lock_init(&pool->pool_lock);
36199- atomic_set(&pool->req_ser, 0);
36200- atomic_set(&pool->flush_ser, 0);
36201+ atomic_set_unchecked(&pool->req_ser, 0);
36202+ atomic_set_unchecked(&pool->flush_ser, 0);
36203 init_waitqueue_head(&pool->force_wait);
36204
36205 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36206@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36207 }
36208 spin_unlock_irq(&pool->pool_lock);
36209
36210- serial = atomic_inc_return(&pool->req_ser);
36211+ serial = atomic_inc_return_unchecked(&pool->req_ser);
36212 wake_up_process(pool->thread);
36213
36214 if (wait_event_interruptible(pool->force_wait,
36215- atomic_read(&pool->flush_ser) - serial >= 0))
36216+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36217 return -EINTR;
36218
36219 return 0;
36220@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36221 } else {
36222 list_add_tail(&fmr->list, &pool->dirty_list);
36223 if (++pool->dirty_len >= pool->dirty_watermark) {
36224- atomic_inc(&pool->req_ser);
36225+ atomic_inc_unchecked(&pool->req_ser);
36226 wake_up_process(pool->thread);
36227 }
36228 }
36229diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
36230index afd8179..598063f 100644
36231--- a/drivers/infiniband/hw/cxgb4/mem.c
36232+++ b/drivers/infiniband/hw/cxgb4/mem.c
36233@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36234 int err;
36235 struct fw_ri_tpte tpt;
36236 u32 stag_idx;
36237- static atomic_t key;
36238+ static atomic_unchecked_t key;
36239
36240 if (c4iw_fatal_error(rdev))
36241 return -EIO;
36242@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36243 if (rdev->stats.stag.cur > rdev->stats.stag.max)
36244 rdev->stats.stag.max = rdev->stats.stag.cur;
36245 mutex_unlock(&rdev->stats.lock);
36246- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
36247+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
36248 }
36249 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
36250 __func__, stag_state, type, pdid, stag_idx);
36251diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
36252index 79b3dbc..96e5fcc 100644
36253--- a/drivers/infiniband/hw/ipath/ipath_rc.c
36254+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
36255@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36256 struct ib_atomic_eth *ateth;
36257 struct ipath_ack_entry *e;
36258 u64 vaddr;
36259- atomic64_t *maddr;
36260+ atomic64_unchecked_t *maddr;
36261 u64 sdata;
36262 u32 rkey;
36263 u8 next;
36264@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36265 IB_ACCESS_REMOTE_ATOMIC)))
36266 goto nack_acc_unlck;
36267 /* Perform atomic OP and save result. */
36268- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36269+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36270 sdata = be64_to_cpu(ateth->swap_data);
36271 e = &qp->s_ack_queue[qp->r_head_ack_queue];
36272 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
36273- (u64) atomic64_add_return(sdata, maddr) - sdata :
36274+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36275 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36276 be64_to_cpu(ateth->compare_data),
36277 sdata);
36278diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
36279index 1f95bba..9530f87 100644
36280--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
36281+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
36282@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
36283 unsigned long flags;
36284 struct ib_wc wc;
36285 u64 sdata;
36286- atomic64_t *maddr;
36287+ atomic64_unchecked_t *maddr;
36288 enum ib_wc_status send_status;
36289
36290 /*
36291@@ -382,11 +382,11 @@ again:
36292 IB_ACCESS_REMOTE_ATOMIC)))
36293 goto acc_err;
36294 /* Perform atomic OP and save result. */
36295- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36296+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36297 sdata = wqe->wr.wr.atomic.compare_add;
36298 *(u64 *) sqp->s_sge.sge.vaddr =
36299 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
36300- (u64) atomic64_add_return(sdata, maddr) - sdata :
36301+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36302 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36303 sdata, wqe->wr.wr.atomic.swap);
36304 goto send_comp;
36305diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
36306index 5b152a3..c1f3e83 100644
36307--- a/drivers/infiniband/hw/nes/nes.c
36308+++ b/drivers/infiniband/hw/nes/nes.c
36309@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
36310 LIST_HEAD(nes_adapter_list);
36311 static LIST_HEAD(nes_dev_list);
36312
36313-atomic_t qps_destroyed;
36314+atomic_unchecked_t qps_destroyed;
36315
36316 static unsigned int ee_flsh_adapter;
36317 static unsigned int sysfs_nonidx_addr;
36318@@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
36319 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
36320 struct nes_adapter *nesadapter = nesdev->nesadapter;
36321
36322- atomic_inc(&qps_destroyed);
36323+ atomic_inc_unchecked(&qps_destroyed);
36324
36325 /* Free the control structures */
36326
36327diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
36328index 33cc589..3bd6538 100644
36329--- a/drivers/infiniband/hw/nes/nes.h
36330+++ b/drivers/infiniband/hw/nes/nes.h
36331@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
36332 extern unsigned int wqm_quanta;
36333 extern struct list_head nes_adapter_list;
36334
36335-extern atomic_t cm_connects;
36336-extern atomic_t cm_accepts;
36337-extern atomic_t cm_disconnects;
36338-extern atomic_t cm_closes;
36339-extern atomic_t cm_connecteds;
36340-extern atomic_t cm_connect_reqs;
36341-extern atomic_t cm_rejects;
36342-extern atomic_t mod_qp_timouts;
36343-extern atomic_t qps_created;
36344-extern atomic_t qps_destroyed;
36345-extern atomic_t sw_qps_destroyed;
36346+extern atomic_unchecked_t cm_connects;
36347+extern atomic_unchecked_t cm_accepts;
36348+extern atomic_unchecked_t cm_disconnects;
36349+extern atomic_unchecked_t cm_closes;
36350+extern atomic_unchecked_t cm_connecteds;
36351+extern atomic_unchecked_t cm_connect_reqs;
36352+extern atomic_unchecked_t cm_rejects;
36353+extern atomic_unchecked_t mod_qp_timouts;
36354+extern atomic_unchecked_t qps_created;
36355+extern atomic_unchecked_t qps_destroyed;
36356+extern atomic_unchecked_t sw_qps_destroyed;
36357 extern u32 mh_detected;
36358 extern u32 mh_pauses_sent;
36359 extern u32 cm_packets_sent;
36360@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
36361 extern u32 cm_packets_received;
36362 extern u32 cm_packets_dropped;
36363 extern u32 cm_packets_retrans;
36364-extern atomic_t cm_listens_created;
36365-extern atomic_t cm_listens_destroyed;
36366+extern atomic_unchecked_t cm_listens_created;
36367+extern atomic_unchecked_t cm_listens_destroyed;
36368 extern u32 cm_backlog_drops;
36369-extern atomic_t cm_loopbacks;
36370-extern atomic_t cm_nodes_created;
36371-extern atomic_t cm_nodes_destroyed;
36372-extern atomic_t cm_accel_dropped_pkts;
36373-extern atomic_t cm_resets_recvd;
36374-extern atomic_t pau_qps_created;
36375-extern atomic_t pau_qps_destroyed;
36376+extern atomic_unchecked_t cm_loopbacks;
36377+extern atomic_unchecked_t cm_nodes_created;
36378+extern atomic_unchecked_t cm_nodes_destroyed;
36379+extern atomic_unchecked_t cm_accel_dropped_pkts;
36380+extern atomic_unchecked_t cm_resets_recvd;
36381+extern atomic_unchecked_t pau_qps_created;
36382+extern atomic_unchecked_t pau_qps_destroyed;
36383
36384 extern u32 int_mod_timer_init;
36385 extern u32 int_mod_cq_depth_256;
36386diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
36387index 22ea67e..dcbe3bc 100644
36388--- a/drivers/infiniband/hw/nes/nes_cm.c
36389+++ b/drivers/infiniband/hw/nes/nes_cm.c
36390@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
36391 u32 cm_packets_retrans;
36392 u32 cm_packets_created;
36393 u32 cm_packets_received;
36394-atomic_t cm_listens_created;
36395-atomic_t cm_listens_destroyed;
36396+atomic_unchecked_t cm_listens_created;
36397+atomic_unchecked_t cm_listens_destroyed;
36398 u32 cm_backlog_drops;
36399-atomic_t cm_loopbacks;
36400-atomic_t cm_nodes_created;
36401-atomic_t cm_nodes_destroyed;
36402-atomic_t cm_accel_dropped_pkts;
36403-atomic_t cm_resets_recvd;
36404+atomic_unchecked_t cm_loopbacks;
36405+atomic_unchecked_t cm_nodes_created;
36406+atomic_unchecked_t cm_nodes_destroyed;
36407+atomic_unchecked_t cm_accel_dropped_pkts;
36408+atomic_unchecked_t cm_resets_recvd;
36409
36410 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
36411 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
36412@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
36413
36414 static struct nes_cm_core *g_cm_core;
36415
36416-atomic_t cm_connects;
36417-atomic_t cm_accepts;
36418-atomic_t cm_disconnects;
36419-atomic_t cm_closes;
36420-atomic_t cm_connecteds;
36421-atomic_t cm_connect_reqs;
36422-atomic_t cm_rejects;
36423+atomic_unchecked_t cm_connects;
36424+atomic_unchecked_t cm_accepts;
36425+atomic_unchecked_t cm_disconnects;
36426+atomic_unchecked_t cm_closes;
36427+atomic_unchecked_t cm_connecteds;
36428+atomic_unchecked_t cm_connect_reqs;
36429+atomic_unchecked_t cm_rejects;
36430
36431 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
36432 {
36433@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
36434 kfree(listener);
36435 listener = NULL;
36436 ret = 0;
36437- atomic_inc(&cm_listens_destroyed);
36438+ atomic_inc_unchecked(&cm_listens_destroyed);
36439 } else {
36440 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
36441 }
36442@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
36443 cm_node->rem_mac);
36444
36445 add_hte_node(cm_core, cm_node);
36446- atomic_inc(&cm_nodes_created);
36447+ atomic_inc_unchecked(&cm_nodes_created);
36448
36449 return cm_node;
36450 }
36451@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
36452 }
36453
36454 atomic_dec(&cm_core->node_cnt);
36455- atomic_inc(&cm_nodes_destroyed);
36456+ atomic_inc_unchecked(&cm_nodes_destroyed);
36457 nesqp = cm_node->nesqp;
36458 if (nesqp) {
36459 nesqp->cm_node = NULL;
36460@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
36461
36462 static void drop_packet(struct sk_buff *skb)
36463 {
36464- atomic_inc(&cm_accel_dropped_pkts);
36465+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36466 dev_kfree_skb_any(skb);
36467 }
36468
36469@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
36470 {
36471
36472 int reset = 0; /* whether to send reset in case of err.. */
36473- atomic_inc(&cm_resets_recvd);
36474+ atomic_inc_unchecked(&cm_resets_recvd);
36475 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
36476 " refcnt=%d\n", cm_node, cm_node->state,
36477 atomic_read(&cm_node->ref_count));
36478@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
36479 rem_ref_cm_node(cm_node->cm_core, cm_node);
36480 return NULL;
36481 }
36482- atomic_inc(&cm_loopbacks);
36483+ atomic_inc_unchecked(&cm_loopbacks);
36484 loopbackremotenode->loopbackpartner = cm_node;
36485 loopbackremotenode->tcp_cntxt.rcv_wscale =
36486 NES_CM_DEFAULT_RCV_WND_SCALE;
36487@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
36488 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
36489 else {
36490 rem_ref_cm_node(cm_core, cm_node);
36491- atomic_inc(&cm_accel_dropped_pkts);
36492+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36493 dev_kfree_skb_any(skb);
36494 }
36495 break;
36496@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36497
36498 if ((cm_id) && (cm_id->event_handler)) {
36499 if (issue_disconn) {
36500- atomic_inc(&cm_disconnects);
36501+ atomic_inc_unchecked(&cm_disconnects);
36502 cm_event.event = IW_CM_EVENT_DISCONNECT;
36503 cm_event.status = disconn_status;
36504 cm_event.local_addr = cm_id->local_addr;
36505@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36506 }
36507
36508 if (issue_close) {
36509- atomic_inc(&cm_closes);
36510+ atomic_inc_unchecked(&cm_closes);
36511 nes_disconnect(nesqp, 1);
36512
36513 cm_id->provider_data = nesqp;
36514@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36515
36516 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
36517 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
36518- atomic_inc(&cm_accepts);
36519+ atomic_inc_unchecked(&cm_accepts);
36520
36521 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
36522 netdev_refcnt_read(nesvnic->netdev));
36523@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
36524 struct nes_cm_core *cm_core;
36525 u8 *start_buff;
36526
36527- atomic_inc(&cm_rejects);
36528+ atomic_inc_unchecked(&cm_rejects);
36529 cm_node = (struct nes_cm_node *)cm_id->provider_data;
36530 loopback = cm_node->loopbackpartner;
36531 cm_core = cm_node->cm_core;
36532@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36533 ntohl(cm_id->local_addr.sin_addr.s_addr),
36534 ntohs(cm_id->local_addr.sin_port));
36535
36536- atomic_inc(&cm_connects);
36537+ atomic_inc_unchecked(&cm_connects);
36538 nesqp->active_conn = 1;
36539
36540 /* cache the cm_id in the qp */
36541@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
36542 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
36543 return err;
36544 }
36545- atomic_inc(&cm_listens_created);
36546+ atomic_inc_unchecked(&cm_listens_created);
36547 }
36548
36549 cm_id->add_ref(cm_id);
36550@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
36551
36552 if (nesqp->destroyed)
36553 return;
36554- atomic_inc(&cm_connecteds);
36555+ atomic_inc_unchecked(&cm_connecteds);
36556 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
36557 " local port 0x%04X. jiffies = %lu.\n",
36558 nesqp->hwqp.qp_id,
36559@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
36560
36561 cm_id->add_ref(cm_id);
36562 ret = cm_id->event_handler(cm_id, &cm_event);
36563- atomic_inc(&cm_closes);
36564+ atomic_inc_unchecked(&cm_closes);
36565 cm_event.event = IW_CM_EVENT_CLOSE;
36566 cm_event.status = 0;
36567 cm_event.provider_data = cm_id->provider_data;
36568@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
36569 return;
36570 cm_id = cm_node->cm_id;
36571
36572- atomic_inc(&cm_connect_reqs);
36573+ atomic_inc_unchecked(&cm_connect_reqs);
36574 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36575 cm_node, cm_id, jiffies);
36576
36577@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
36578 return;
36579 cm_id = cm_node->cm_id;
36580
36581- atomic_inc(&cm_connect_reqs);
36582+ atomic_inc_unchecked(&cm_connect_reqs);
36583 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36584 cm_node, cm_id, jiffies);
36585
36586diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
36587index 4166452..fc952c3 100644
36588--- a/drivers/infiniband/hw/nes/nes_mgt.c
36589+++ b/drivers/infiniband/hw/nes/nes_mgt.c
36590@@ -40,8 +40,8 @@
36591 #include "nes.h"
36592 #include "nes_mgt.h"
36593
36594-atomic_t pau_qps_created;
36595-atomic_t pau_qps_destroyed;
36596+atomic_unchecked_t pau_qps_created;
36597+atomic_unchecked_t pau_qps_destroyed;
36598
36599 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
36600 {
36601@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
36602 {
36603 struct sk_buff *skb;
36604 unsigned long flags;
36605- atomic_inc(&pau_qps_destroyed);
36606+ atomic_inc_unchecked(&pau_qps_destroyed);
36607
36608 /* Free packets that have not yet been forwarded */
36609 /* Lock is acquired by skb_dequeue when removing the skb */
36610@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
36611 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
36612 skb_queue_head_init(&nesqp->pau_list);
36613 spin_lock_init(&nesqp->pau_lock);
36614- atomic_inc(&pau_qps_created);
36615+ atomic_inc_unchecked(&pau_qps_created);
36616 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
36617 }
36618
36619diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
36620index 9542e16..a008c40 100644
36621--- a/drivers/infiniband/hw/nes/nes_nic.c
36622+++ b/drivers/infiniband/hw/nes/nes_nic.c
36623@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
36624 target_stat_values[++index] = mh_detected;
36625 target_stat_values[++index] = mh_pauses_sent;
36626 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
36627- target_stat_values[++index] = atomic_read(&cm_connects);
36628- target_stat_values[++index] = atomic_read(&cm_accepts);
36629- target_stat_values[++index] = atomic_read(&cm_disconnects);
36630- target_stat_values[++index] = atomic_read(&cm_connecteds);
36631- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
36632- target_stat_values[++index] = atomic_read(&cm_rejects);
36633- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
36634- target_stat_values[++index] = atomic_read(&qps_created);
36635- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
36636- target_stat_values[++index] = atomic_read(&qps_destroyed);
36637- target_stat_values[++index] = atomic_read(&cm_closes);
36638+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
36639+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
36640+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
36641+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
36642+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
36643+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
36644+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
36645+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
36646+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
36647+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
36648+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
36649 target_stat_values[++index] = cm_packets_sent;
36650 target_stat_values[++index] = cm_packets_bounced;
36651 target_stat_values[++index] = cm_packets_created;
36652 target_stat_values[++index] = cm_packets_received;
36653 target_stat_values[++index] = cm_packets_dropped;
36654 target_stat_values[++index] = cm_packets_retrans;
36655- target_stat_values[++index] = atomic_read(&cm_listens_created);
36656- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
36657+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
36658+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
36659 target_stat_values[++index] = cm_backlog_drops;
36660- target_stat_values[++index] = atomic_read(&cm_loopbacks);
36661- target_stat_values[++index] = atomic_read(&cm_nodes_created);
36662- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
36663- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
36664- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
36665+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
36666+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
36667+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
36668+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
36669+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
36670 target_stat_values[++index] = nesadapter->free_4kpbl;
36671 target_stat_values[++index] = nesadapter->free_256pbl;
36672 target_stat_values[++index] = int_mod_timer_init;
36673 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
36674 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
36675 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
36676- target_stat_values[++index] = atomic_read(&pau_qps_created);
36677- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
36678+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
36679+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
36680 }
36681
36682 /**
36683diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
36684index 07e4fba..685f041 100644
36685--- a/drivers/infiniband/hw/nes/nes_verbs.c
36686+++ b/drivers/infiniband/hw/nes/nes_verbs.c
36687@@ -46,9 +46,9 @@
36688
36689 #include <rdma/ib_umem.h>
36690
36691-atomic_t mod_qp_timouts;
36692-atomic_t qps_created;
36693-atomic_t sw_qps_destroyed;
36694+atomic_unchecked_t mod_qp_timouts;
36695+atomic_unchecked_t qps_created;
36696+atomic_unchecked_t sw_qps_destroyed;
36697
36698 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
36699
36700@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
36701 if (init_attr->create_flags)
36702 return ERR_PTR(-EINVAL);
36703
36704- atomic_inc(&qps_created);
36705+ atomic_inc_unchecked(&qps_created);
36706 switch (init_attr->qp_type) {
36707 case IB_QPT_RC:
36708 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
36709@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
36710 struct iw_cm_event cm_event;
36711 int ret = 0;
36712
36713- atomic_inc(&sw_qps_destroyed);
36714+ atomic_inc_unchecked(&sw_qps_destroyed);
36715 nesqp->destroyed = 1;
36716
36717 /* Blow away the connection if it exists. */
36718diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
36719index 4d11575..3e890e5 100644
36720--- a/drivers/infiniband/hw/qib/qib.h
36721+++ b/drivers/infiniband/hw/qib/qib.h
36722@@ -51,6 +51,7 @@
36723 #include <linux/completion.h>
36724 #include <linux/kref.h>
36725 #include <linux/sched.h>
36726+#include <linux/slab.h>
36727
36728 #include "qib_common.h"
36729 #include "qib_verbs.h"
36730diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
36731index da739d9..da1c7f4 100644
36732--- a/drivers/input/gameport/gameport.c
36733+++ b/drivers/input/gameport/gameport.c
36734@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
36735 */
36736 static void gameport_init_port(struct gameport *gameport)
36737 {
36738- static atomic_t gameport_no = ATOMIC_INIT(0);
36739+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
36740
36741 __module_get(THIS_MODULE);
36742
36743 mutex_init(&gameport->drv_mutex);
36744 device_initialize(&gameport->dev);
36745 dev_set_name(&gameport->dev, "gameport%lu",
36746- (unsigned long)atomic_inc_return(&gameport_no) - 1);
36747+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
36748 gameport->dev.bus = &gameport_bus;
36749 gameport->dev.release = gameport_release_port;
36750 if (gameport->parent)
36751diff --git a/drivers/input/input.c b/drivers/input/input.c
36752index c044699..174d71a 100644
36753--- a/drivers/input/input.c
36754+++ b/drivers/input/input.c
36755@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
36756 */
36757 int input_register_device(struct input_dev *dev)
36758 {
36759- static atomic_t input_no = ATOMIC_INIT(0);
36760+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
36761 struct input_devres *devres = NULL;
36762 struct input_handler *handler;
36763 unsigned int packet_size;
36764@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
36765 dev->setkeycode = input_default_setkeycode;
36766
36767 dev_set_name(&dev->dev, "input%ld",
36768- (unsigned long) atomic_inc_return(&input_no) - 1);
36769+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
36770
36771 error = device_add(&dev->dev);
36772 if (error)
36773diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
36774index 04c69af..5f92d00 100644
36775--- a/drivers/input/joystick/sidewinder.c
36776+++ b/drivers/input/joystick/sidewinder.c
36777@@ -30,6 +30,7 @@
36778 #include <linux/kernel.h>
36779 #include <linux/module.h>
36780 #include <linux/slab.h>
36781+#include <linux/sched.h>
36782 #include <linux/init.h>
36783 #include <linux/input.h>
36784 #include <linux/gameport.h>
36785diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
36786index d6cbfe9..6225402 100644
36787--- a/drivers/input/joystick/xpad.c
36788+++ b/drivers/input/joystick/xpad.c
36789@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
36790
36791 static int xpad_led_probe(struct usb_xpad *xpad)
36792 {
36793- static atomic_t led_seq = ATOMIC_INIT(0);
36794+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
36795 long led_no;
36796 struct xpad_led *led;
36797 struct led_classdev *led_cdev;
36798@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
36799 if (!led)
36800 return -ENOMEM;
36801
36802- led_no = (long)atomic_inc_return(&led_seq) - 1;
36803+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
36804
36805 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
36806 led->xpad = xpad;
36807diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
36808index fe1df23..5b710f3 100644
36809--- a/drivers/input/mouse/psmouse.h
36810+++ b/drivers/input/mouse/psmouse.h
36811@@ -115,7 +115,7 @@ struct psmouse_attribute {
36812 ssize_t (*set)(struct psmouse *psmouse, void *data,
36813 const char *buf, size_t count);
36814 bool protect;
36815-};
36816+} __do_const;
36817 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
36818
36819 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
36820diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
36821index 4c842c3..590b0bf 100644
36822--- a/drivers/input/mousedev.c
36823+++ b/drivers/input/mousedev.c
36824@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
36825
36826 spin_unlock_irq(&client->packet_lock);
36827
36828- if (copy_to_user(buffer, data, count))
36829+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
36830 return -EFAULT;
36831
36832 return count;
36833diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
36834index 25fc597..558bf3b 100644
36835--- a/drivers/input/serio/serio.c
36836+++ b/drivers/input/serio/serio.c
36837@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
36838 */
36839 static void serio_init_port(struct serio *serio)
36840 {
36841- static atomic_t serio_no = ATOMIC_INIT(0);
36842+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
36843
36844 __module_get(THIS_MODULE);
36845
36846@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
36847 mutex_init(&serio->drv_mutex);
36848 device_initialize(&serio->dev);
36849 dev_set_name(&serio->dev, "serio%ld",
36850- (long)atomic_inc_return(&serio_no) - 1);
36851+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
36852 serio->dev.bus = &serio_bus;
36853 serio->dev.release = serio_release_port;
36854 serio->dev.groups = serio_device_attr_groups;
36855diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
36856index ddbdaca..be18a78 100644
36857--- a/drivers/iommu/iommu.c
36858+++ b/drivers/iommu/iommu.c
36859@@ -554,7 +554,7 @@ static struct notifier_block iommu_bus_nb = {
36860 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
36861 {
36862 bus_register_notifier(bus, &iommu_bus_nb);
36863- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
36864+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
36865 }
36866
36867 /**
36868diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
36869index 89562a8..218999b 100644
36870--- a/drivers/isdn/capi/capi.c
36871+++ b/drivers/isdn/capi/capi.c
36872@@ -81,8 +81,8 @@ struct capiminor {
36873
36874 struct capi20_appl *ap;
36875 u32 ncci;
36876- atomic_t datahandle;
36877- atomic_t msgid;
36878+ atomic_unchecked_t datahandle;
36879+ atomic_unchecked_t msgid;
36880
36881 struct tty_port port;
36882 int ttyinstop;
36883@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
36884 capimsg_setu16(s, 2, mp->ap->applid);
36885 capimsg_setu8 (s, 4, CAPI_DATA_B3);
36886 capimsg_setu8 (s, 5, CAPI_RESP);
36887- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
36888+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
36889 capimsg_setu32(s, 8, mp->ncci);
36890 capimsg_setu16(s, 12, datahandle);
36891 }
36892@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
36893 mp->outbytes -= len;
36894 spin_unlock_bh(&mp->outlock);
36895
36896- datahandle = atomic_inc_return(&mp->datahandle);
36897+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
36898 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
36899 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
36900 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
36901 capimsg_setu16(skb->data, 2, mp->ap->applid);
36902 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
36903 capimsg_setu8 (skb->data, 5, CAPI_REQ);
36904- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
36905+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
36906 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
36907 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
36908 capimsg_setu16(skb->data, 16, len); /* Data length */
36909diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
36910index 67abf3f..076b3a6 100644
36911--- a/drivers/isdn/gigaset/interface.c
36912+++ b/drivers/isdn/gigaset/interface.c
36913@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
36914 }
36915 tty->driver_data = cs;
36916
36917- ++cs->port.count;
36918+ atomic_inc(&cs->port.count);
36919
36920- if (cs->port.count == 1) {
36921+ if (atomic_read(&cs->port.count) == 1) {
36922 tty_port_tty_set(&cs->port, tty);
36923 tty->low_latency = 1;
36924 }
36925@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
36926
36927 if (!cs->connected)
36928 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
36929- else if (!cs->port.count)
36930+ else if (!atomic_read(&cs->port.count))
36931 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36932- else if (!--cs->port.count)
36933+ else if (!atomic_dec_return(&cs->port.count))
36934 tty_port_tty_set(&cs->port, NULL);
36935
36936 mutex_unlock(&cs->mutex);
36937diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
36938index 821f7ac..28d4030 100644
36939--- a/drivers/isdn/hardware/avm/b1.c
36940+++ b/drivers/isdn/hardware/avm/b1.c
36941@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
36942 }
36943 if (left) {
36944 if (t4file->user) {
36945- if (copy_from_user(buf, dp, left))
36946+ if (left > sizeof buf || copy_from_user(buf, dp, left))
36947 return -EFAULT;
36948 } else {
36949 memcpy(buf, dp, left);
36950@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
36951 }
36952 if (left) {
36953 if (config->user) {
36954- if (copy_from_user(buf, dp, left))
36955+ if (left > sizeof buf || copy_from_user(buf, dp, left))
36956 return -EFAULT;
36957 } else {
36958 memcpy(buf, dp, left);
36959diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
36960index e09dc8a..15e2efb 100644
36961--- a/drivers/isdn/i4l/isdn_tty.c
36962+++ b/drivers/isdn/i4l/isdn_tty.c
36963@@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
36964
36965 #ifdef ISDN_DEBUG_MODEM_OPEN
36966 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
36967- port->count);
36968+ atomic_read(&port->count));
36969 #endif
36970- port->count++;
36971+ atomic_inc(&port->count);
36972 port->tty = tty;
36973 /*
36974 * Start up serial port
36975@@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
36976 #endif
36977 return;
36978 }
36979- if ((tty->count == 1) && (port->count != 1)) {
36980+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
36981 /*
36982 * Uh, oh. tty->count is 1, which means that the tty
36983 * structure will be freed. Info->count should always
36984@@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
36985 * serial port won't be shutdown.
36986 */
36987 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
36988- "info->count is %d\n", port->count);
36989- port->count = 1;
36990+ "info->count is %d\n", atomic_read(&port->count));
36991+ atomic_set(&port->count, 1);
36992 }
36993- if (--port->count < 0) {
36994+ if (atomic_dec_return(&port->count) < 0) {
36995 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
36996- info->line, port->count);
36997- port->count = 0;
36998+ info->line, atomic_read(&port->count));
36999+ atomic_set(&port->count, 0);
37000 }
37001- if (port->count) {
37002+ if (atomic_read(&port->count)) {
37003 #ifdef ISDN_DEBUG_MODEM_OPEN
37004 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
37005 #endif
37006@@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
37007 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
37008 return;
37009 isdn_tty_shutdown(info);
37010- port->count = 0;
37011+ atomic_set(&port->count, 0);
37012 port->flags &= ~ASYNC_NORMAL_ACTIVE;
37013 port->tty = NULL;
37014 wake_up_interruptible(&port->open_wait);
37015@@ -1975,7 +1975,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
37016 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
37017 modem_info *info = &dev->mdm.info[i];
37018
37019- if (info->port.count == 0)
37020+ if (atomic_read(&info->port.count) == 0)
37021 continue;
37022 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
37023 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
37024diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37025index e74df7c..03a03ba 100644
37026--- a/drivers/isdn/icn/icn.c
37027+++ b/drivers/isdn/icn/icn.c
37028@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
37029 if (count > len)
37030 count = len;
37031 if (user) {
37032- if (copy_from_user(msg, buf, count))
37033+ if (count > sizeof msg || copy_from_user(msg, buf, count))
37034 return -EFAULT;
37035 } else
37036 memcpy(msg, buf, count);
37037diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
37038index 6a8405d..0bd1c7e 100644
37039--- a/drivers/leds/leds-clevo-mail.c
37040+++ b/drivers/leds/leds-clevo-mail.c
37041@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
37042 * detected as working, but in reality it is not) as low as
37043 * possible.
37044 */
37045-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
37046+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
37047 {
37048 .callback = clevo_mail_led_dmi_callback,
37049 .ident = "Clevo D410J",
37050diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
37051index ec9b287..65c9bf4 100644
37052--- a/drivers/leds/leds-ss4200.c
37053+++ b/drivers/leds/leds-ss4200.c
37054@@ -92,7 +92,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
37055 * detected as working, but in reality it is not) as low as
37056 * possible.
37057 */
37058-static struct dmi_system_id __initdata nas_led_whitelist[] = {
37059+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
37060 {
37061 .callback = ss4200_led_dmi_callback,
37062 .ident = "Intel SS4200-E",
37063diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37064index a5ebc00..982886f 100644
37065--- a/drivers/lguest/core.c
37066+++ b/drivers/lguest/core.c
37067@@ -92,9 +92,17 @@ static __init int map_switcher(void)
37068 * it's worked so far. The end address needs +1 because __get_vm_area
37069 * allocates an extra guard page, so we need space for that.
37070 */
37071+
37072+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37073+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37074+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37075+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37076+#else
37077 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37078 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37079 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37080+#endif
37081+
37082 if (!switcher_vma) {
37083 err = -ENOMEM;
37084 printk("lguest: could not map switcher pages high\n");
37085@@ -119,7 +127,7 @@ static __init int map_switcher(void)
37086 * Now the Switcher is mapped at the right address, we can't fail!
37087 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
37088 */
37089- memcpy(switcher_vma->addr, start_switcher_text,
37090+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37091 end_switcher_text - start_switcher_text);
37092
37093 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37094diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37095index 4af12e1..0e89afe 100644
37096--- a/drivers/lguest/x86/core.c
37097+++ b/drivers/lguest/x86/core.c
37098@@ -59,7 +59,7 @@ static struct {
37099 /* Offset from where switcher.S was compiled to where we've copied it */
37100 static unsigned long switcher_offset(void)
37101 {
37102- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37103+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37104 }
37105
37106 /* This cpu's struct lguest_pages. */
37107@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37108 * These copies are pretty cheap, so we do them unconditionally: */
37109 /* Save the current Host top-level page directory.
37110 */
37111+
37112+#ifdef CONFIG_PAX_PER_CPU_PGD
37113+ pages->state.host_cr3 = read_cr3();
37114+#else
37115 pages->state.host_cr3 = __pa(current->mm->pgd);
37116+#endif
37117+
37118 /*
37119 * Set up the Guest's page tables to see this CPU's pages (and no
37120 * other CPU's pages).
37121@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
37122 * compiled-in switcher code and the high-mapped copy we just made.
37123 */
37124 for (i = 0; i < IDT_ENTRIES; i++)
37125- default_idt_entries[i] += switcher_offset();
37126+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37127
37128 /*
37129 * Set up the Switcher's per-cpu areas.
37130@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
37131 * it will be undisturbed when we switch. To change %cs and jump we
37132 * need this structure to feed to Intel's "lcall" instruction.
37133 */
37134- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37135+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37136 lguest_entry.segment = LGUEST_CS;
37137
37138 /*
37139diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37140index 40634b0..4f5855e 100644
37141--- a/drivers/lguest/x86/switcher_32.S
37142+++ b/drivers/lguest/x86/switcher_32.S
37143@@ -87,6 +87,7 @@
37144 #include <asm/page.h>
37145 #include <asm/segment.h>
37146 #include <asm/lguest.h>
37147+#include <asm/processor-flags.h>
37148
37149 // We mark the start of the code to copy
37150 // It's placed in .text tho it's never run here
37151@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37152 // Changes type when we load it: damn Intel!
37153 // For after we switch over our page tables
37154 // That entry will be read-only: we'd crash.
37155+
37156+#ifdef CONFIG_PAX_KERNEXEC
37157+ mov %cr0, %edx
37158+ xor $X86_CR0_WP, %edx
37159+ mov %edx, %cr0
37160+#endif
37161+
37162 movl $(GDT_ENTRY_TSS*8), %edx
37163 ltr %dx
37164
37165@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37166 // Let's clear it again for our return.
37167 // The GDT descriptor of the Host
37168 // Points to the table after two "size" bytes
37169- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37170+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37171 // Clear "used" from type field (byte 5, bit 2)
37172- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37173+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37174+
37175+#ifdef CONFIG_PAX_KERNEXEC
37176+ mov %cr0, %eax
37177+ xor $X86_CR0_WP, %eax
37178+ mov %eax, %cr0
37179+#endif
37180
37181 // Once our page table's switched, the Guest is live!
37182 // The Host fades as we run this final step.
37183@@ -295,13 +309,12 @@ deliver_to_host:
37184 // I consulted gcc, and it gave
37185 // These instructions, which I gladly credit:
37186 leal (%edx,%ebx,8), %eax
37187- movzwl (%eax),%edx
37188- movl 4(%eax), %eax
37189- xorw %ax, %ax
37190- orl %eax, %edx
37191+ movl 4(%eax), %edx
37192+ movw (%eax), %dx
37193 // Now the address of the handler's in %edx
37194 // We call it now: its "iret" drops us home.
37195- jmp *%edx
37196+ ljmp $__KERNEL_CS, $1f
37197+1: jmp *%edx
37198
37199 // Every interrupt can come to us here
37200 // But we must truly tell each apart.
37201diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
37202index 7155945..4bcc562 100644
37203--- a/drivers/md/bitmap.c
37204+++ b/drivers/md/bitmap.c
37205@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
37206 chunk_kb ? "KB" : "B");
37207 if (bitmap->storage.file) {
37208 seq_printf(seq, ", file: ");
37209- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
37210+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
37211 }
37212
37213 seq_printf(seq, "\n");
37214diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37215index 0666b5d..ed82cb4 100644
37216--- a/drivers/md/dm-ioctl.c
37217+++ b/drivers/md/dm-ioctl.c
37218@@ -1628,7 +1628,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37219 cmd == DM_LIST_VERSIONS_CMD)
37220 return 0;
37221
37222- if ((cmd == DM_DEV_CREATE_CMD)) {
37223+ if (cmd == DM_DEV_CREATE_CMD) {
37224 if (!*param->name) {
37225 DMWARN("name not supplied when creating device");
37226 return -EINVAL;
37227diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37228index fa51918..c26253c 100644
37229--- a/drivers/md/dm-raid1.c
37230+++ b/drivers/md/dm-raid1.c
37231@@ -40,7 +40,7 @@ enum dm_raid1_error {
37232
37233 struct mirror {
37234 struct mirror_set *ms;
37235- atomic_t error_count;
37236+ atomic_unchecked_t error_count;
37237 unsigned long error_type;
37238 struct dm_dev *dev;
37239 sector_t offset;
37240@@ -183,7 +183,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
37241 struct mirror *m;
37242
37243 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
37244- if (!atomic_read(&m->error_count))
37245+ if (!atomic_read_unchecked(&m->error_count))
37246 return m;
37247
37248 return NULL;
37249@@ -215,7 +215,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37250 * simple way to tell if a device has encountered
37251 * errors.
37252 */
37253- atomic_inc(&m->error_count);
37254+ atomic_inc_unchecked(&m->error_count);
37255
37256 if (test_and_set_bit(error_type, &m->error_type))
37257 return;
37258@@ -406,7 +406,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
37259 struct mirror *m = get_default_mirror(ms);
37260
37261 do {
37262- if (likely(!atomic_read(&m->error_count)))
37263+ if (likely(!atomic_read_unchecked(&m->error_count)))
37264 return m;
37265
37266 if (m-- == ms->mirror)
37267@@ -420,7 +420,7 @@ static int default_ok(struct mirror *m)
37268 {
37269 struct mirror *default_mirror = get_default_mirror(m->ms);
37270
37271- return !atomic_read(&default_mirror->error_count);
37272+ return !atomic_read_unchecked(&default_mirror->error_count);
37273 }
37274
37275 static int mirror_available(struct mirror_set *ms, struct bio *bio)
37276@@ -557,7 +557,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
37277 */
37278 if (likely(region_in_sync(ms, region, 1)))
37279 m = choose_mirror(ms, bio->bi_sector);
37280- else if (m && atomic_read(&m->error_count))
37281+ else if (m && atomic_read_unchecked(&m->error_count))
37282 m = NULL;
37283
37284 if (likely(m))
37285@@ -924,7 +924,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
37286 }
37287
37288 ms->mirror[mirror].ms = ms;
37289- atomic_set(&(ms->mirror[mirror].error_count), 0);
37290+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
37291 ms->mirror[mirror].error_type = 0;
37292 ms->mirror[mirror].offset = offset;
37293
37294@@ -1337,7 +1337,7 @@ static void mirror_resume(struct dm_target *ti)
37295 */
37296 static char device_status_char(struct mirror *m)
37297 {
37298- if (!atomic_read(&(m->error_count)))
37299+ if (!atomic_read_unchecked(&(m->error_count)))
37300 return 'A';
37301
37302 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
37303diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
37304index c89cde8..9d184cf 100644
37305--- a/drivers/md/dm-stripe.c
37306+++ b/drivers/md/dm-stripe.c
37307@@ -20,7 +20,7 @@ struct stripe {
37308 struct dm_dev *dev;
37309 sector_t physical_start;
37310
37311- atomic_t error_count;
37312+ atomic_unchecked_t error_count;
37313 };
37314
37315 struct stripe_c {
37316@@ -184,7 +184,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37317 kfree(sc);
37318 return r;
37319 }
37320- atomic_set(&(sc->stripe[i].error_count), 0);
37321+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
37322 }
37323
37324 ti->private = sc;
37325@@ -325,7 +325,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
37326 DMEMIT("%d ", sc->stripes);
37327 for (i = 0; i < sc->stripes; i++) {
37328 DMEMIT("%s ", sc->stripe[i].dev->name);
37329- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
37330+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
37331 'D' : 'A';
37332 }
37333 buffer[i] = '\0';
37334@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
37335 */
37336 for (i = 0; i < sc->stripes; i++)
37337 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
37338- atomic_inc(&(sc->stripe[i].error_count));
37339- if (atomic_read(&(sc->stripe[i].error_count)) <
37340+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
37341+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
37342 DM_IO_ERROR_THRESHOLD)
37343 schedule_work(&sc->trigger_event);
37344 }
37345diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
37346index daf25d0..d74f49f 100644
37347--- a/drivers/md/dm-table.c
37348+++ b/drivers/md/dm-table.c
37349@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
37350 if (!dev_size)
37351 return 0;
37352
37353- if ((start >= dev_size) || (start + len > dev_size)) {
37354+ if ((start >= dev_size) || (len > dev_size - start)) {
37355 DMWARN("%s: %s too small for target: "
37356 "start=%llu, len=%llu, dev_size=%llu",
37357 dm_device_name(ti->table->md), bdevname(bdev, b),
37358diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
37359index 4d6e853..a234157 100644
37360--- a/drivers/md/dm-thin-metadata.c
37361+++ b/drivers/md/dm-thin-metadata.c
37362@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
37363 {
37364 pmd->info.tm = pmd->tm;
37365 pmd->info.levels = 2;
37366- pmd->info.value_type.context = pmd->data_sm;
37367+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
37368 pmd->info.value_type.size = sizeof(__le64);
37369 pmd->info.value_type.inc = data_block_inc;
37370 pmd->info.value_type.dec = data_block_dec;
37371@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
37372
37373 pmd->bl_info.tm = pmd->tm;
37374 pmd->bl_info.levels = 1;
37375- pmd->bl_info.value_type.context = pmd->data_sm;
37376+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
37377 pmd->bl_info.value_type.size = sizeof(__le64);
37378 pmd->bl_info.value_type.inc = data_block_inc;
37379 pmd->bl_info.value_type.dec = data_block_dec;
37380diff --git a/drivers/md/dm.c b/drivers/md/dm.c
37381index 314a0e2..1376406 100644
37382--- a/drivers/md/dm.c
37383+++ b/drivers/md/dm.c
37384@@ -170,9 +170,9 @@ struct mapped_device {
37385 /*
37386 * Event handling.
37387 */
37388- atomic_t event_nr;
37389+ atomic_unchecked_t event_nr;
37390 wait_queue_head_t eventq;
37391- atomic_t uevent_seq;
37392+ atomic_unchecked_t uevent_seq;
37393 struct list_head uevent_list;
37394 spinlock_t uevent_lock; /* Protect access to uevent_list */
37395
37396@@ -1872,8 +1872,8 @@ static struct mapped_device *alloc_dev(int minor)
37397 rwlock_init(&md->map_lock);
37398 atomic_set(&md->holders, 1);
37399 atomic_set(&md->open_count, 0);
37400- atomic_set(&md->event_nr, 0);
37401- atomic_set(&md->uevent_seq, 0);
37402+ atomic_set_unchecked(&md->event_nr, 0);
37403+ atomic_set_unchecked(&md->uevent_seq, 0);
37404 INIT_LIST_HEAD(&md->uevent_list);
37405 spin_lock_init(&md->uevent_lock);
37406
37407@@ -2014,7 +2014,7 @@ static void event_callback(void *context)
37408
37409 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
37410
37411- atomic_inc(&md->event_nr);
37412+ atomic_inc_unchecked(&md->event_nr);
37413 wake_up(&md->eventq);
37414 }
37415
37416@@ -2669,18 +2669,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
37417
37418 uint32_t dm_next_uevent_seq(struct mapped_device *md)
37419 {
37420- return atomic_add_return(1, &md->uevent_seq);
37421+ return atomic_add_return_unchecked(1, &md->uevent_seq);
37422 }
37423
37424 uint32_t dm_get_event_nr(struct mapped_device *md)
37425 {
37426- return atomic_read(&md->event_nr);
37427+ return atomic_read_unchecked(&md->event_nr);
37428 }
37429
37430 int dm_wait_event(struct mapped_device *md, int event_nr)
37431 {
37432 return wait_event_interruptible(md->eventq,
37433- (event_nr != atomic_read(&md->event_nr)));
37434+ (event_nr != atomic_read_unchecked(&md->event_nr)));
37435 }
37436
37437 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
37438diff --git a/drivers/md/md.c b/drivers/md/md.c
37439index 3db3d1b..9487468 100644
37440--- a/drivers/md/md.c
37441+++ b/drivers/md/md.c
37442@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
37443 * start build, activate spare
37444 */
37445 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
37446-static atomic_t md_event_count;
37447+static atomic_unchecked_t md_event_count;
37448 void md_new_event(struct mddev *mddev)
37449 {
37450- atomic_inc(&md_event_count);
37451+ atomic_inc_unchecked(&md_event_count);
37452 wake_up(&md_event_waiters);
37453 }
37454 EXPORT_SYMBOL_GPL(md_new_event);
37455@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
37456 */
37457 static void md_new_event_inintr(struct mddev *mddev)
37458 {
37459- atomic_inc(&md_event_count);
37460+ atomic_inc_unchecked(&md_event_count);
37461 wake_up(&md_event_waiters);
37462 }
37463
37464@@ -1503,7 +1503,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
37465 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
37466 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
37467 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
37468- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37469+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37470
37471 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
37472 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
37473@@ -1747,7 +1747,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
37474 else
37475 sb->resync_offset = cpu_to_le64(0);
37476
37477- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
37478+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
37479
37480 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
37481 sb->size = cpu_to_le64(mddev->dev_sectors);
37482@@ -2747,7 +2747,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
37483 static ssize_t
37484 errors_show(struct md_rdev *rdev, char *page)
37485 {
37486- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
37487+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
37488 }
37489
37490 static ssize_t
37491@@ -2756,7 +2756,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
37492 char *e;
37493 unsigned long n = simple_strtoul(buf, &e, 10);
37494 if (*buf && (*e == 0 || *e == '\n')) {
37495- atomic_set(&rdev->corrected_errors, n);
37496+ atomic_set_unchecked(&rdev->corrected_errors, n);
37497 return len;
37498 }
37499 return -EINVAL;
37500@@ -3203,8 +3203,8 @@ int md_rdev_init(struct md_rdev *rdev)
37501 rdev->sb_loaded = 0;
37502 rdev->bb_page = NULL;
37503 atomic_set(&rdev->nr_pending, 0);
37504- atomic_set(&rdev->read_errors, 0);
37505- atomic_set(&rdev->corrected_errors, 0);
37506+ atomic_set_unchecked(&rdev->read_errors, 0);
37507+ atomic_set_unchecked(&rdev->corrected_errors, 0);
37508
37509 INIT_LIST_HEAD(&rdev->same_set);
37510 init_waitqueue_head(&rdev->blocked_wait);
37511@@ -6980,7 +6980,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
37512
37513 spin_unlock(&pers_lock);
37514 seq_printf(seq, "\n");
37515- seq->poll_event = atomic_read(&md_event_count);
37516+ seq->poll_event = atomic_read_unchecked(&md_event_count);
37517 return 0;
37518 }
37519 if (v == (void*)2) {
37520@@ -7083,7 +7083,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
37521 return error;
37522
37523 seq = file->private_data;
37524- seq->poll_event = atomic_read(&md_event_count);
37525+ seq->poll_event = atomic_read_unchecked(&md_event_count);
37526 return error;
37527 }
37528
37529@@ -7097,7 +7097,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
37530 /* always allow read */
37531 mask = POLLIN | POLLRDNORM;
37532
37533- if (seq->poll_event != atomic_read(&md_event_count))
37534+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
37535 mask |= POLLERR | POLLPRI;
37536 return mask;
37537 }
37538@@ -7141,7 +7141,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
37539 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
37540 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
37541 (int)part_stat_read(&disk->part0, sectors[1]) -
37542- atomic_read(&disk->sync_io);
37543+ atomic_read_unchecked(&disk->sync_io);
37544 /* sync IO will cause sync_io to increase before the disk_stats
37545 * as sync_io is counted when a request starts, and
37546 * disk_stats is counted when it completes.
37547diff --git a/drivers/md/md.h b/drivers/md/md.h
37548index eca59c3..7c42285 100644
37549--- a/drivers/md/md.h
37550+++ b/drivers/md/md.h
37551@@ -94,13 +94,13 @@ struct md_rdev {
37552 * only maintained for arrays that
37553 * support hot removal
37554 */
37555- atomic_t read_errors; /* number of consecutive read errors that
37556+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
37557 * we have tried to ignore.
37558 */
37559 struct timespec last_read_error; /* monotonic time since our
37560 * last read error
37561 */
37562- atomic_t corrected_errors; /* number of corrected read errors,
37563+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
37564 * for reporting to userspace and storing
37565 * in superblock.
37566 */
37567@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
37568
37569 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
37570 {
37571- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
37572+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
37573 }
37574
37575 struct md_personality
37576diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
37577index 1cbfc6b..56e1dbb 100644
37578--- a/drivers/md/persistent-data/dm-space-map.h
37579+++ b/drivers/md/persistent-data/dm-space-map.h
37580@@ -60,6 +60,7 @@ struct dm_space_map {
37581 int (*root_size)(struct dm_space_map *sm, size_t *result);
37582 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
37583 };
37584+typedef struct dm_space_map __no_const dm_space_map_no_const;
37585
37586 /*----------------------------------------------------------------*/
37587
37588diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
37589index d5bddfc..b079b4b 100644
37590--- a/drivers/md/raid1.c
37591+++ b/drivers/md/raid1.c
37592@@ -1818,7 +1818,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
37593 if (r1_sync_page_io(rdev, sect, s,
37594 bio->bi_io_vec[idx].bv_page,
37595 READ) != 0)
37596- atomic_add(s, &rdev->corrected_errors);
37597+ atomic_add_unchecked(s, &rdev->corrected_errors);
37598 }
37599 sectors -= s;
37600 sect += s;
37601@@ -2040,7 +2040,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
37602 test_bit(In_sync, &rdev->flags)) {
37603 if (r1_sync_page_io(rdev, sect, s,
37604 conf->tmppage, READ)) {
37605- atomic_add(s, &rdev->corrected_errors);
37606+ atomic_add_unchecked(s, &rdev->corrected_errors);
37607 printk(KERN_INFO
37608 "md/raid1:%s: read error corrected "
37609 "(%d sectors at %llu on %s)\n",
37610diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
37611index 64d4824..8b9ea57 100644
37612--- a/drivers/md/raid10.c
37613+++ b/drivers/md/raid10.c
37614@@ -1877,7 +1877,7 @@ static void end_sync_read(struct bio *bio, int error)
37615 /* The write handler will notice the lack of
37616 * R10BIO_Uptodate and record any errors etc
37617 */
37618- atomic_add(r10_bio->sectors,
37619+ atomic_add_unchecked(r10_bio->sectors,
37620 &conf->mirrors[d].rdev->corrected_errors);
37621
37622 /* for reconstruct, we always reschedule after a read.
37623@@ -2226,7 +2226,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
37624 {
37625 struct timespec cur_time_mon;
37626 unsigned long hours_since_last;
37627- unsigned int read_errors = atomic_read(&rdev->read_errors);
37628+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
37629
37630 ktime_get_ts(&cur_time_mon);
37631
37632@@ -2248,9 +2248,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
37633 * overflowing the shift of read_errors by hours_since_last.
37634 */
37635 if (hours_since_last >= 8 * sizeof(read_errors))
37636- atomic_set(&rdev->read_errors, 0);
37637+ atomic_set_unchecked(&rdev->read_errors, 0);
37638 else
37639- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
37640+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
37641 }
37642
37643 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
37644@@ -2304,8 +2304,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
37645 return;
37646
37647 check_decay_read_errors(mddev, rdev);
37648- atomic_inc(&rdev->read_errors);
37649- if (atomic_read(&rdev->read_errors) > max_read_errors) {
37650+ atomic_inc_unchecked(&rdev->read_errors);
37651+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
37652 char b[BDEVNAME_SIZE];
37653 bdevname(rdev->bdev, b);
37654
37655@@ -2313,7 +2313,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
37656 "md/raid10:%s: %s: Raid device exceeded "
37657 "read_error threshold [cur %d:max %d]\n",
37658 mdname(mddev), b,
37659- atomic_read(&rdev->read_errors), max_read_errors);
37660+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
37661 printk(KERN_NOTICE
37662 "md/raid10:%s: %s: Failing raid device\n",
37663 mdname(mddev), b);
37664@@ -2468,7 +2468,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
37665 sect +
37666 choose_data_offset(r10_bio, rdev)),
37667 bdevname(rdev->bdev, b));
37668- atomic_add(s, &rdev->corrected_errors);
37669+ atomic_add_unchecked(s, &rdev->corrected_errors);
37670 }
37671
37672 rdev_dec_pending(rdev, mddev);
37673diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
37674index 19d77a0..56051b92 100644
37675--- a/drivers/md/raid5.c
37676+++ b/drivers/md/raid5.c
37677@@ -1797,21 +1797,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
37678 mdname(conf->mddev), STRIPE_SECTORS,
37679 (unsigned long long)s,
37680 bdevname(rdev->bdev, b));
37681- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
37682+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
37683 clear_bit(R5_ReadError, &sh->dev[i].flags);
37684 clear_bit(R5_ReWrite, &sh->dev[i].flags);
37685 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
37686 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
37687
37688- if (atomic_read(&rdev->read_errors))
37689- atomic_set(&rdev->read_errors, 0);
37690+ if (atomic_read_unchecked(&rdev->read_errors))
37691+ atomic_set_unchecked(&rdev->read_errors, 0);
37692 } else {
37693 const char *bdn = bdevname(rdev->bdev, b);
37694 int retry = 0;
37695 int set_bad = 0;
37696
37697 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
37698- atomic_inc(&rdev->read_errors);
37699+ atomic_inc_unchecked(&rdev->read_errors);
37700 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
37701 printk_ratelimited(
37702 KERN_WARNING
37703@@ -1839,7 +1839,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
37704 mdname(conf->mddev),
37705 (unsigned long long)s,
37706 bdn);
37707- } else if (atomic_read(&rdev->read_errors)
37708+ } else if (atomic_read_unchecked(&rdev->read_errors)
37709 > conf->max_nr_stripes)
37710 printk(KERN_WARNING
37711 "md/raid:%s: Too many read errors, failing device %s.\n",
37712diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
37713index d33101a..6b13069 100644
37714--- a/drivers/media/dvb-core/dvbdev.c
37715+++ b/drivers/media/dvb-core/dvbdev.c
37716@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
37717 const struct dvb_device *template, void *priv, int type)
37718 {
37719 struct dvb_device *dvbdev;
37720- struct file_operations *dvbdevfops;
37721+ file_operations_no_const *dvbdevfops;
37722 struct device *clsdev;
37723 int minor;
37724 int id;
37725diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
37726index 404f63a..4796533 100644
37727--- a/drivers/media/dvb-frontends/dib3000.h
37728+++ b/drivers/media/dvb-frontends/dib3000.h
37729@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
37730 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
37731 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
37732 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
37733-};
37734+} __no_const;
37735
37736 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
37737 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
37738diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
37739index 8e9a668..78d6310 100644
37740--- a/drivers/media/platform/omap/omap_vout.c
37741+++ b/drivers/media/platform/omap/omap_vout.c
37742@@ -63,7 +63,6 @@ enum omap_vout_channels {
37743 OMAP_VIDEO2,
37744 };
37745
37746-static struct videobuf_queue_ops video_vbq_ops;
37747 /* Variables configurable through module params*/
37748 static u32 video1_numbuffers = 3;
37749 static u32 video2_numbuffers = 3;
37750@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
37751 {
37752 struct videobuf_queue *q;
37753 struct omap_vout_device *vout = NULL;
37754+ static struct videobuf_queue_ops video_vbq_ops = {
37755+ .buf_setup = omap_vout_buffer_setup,
37756+ .buf_prepare = omap_vout_buffer_prepare,
37757+ .buf_release = omap_vout_buffer_release,
37758+ .buf_queue = omap_vout_buffer_queue,
37759+ };
37760
37761 vout = video_drvdata(file);
37762 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
37763@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
37764 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
37765
37766 q = &vout->vbq;
37767- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
37768- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
37769- video_vbq_ops.buf_release = omap_vout_buffer_release;
37770- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
37771 spin_lock_init(&vout->vbq_lock);
37772
37773 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
37774diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
37775index b671e20..34088b7 100644
37776--- a/drivers/media/platform/s5p-tv/mixer.h
37777+++ b/drivers/media/platform/s5p-tv/mixer.h
37778@@ -155,7 +155,7 @@ struct mxr_layer {
37779 /** layer index (unique identifier) */
37780 int idx;
37781 /** callbacks for layer methods */
37782- struct mxr_layer_ops ops;
37783+ struct mxr_layer_ops *ops;
37784 /** format array */
37785 const struct mxr_format **fmt_array;
37786 /** size of format array */
37787diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
37788index b93a21f..2535195 100644
37789--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
37790+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
37791@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
37792 {
37793 struct mxr_layer *layer;
37794 int ret;
37795- struct mxr_layer_ops ops = {
37796+ static struct mxr_layer_ops ops = {
37797 .release = mxr_graph_layer_release,
37798 .buffer_set = mxr_graph_buffer_set,
37799 .stream_set = mxr_graph_stream_set,
37800diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
37801index 3b1670a..595c939 100644
37802--- a/drivers/media/platform/s5p-tv/mixer_reg.c
37803+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
37804@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
37805 layer->update_buf = next;
37806 }
37807
37808- layer->ops.buffer_set(layer, layer->update_buf);
37809+ layer->ops->buffer_set(layer, layer->update_buf);
37810
37811 if (done && done != layer->shadow_buf)
37812 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
37813diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
37814index 1f3b743..e839271 100644
37815--- a/drivers/media/platform/s5p-tv/mixer_video.c
37816+++ b/drivers/media/platform/s5p-tv/mixer_video.c
37817@@ -208,7 +208,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
37818 layer->geo.src.height = layer->geo.src.full_height;
37819
37820 mxr_geometry_dump(mdev, &layer->geo);
37821- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
37822+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
37823 mxr_geometry_dump(mdev, &layer->geo);
37824 }
37825
37826@@ -226,7 +226,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
37827 layer->geo.dst.full_width = mbus_fmt.width;
37828 layer->geo.dst.full_height = mbus_fmt.height;
37829 layer->geo.dst.field = mbus_fmt.field;
37830- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
37831+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
37832
37833 mxr_geometry_dump(mdev, &layer->geo);
37834 }
37835@@ -332,7 +332,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
37836 /* set source size to highest accepted value */
37837 geo->src.full_width = max(geo->dst.full_width, pix->width);
37838 geo->src.full_height = max(geo->dst.full_height, pix->height);
37839- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
37840+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
37841 mxr_geometry_dump(mdev, &layer->geo);
37842 /* set cropping to total visible screen */
37843 geo->src.width = pix->width;
37844@@ -340,12 +340,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
37845 geo->src.x_offset = 0;
37846 geo->src.y_offset = 0;
37847 /* assure consistency of geometry */
37848- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
37849+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
37850 mxr_geometry_dump(mdev, &layer->geo);
37851 /* set full size to lowest possible value */
37852 geo->src.full_width = 0;
37853 geo->src.full_height = 0;
37854- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
37855+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
37856 mxr_geometry_dump(mdev, &layer->geo);
37857
37858 /* returning results */
37859@@ -472,7 +472,7 @@ static int mxr_s_selection(struct file *file, void *fh,
37860 target->width = s->r.width;
37861 target->height = s->r.height;
37862
37863- layer->ops.fix_geometry(layer, stage, s->flags);
37864+ layer->ops->fix_geometry(layer, stage, s->flags);
37865
37866 /* retrieve update selection rectangle */
37867 res.left = target->x_offset;
37868@@ -937,13 +937,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
37869 mxr_output_get(mdev);
37870
37871 mxr_layer_update_output(layer);
37872- layer->ops.format_set(layer);
37873+ layer->ops->format_set(layer);
37874 /* enabling layer in hardware */
37875 spin_lock_irqsave(&layer->enq_slock, flags);
37876 layer->state = MXR_LAYER_STREAMING;
37877 spin_unlock_irqrestore(&layer->enq_slock, flags);
37878
37879- layer->ops.stream_set(layer, MXR_ENABLE);
37880+ layer->ops->stream_set(layer, MXR_ENABLE);
37881 mxr_streamer_get(mdev);
37882
37883 return 0;
37884@@ -1013,7 +1013,7 @@ static int stop_streaming(struct vb2_queue *vq)
37885 spin_unlock_irqrestore(&layer->enq_slock, flags);
37886
37887 /* disabling layer in hardware */
37888- layer->ops.stream_set(layer, MXR_DISABLE);
37889+ layer->ops->stream_set(layer, MXR_DISABLE);
37890 /* remove one streamer */
37891 mxr_streamer_put(mdev);
37892 /* allow changes in output configuration */
37893@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
37894
37895 void mxr_layer_release(struct mxr_layer *layer)
37896 {
37897- if (layer->ops.release)
37898- layer->ops.release(layer);
37899+ if (layer->ops->release)
37900+ layer->ops->release(layer);
37901 }
37902
37903 void mxr_base_layer_release(struct mxr_layer *layer)
37904@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
37905
37906 layer->mdev = mdev;
37907 layer->idx = idx;
37908- layer->ops = *ops;
37909+ layer->ops = ops;
37910
37911 spin_lock_init(&layer->enq_slock);
37912 INIT_LIST_HEAD(&layer->enq_list);
37913diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
37914index 3d13a63..da31bf1 100644
37915--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
37916+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
37917@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
37918 {
37919 struct mxr_layer *layer;
37920 int ret;
37921- struct mxr_layer_ops ops = {
37922+ static struct mxr_layer_ops ops = {
37923 .release = mxr_vp_layer_release,
37924 .buffer_set = mxr_vp_buffer_set,
37925 .stream_set = mxr_vp_stream_set,
37926diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
37927index 643d80a..56bb96b 100644
37928--- a/drivers/media/radio/radio-cadet.c
37929+++ b/drivers/media/radio/radio-cadet.c
37930@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
37931 unsigned char readbuf[RDS_BUFFER];
37932 int i = 0;
37933
37934+ if (count > RDS_BUFFER)
37935+ return -EFAULT;
37936 mutex_lock(&dev->lock);
37937 if (dev->rdsstat == 0)
37938 cadet_start_rds(dev);
37939@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
37940 while (i < count && dev->rdsin != dev->rdsout)
37941 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
37942
37943- if (i && copy_to_user(data, readbuf, i))
37944+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
37945 i = -EFAULT;
37946 unlock:
37947 mutex_unlock(&dev->lock);
37948diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
37949index 3940bb0..fb3952a 100644
37950--- a/drivers/media/usb/dvb-usb/cxusb.c
37951+++ b/drivers/media/usb/dvb-usb/cxusb.c
37952@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
37953
37954 struct dib0700_adapter_state {
37955 int (*set_param_save) (struct dvb_frontend *);
37956-};
37957+} __no_const;
37958
37959 static int dib7070_set_param_override(struct dvb_frontend *fe)
37960 {
37961diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
37962index 9382895..ac8093c 100644
37963--- a/drivers/media/usb/dvb-usb/dw2102.c
37964+++ b/drivers/media/usb/dvb-usb/dw2102.c
37965@@ -95,7 +95,7 @@ struct su3000_state {
37966
37967 struct s6x0_state {
37968 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
37969-};
37970+} __no_const;
37971
37972 /* debug */
37973 static int dvb_usb_dw2102_debug;
37974diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
37975index aa6e7c7..4cd8061 100644
37976--- a/drivers/media/v4l2-core/v4l2-ioctl.c
37977+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
37978@@ -1923,7 +1923,8 @@ struct v4l2_ioctl_info {
37979 struct file *file, void *fh, void *p);
37980 } u;
37981 void (*debug)(const void *arg, bool write_only);
37982-};
37983+} __do_const;
37984+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
37985
37986 /* This control needs a priority check */
37987 #define INFO_FL_PRIO (1 << 0)
37988@@ -2108,7 +2109,7 @@ static long __video_do_ioctl(struct file *file,
37989 struct video_device *vfd = video_devdata(file);
37990 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
37991 bool write_only = false;
37992- struct v4l2_ioctl_info default_info;
37993+ v4l2_ioctl_info_no_const default_info;
37994 const struct v4l2_ioctl_info *info;
37995 void *fh = file->private_data;
37996 struct v4l2_fh *vfh = NULL;
37997diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
37998index 29b2172..a7c5b31 100644
37999--- a/drivers/memstick/host/r592.c
38000+++ b/drivers/memstick/host/r592.c
38001@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
38002 /* Executes one TPC (data is read/written from small or large fifo) */
38003 static void r592_execute_tpc(struct r592_device *dev)
38004 {
38005- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38006+ bool is_write;
38007 int len, error;
38008 u32 status, reg;
38009
38010@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
38011 return;
38012 }
38013
38014+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38015 len = dev->req->long_data ?
38016 dev->req->sg.length : dev->req->data_len;
38017
38018diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38019index fb69baa..cf7ad22 100644
38020--- a/drivers/message/fusion/mptbase.c
38021+++ b/drivers/message/fusion/mptbase.c
38022@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38023 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38024 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38025
38026+#ifdef CONFIG_GRKERNSEC_HIDESYM
38027+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
38028+#else
38029 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38030 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38031+#endif
38032+
38033 /*
38034 * Rounding UP to nearest 4-kB boundary here...
38035 */
38036diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38037index fa43c39..daeb158 100644
38038--- a/drivers/message/fusion/mptsas.c
38039+++ b/drivers/message/fusion/mptsas.c
38040@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38041 return 0;
38042 }
38043
38044+static inline void
38045+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38046+{
38047+ if (phy_info->port_details) {
38048+ phy_info->port_details->rphy = rphy;
38049+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38050+ ioc->name, rphy));
38051+ }
38052+
38053+ if (rphy) {
38054+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38055+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38056+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38057+ ioc->name, rphy, rphy->dev.release));
38058+ }
38059+}
38060+
38061 /* no mutex */
38062 static void
38063 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38064@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38065 return NULL;
38066 }
38067
38068-static inline void
38069-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38070-{
38071- if (phy_info->port_details) {
38072- phy_info->port_details->rphy = rphy;
38073- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38074- ioc->name, rphy));
38075- }
38076-
38077- if (rphy) {
38078- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38079- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38080- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38081- ioc->name, rphy, rphy->dev.release));
38082- }
38083-}
38084-
38085 static inline struct sas_port *
38086 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38087 {
38088diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38089index 164afa7..b6b2e74 100644
38090--- a/drivers/message/fusion/mptscsih.c
38091+++ b/drivers/message/fusion/mptscsih.c
38092@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38093
38094 h = shost_priv(SChost);
38095
38096- if (h) {
38097- if (h->info_kbuf == NULL)
38098- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38099- return h->info_kbuf;
38100- h->info_kbuf[0] = '\0';
38101+ if (!h)
38102+ return NULL;
38103
38104- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38105- h->info_kbuf[size-1] = '\0';
38106- }
38107+ if (h->info_kbuf == NULL)
38108+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38109+ return h->info_kbuf;
38110+ h->info_kbuf[0] = '\0';
38111+
38112+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38113+ h->info_kbuf[size-1] = '\0';
38114
38115 return h->info_kbuf;
38116 }
38117diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38118index 8001aa6..b137580 100644
38119--- a/drivers/message/i2o/i2o_proc.c
38120+++ b/drivers/message/i2o/i2o_proc.c
38121@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
38122 "Array Controller Device"
38123 };
38124
38125-static char *chtostr(char *tmp, u8 *chars, int n)
38126-{
38127- tmp[0] = 0;
38128- return strncat(tmp, (char *)chars, n);
38129-}
38130-
38131 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38132 char *group)
38133 {
38134@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38135 } *result;
38136
38137 i2o_exec_execute_ddm_table ddm_table;
38138- char tmp[28 + 1];
38139
38140 result = kmalloc(sizeof(*result), GFP_KERNEL);
38141 if (!result)
38142@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38143
38144 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38145 seq_printf(seq, "%-#8x", ddm_table.module_id);
38146- seq_printf(seq, "%-29s",
38147- chtostr(tmp, ddm_table.module_name_version, 28));
38148+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38149 seq_printf(seq, "%9d ", ddm_table.data_size);
38150 seq_printf(seq, "%8d", ddm_table.code_size);
38151
38152@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38153
38154 i2o_driver_result_table *result;
38155 i2o_driver_store_table *dst;
38156- char tmp[28 + 1];
38157
38158 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
38159 if (result == NULL)
38160@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38161
38162 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38163 seq_printf(seq, "%-#8x", dst->module_id);
38164- seq_printf(seq, "%-29s",
38165- chtostr(tmp, dst->module_name_version, 28));
38166- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
38167+ seq_printf(seq, "%-.28s", dst->module_name_version);
38168+ seq_printf(seq, "%-.8s", dst->date);
38169 seq_printf(seq, "%8d ", dst->module_size);
38170 seq_printf(seq, "%8d ", dst->mpb_size);
38171 seq_printf(seq, "0x%04x", dst->module_flags);
38172@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38173 // == (allow) 512d bytes (max)
38174 static u16 *work16 = (u16 *) work32;
38175 int token;
38176- char tmp[16 + 1];
38177
38178 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
38179
38180@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38181 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38182 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38183 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38184- seq_printf(seq, "Vendor info : %s\n",
38185- chtostr(tmp, (u8 *) (work32 + 2), 16));
38186- seq_printf(seq, "Product info : %s\n",
38187- chtostr(tmp, (u8 *) (work32 + 6), 16));
38188- seq_printf(seq, "Description : %s\n",
38189- chtostr(tmp, (u8 *) (work32 + 10), 16));
38190- seq_printf(seq, "Product rev. : %s\n",
38191- chtostr(tmp, (u8 *) (work32 + 14), 8));
38192+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38193+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38194+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38195+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38196
38197 seq_printf(seq, "Serial number : ");
38198 print_serial_number(seq, (u8 *) (work32 + 16),
38199@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38200 u8 pad[256]; // allow up to 256 byte (max) serial number
38201 } result;
38202
38203- char tmp[24 + 1];
38204-
38205 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
38206
38207 if (token < 0) {
38208@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38209 }
38210
38211 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38212- seq_printf(seq, "Module name : %s\n",
38213- chtostr(tmp, result.module_name, 24));
38214- seq_printf(seq, "Module revision : %s\n",
38215- chtostr(tmp, result.module_rev, 8));
38216+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
38217+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38218
38219 seq_printf(seq, "Serial number : ");
38220 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
38221@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38222 u8 instance_number[4];
38223 } result;
38224
38225- char tmp[64 + 1];
38226-
38227 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
38228
38229 if (token < 0) {
38230@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38231 return 0;
38232 }
38233
38234- seq_printf(seq, "Device name : %s\n",
38235- chtostr(tmp, result.device_name, 64));
38236- seq_printf(seq, "Service name : %s\n",
38237- chtostr(tmp, result.service_name, 64));
38238- seq_printf(seq, "Physical name : %s\n",
38239- chtostr(tmp, result.physical_location, 64));
38240- seq_printf(seq, "Instance number : %s\n",
38241- chtostr(tmp, result.instance_number, 4));
38242+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
38243+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
38244+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
38245+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
38246
38247 return 0;
38248 }
38249diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
38250index a8c08f3..155fe3d 100644
38251--- a/drivers/message/i2o/iop.c
38252+++ b/drivers/message/i2o/iop.c
38253@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
38254
38255 spin_lock_irqsave(&c->context_list_lock, flags);
38256
38257- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
38258- atomic_inc(&c->context_list_counter);
38259+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
38260+ atomic_inc_unchecked(&c->context_list_counter);
38261
38262- entry->context = atomic_read(&c->context_list_counter);
38263+ entry->context = atomic_read_unchecked(&c->context_list_counter);
38264
38265 list_add(&entry->list, &c->context_list);
38266
38267@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
38268
38269 #if BITS_PER_LONG == 64
38270 spin_lock_init(&c->context_list_lock);
38271- atomic_set(&c->context_list_counter, 0);
38272+ atomic_set_unchecked(&c->context_list_counter, 0);
38273 INIT_LIST_HEAD(&c->context_list);
38274 #endif
38275
38276diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
38277index 45ece11..8efa218 100644
38278--- a/drivers/mfd/janz-cmodio.c
38279+++ b/drivers/mfd/janz-cmodio.c
38280@@ -13,6 +13,7 @@
38281
38282 #include <linux/kernel.h>
38283 #include <linux/module.h>
38284+#include <linux/slab.h>
38285 #include <linux/init.h>
38286 #include <linux/pci.h>
38287 #include <linux/interrupt.h>
38288diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
38289index a5f9888..1c0ed56 100644
38290--- a/drivers/mfd/twl4030-irq.c
38291+++ b/drivers/mfd/twl4030-irq.c
38292@@ -35,6 +35,7 @@
38293 #include <linux/of.h>
38294 #include <linux/irqdomain.h>
38295 #include <linux/i2c/twl.h>
38296+#include <asm/pgtable.h>
38297
38298 #include "twl-core.h"
38299
38300@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
38301 * Install an irq handler for each of the SIH modules;
38302 * clone dummy irq_chip since PIH can't *do* anything
38303 */
38304- twl4030_irq_chip = dummy_irq_chip;
38305- twl4030_irq_chip.name = "twl4030";
38306+ pax_open_kernel();
38307+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
38308+ *(const char **)&twl4030_irq_chip.name = "twl4030";
38309
38310- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
38311+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
38312+ pax_close_kernel();
38313
38314 for (i = irq_base; i < irq_end; i++) {
38315 irq_set_chip_and_handler(i, &twl4030_irq_chip,
38316diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
38317index 277a8db..0e0b754 100644
38318--- a/drivers/mfd/twl6030-irq.c
38319+++ b/drivers/mfd/twl6030-irq.c
38320@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
38321 * install an irq handler for each of the modules;
38322 * clone dummy irq_chip since PIH can't *do* anything
38323 */
38324- twl6030_irq_chip = dummy_irq_chip;
38325- twl6030_irq_chip.name = "twl6030";
38326- twl6030_irq_chip.irq_set_type = NULL;
38327- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
38328+ pax_open_kernel();
38329+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
38330+ *(const char **)&twl6030_irq_chip.name = "twl6030";
38331+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
38332+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
38333+ pax_close_kernel();
38334
38335 for (i = irq_base; i < irq_end; i++) {
38336 irq_set_chip_and_handler(i, &twl6030_irq_chip,
38337diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
38338index f428d86..274c368 100644
38339--- a/drivers/misc/c2port/core.c
38340+++ b/drivers/misc/c2port/core.c
38341@@ -924,7 +924,9 @@ struct c2port_device *c2port_device_register(char *name,
38342 mutex_init(&c2dev->mutex);
38343
38344 /* Create binary file */
38345- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
38346+ pax_open_kernel();
38347+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
38348+ pax_close_kernel();
38349 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
38350 if (unlikely(ret))
38351 goto error_device_create_bin_file;
38352diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
38353index 3aa9a96..59cf685 100644
38354--- a/drivers/misc/kgdbts.c
38355+++ b/drivers/misc/kgdbts.c
38356@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
38357 char before[BREAK_INSTR_SIZE];
38358 char after[BREAK_INSTR_SIZE];
38359
38360- probe_kernel_read(before, (char *)kgdbts_break_test,
38361+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
38362 BREAK_INSTR_SIZE);
38363 init_simple_test();
38364 ts.tst = plant_and_detach_test;
38365@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
38366 /* Activate test with initial breakpoint */
38367 if (!is_early)
38368 kgdb_breakpoint();
38369- probe_kernel_read(after, (char *)kgdbts_break_test,
38370+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
38371 BREAK_INSTR_SIZE);
38372 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
38373 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
38374diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
38375index 4a87e5c..76bdf5c 100644
38376--- a/drivers/misc/lis3lv02d/lis3lv02d.c
38377+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
38378@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
38379 * the lid is closed. This leads to interrupts as soon as a little move
38380 * is done.
38381 */
38382- atomic_inc(&lis3->count);
38383+ atomic_inc_unchecked(&lis3->count);
38384
38385 wake_up_interruptible(&lis3->misc_wait);
38386 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
38387@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
38388 if (lis3->pm_dev)
38389 pm_runtime_get_sync(lis3->pm_dev);
38390
38391- atomic_set(&lis3->count, 0);
38392+ atomic_set_unchecked(&lis3->count, 0);
38393 return 0;
38394 }
38395
38396@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
38397 add_wait_queue(&lis3->misc_wait, &wait);
38398 while (true) {
38399 set_current_state(TASK_INTERRUPTIBLE);
38400- data = atomic_xchg(&lis3->count, 0);
38401+ data = atomic_xchg_unchecked(&lis3->count, 0);
38402 if (data)
38403 break;
38404
38405@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
38406 struct lis3lv02d, miscdev);
38407
38408 poll_wait(file, &lis3->misc_wait, wait);
38409- if (atomic_read(&lis3->count))
38410+ if (atomic_read_unchecked(&lis3->count))
38411 return POLLIN | POLLRDNORM;
38412 return 0;
38413 }
38414diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
38415index c439c82..1f20f57 100644
38416--- a/drivers/misc/lis3lv02d/lis3lv02d.h
38417+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
38418@@ -297,7 +297,7 @@ struct lis3lv02d {
38419 struct input_polled_dev *idev; /* input device */
38420 struct platform_device *pdev; /* platform device */
38421 struct regulator_bulk_data regulators[2];
38422- atomic_t count; /* interrupt count after last read */
38423+ atomic_unchecked_t count; /* interrupt count after last read */
38424 union axis_conversion ac; /* hw -> logical axis */
38425 int mapped_btns[3];
38426
38427diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
38428index 2f30bad..c4c13d0 100644
38429--- a/drivers/misc/sgi-gru/gruhandles.c
38430+++ b/drivers/misc/sgi-gru/gruhandles.c
38431@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
38432 unsigned long nsec;
38433
38434 nsec = CLKS2NSEC(clks);
38435- atomic_long_inc(&mcs_op_statistics[op].count);
38436- atomic_long_add(nsec, &mcs_op_statistics[op].total);
38437+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
38438+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
38439 if (mcs_op_statistics[op].max < nsec)
38440 mcs_op_statistics[op].max = nsec;
38441 }
38442diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
38443index 950dbe9..eeef0f8 100644
38444--- a/drivers/misc/sgi-gru/gruprocfs.c
38445+++ b/drivers/misc/sgi-gru/gruprocfs.c
38446@@ -32,9 +32,9 @@
38447
38448 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
38449
38450-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
38451+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
38452 {
38453- unsigned long val = atomic_long_read(v);
38454+ unsigned long val = atomic_long_read_unchecked(v);
38455
38456 seq_printf(s, "%16lu %s\n", val, id);
38457 }
38458@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
38459
38460 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
38461 for (op = 0; op < mcsop_last; op++) {
38462- count = atomic_long_read(&mcs_op_statistics[op].count);
38463- total = atomic_long_read(&mcs_op_statistics[op].total);
38464+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
38465+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
38466 max = mcs_op_statistics[op].max;
38467 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
38468 count ? total / count : 0, max);
38469diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
38470index 5c3ce24..4915ccb 100644
38471--- a/drivers/misc/sgi-gru/grutables.h
38472+++ b/drivers/misc/sgi-gru/grutables.h
38473@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
38474 * GRU statistics.
38475 */
38476 struct gru_stats_s {
38477- atomic_long_t vdata_alloc;
38478- atomic_long_t vdata_free;
38479- atomic_long_t gts_alloc;
38480- atomic_long_t gts_free;
38481- atomic_long_t gms_alloc;
38482- atomic_long_t gms_free;
38483- atomic_long_t gts_double_allocate;
38484- atomic_long_t assign_context;
38485- atomic_long_t assign_context_failed;
38486- atomic_long_t free_context;
38487- atomic_long_t load_user_context;
38488- atomic_long_t load_kernel_context;
38489- atomic_long_t lock_kernel_context;
38490- atomic_long_t unlock_kernel_context;
38491- atomic_long_t steal_user_context;
38492- atomic_long_t steal_kernel_context;
38493- atomic_long_t steal_context_failed;
38494- atomic_long_t nopfn;
38495- atomic_long_t asid_new;
38496- atomic_long_t asid_next;
38497- atomic_long_t asid_wrap;
38498- atomic_long_t asid_reuse;
38499- atomic_long_t intr;
38500- atomic_long_t intr_cbr;
38501- atomic_long_t intr_tfh;
38502- atomic_long_t intr_spurious;
38503- atomic_long_t intr_mm_lock_failed;
38504- atomic_long_t call_os;
38505- atomic_long_t call_os_wait_queue;
38506- atomic_long_t user_flush_tlb;
38507- atomic_long_t user_unload_context;
38508- atomic_long_t user_exception;
38509- atomic_long_t set_context_option;
38510- atomic_long_t check_context_retarget_intr;
38511- atomic_long_t check_context_unload;
38512- atomic_long_t tlb_dropin;
38513- atomic_long_t tlb_preload_page;
38514- atomic_long_t tlb_dropin_fail_no_asid;
38515- atomic_long_t tlb_dropin_fail_upm;
38516- atomic_long_t tlb_dropin_fail_invalid;
38517- atomic_long_t tlb_dropin_fail_range_active;
38518- atomic_long_t tlb_dropin_fail_idle;
38519- atomic_long_t tlb_dropin_fail_fmm;
38520- atomic_long_t tlb_dropin_fail_no_exception;
38521- atomic_long_t tfh_stale_on_fault;
38522- atomic_long_t mmu_invalidate_range;
38523- atomic_long_t mmu_invalidate_page;
38524- atomic_long_t flush_tlb;
38525- atomic_long_t flush_tlb_gru;
38526- atomic_long_t flush_tlb_gru_tgh;
38527- atomic_long_t flush_tlb_gru_zero_asid;
38528+ atomic_long_unchecked_t vdata_alloc;
38529+ atomic_long_unchecked_t vdata_free;
38530+ atomic_long_unchecked_t gts_alloc;
38531+ atomic_long_unchecked_t gts_free;
38532+ atomic_long_unchecked_t gms_alloc;
38533+ atomic_long_unchecked_t gms_free;
38534+ atomic_long_unchecked_t gts_double_allocate;
38535+ atomic_long_unchecked_t assign_context;
38536+ atomic_long_unchecked_t assign_context_failed;
38537+ atomic_long_unchecked_t free_context;
38538+ atomic_long_unchecked_t load_user_context;
38539+ atomic_long_unchecked_t load_kernel_context;
38540+ atomic_long_unchecked_t lock_kernel_context;
38541+ atomic_long_unchecked_t unlock_kernel_context;
38542+ atomic_long_unchecked_t steal_user_context;
38543+ atomic_long_unchecked_t steal_kernel_context;
38544+ atomic_long_unchecked_t steal_context_failed;
38545+ atomic_long_unchecked_t nopfn;
38546+ atomic_long_unchecked_t asid_new;
38547+ atomic_long_unchecked_t asid_next;
38548+ atomic_long_unchecked_t asid_wrap;
38549+ atomic_long_unchecked_t asid_reuse;
38550+ atomic_long_unchecked_t intr;
38551+ atomic_long_unchecked_t intr_cbr;
38552+ atomic_long_unchecked_t intr_tfh;
38553+ atomic_long_unchecked_t intr_spurious;
38554+ atomic_long_unchecked_t intr_mm_lock_failed;
38555+ atomic_long_unchecked_t call_os;
38556+ atomic_long_unchecked_t call_os_wait_queue;
38557+ atomic_long_unchecked_t user_flush_tlb;
38558+ atomic_long_unchecked_t user_unload_context;
38559+ atomic_long_unchecked_t user_exception;
38560+ atomic_long_unchecked_t set_context_option;
38561+ atomic_long_unchecked_t check_context_retarget_intr;
38562+ atomic_long_unchecked_t check_context_unload;
38563+ atomic_long_unchecked_t tlb_dropin;
38564+ atomic_long_unchecked_t tlb_preload_page;
38565+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
38566+ atomic_long_unchecked_t tlb_dropin_fail_upm;
38567+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
38568+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
38569+ atomic_long_unchecked_t tlb_dropin_fail_idle;
38570+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
38571+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
38572+ atomic_long_unchecked_t tfh_stale_on_fault;
38573+ atomic_long_unchecked_t mmu_invalidate_range;
38574+ atomic_long_unchecked_t mmu_invalidate_page;
38575+ atomic_long_unchecked_t flush_tlb;
38576+ atomic_long_unchecked_t flush_tlb_gru;
38577+ atomic_long_unchecked_t flush_tlb_gru_tgh;
38578+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
38579
38580- atomic_long_t copy_gpa;
38581- atomic_long_t read_gpa;
38582+ atomic_long_unchecked_t copy_gpa;
38583+ atomic_long_unchecked_t read_gpa;
38584
38585- atomic_long_t mesq_receive;
38586- atomic_long_t mesq_receive_none;
38587- atomic_long_t mesq_send;
38588- atomic_long_t mesq_send_failed;
38589- atomic_long_t mesq_noop;
38590- atomic_long_t mesq_send_unexpected_error;
38591- atomic_long_t mesq_send_lb_overflow;
38592- atomic_long_t mesq_send_qlimit_reached;
38593- atomic_long_t mesq_send_amo_nacked;
38594- atomic_long_t mesq_send_put_nacked;
38595- atomic_long_t mesq_page_overflow;
38596- atomic_long_t mesq_qf_locked;
38597- atomic_long_t mesq_qf_noop_not_full;
38598- atomic_long_t mesq_qf_switch_head_failed;
38599- atomic_long_t mesq_qf_unexpected_error;
38600- atomic_long_t mesq_noop_unexpected_error;
38601- atomic_long_t mesq_noop_lb_overflow;
38602- atomic_long_t mesq_noop_qlimit_reached;
38603- atomic_long_t mesq_noop_amo_nacked;
38604- atomic_long_t mesq_noop_put_nacked;
38605- atomic_long_t mesq_noop_page_overflow;
38606+ atomic_long_unchecked_t mesq_receive;
38607+ atomic_long_unchecked_t mesq_receive_none;
38608+ atomic_long_unchecked_t mesq_send;
38609+ atomic_long_unchecked_t mesq_send_failed;
38610+ atomic_long_unchecked_t mesq_noop;
38611+ atomic_long_unchecked_t mesq_send_unexpected_error;
38612+ atomic_long_unchecked_t mesq_send_lb_overflow;
38613+ atomic_long_unchecked_t mesq_send_qlimit_reached;
38614+ atomic_long_unchecked_t mesq_send_amo_nacked;
38615+ atomic_long_unchecked_t mesq_send_put_nacked;
38616+ atomic_long_unchecked_t mesq_page_overflow;
38617+ atomic_long_unchecked_t mesq_qf_locked;
38618+ atomic_long_unchecked_t mesq_qf_noop_not_full;
38619+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
38620+ atomic_long_unchecked_t mesq_qf_unexpected_error;
38621+ atomic_long_unchecked_t mesq_noop_unexpected_error;
38622+ atomic_long_unchecked_t mesq_noop_lb_overflow;
38623+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
38624+ atomic_long_unchecked_t mesq_noop_amo_nacked;
38625+ atomic_long_unchecked_t mesq_noop_put_nacked;
38626+ atomic_long_unchecked_t mesq_noop_page_overflow;
38627
38628 };
38629
38630@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
38631 tghop_invalidate, mcsop_last};
38632
38633 struct mcs_op_statistic {
38634- atomic_long_t count;
38635- atomic_long_t total;
38636+ atomic_long_unchecked_t count;
38637+ atomic_long_unchecked_t total;
38638 unsigned long max;
38639 };
38640
38641@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
38642
38643 #define STAT(id) do { \
38644 if (gru_options & OPT_STATS) \
38645- atomic_long_inc(&gru_stats.id); \
38646+ atomic_long_inc_unchecked(&gru_stats.id); \
38647 } while (0)
38648
38649 #ifdef CONFIG_SGI_GRU_DEBUG
38650diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
38651index c862cd4..0d176fe 100644
38652--- a/drivers/misc/sgi-xp/xp.h
38653+++ b/drivers/misc/sgi-xp/xp.h
38654@@ -288,7 +288,7 @@ struct xpc_interface {
38655 xpc_notify_func, void *);
38656 void (*received) (short, int, void *);
38657 enum xp_retval (*partid_to_nasids) (short, void *);
38658-};
38659+} __no_const;
38660
38661 extern struct xpc_interface xpc_interface;
38662
38663diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
38664index b94d5f7..7f494c5 100644
38665--- a/drivers/misc/sgi-xp/xpc.h
38666+++ b/drivers/misc/sgi-xp/xpc.h
38667@@ -835,6 +835,7 @@ struct xpc_arch_operations {
38668 void (*received_payload) (struct xpc_channel *, void *);
38669 void (*notify_senders_of_disconnect) (struct xpc_channel *);
38670 };
38671+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
38672
38673 /* struct xpc_partition act_state values (for XPC HB) */
38674
38675@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
38676 /* found in xpc_main.c */
38677 extern struct device *xpc_part;
38678 extern struct device *xpc_chan;
38679-extern struct xpc_arch_operations xpc_arch_ops;
38680+extern xpc_arch_operations_no_const xpc_arch_ops;
38681 extern int xpc_disengage_timelimit;
38682 extern int xpc_disengage_timedout;
38683 extern int xpc_activate_IRQ_rcvd;
38684diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
38685index d971817..33bdca5 100644
38686--- a/drivers/misc/sgi-xp/xpc_main.c
38687+++ b/drivers/misc/sgi-xp/xpc_main.c
38688@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
38689 .notifier_call = xpc_system_die,
38690 };
38691
38692-struct xpc_arch_operations xpc_arch_ops;
38693+xpc_arch_operations_no_const xpc_arch_ops;
38694
38695 /*
38696 * Timer function to enforce the timelimit on the partition disengage.
38697@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
38698
38699 if (((die_args->trapnr == X86_TRAP_MF) ||
38700 (die_args->trapnr == X86_TRAP_XF)) &&
38701- !user_mode_vm(die_args->regs))
38702+ !user_mode(die_args->regs))
38703 xpc_die_deactivate();
38704
38705 break;
38706diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
38707index 6d8f701..35b6369 100644
38708--- a/drivers/mmc/core/mmc_ops.c
38709+++ b/drivers/mmc/core/mmc_ops.c
38710@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
38711 void *data_buf;
38712 int is_on_stack;
38713
38714- is_on_stack = object_is_on_stack(buf);
38715+ is_on_stack = object_starts_on_stack(buf);
38716 if (is_on_stack) {
38717 /*
38718 * dma onto stack is unsafe/nonportable, but callers to this
38719diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
38720index 53b8fd9..615b462 100644
38721--- a/drivers/mmc/host/dw_mmc.h
38722+++ b/drivers/mmc/host/dw_mmc.h
38723@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
38724 int (*parse_dt)(struct dw_mci *host);
38725 int (*setup_bus)(struct dw_mci *host,
38726 struct device_node *slot_np, u8 bus_width);
38727-};
38728+} __do_const;
38729 #endif /* _DW_MMC_H_ */
38730diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
38731index 82a8de1..3c56ccb 100644
38732--- a/drivers/mmc/host/sdhci-s3c.c
38733+++ b/drivers/mmc/host/sdhci-s3c.c
38734@@ -721,9 +721,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
38735 * we can use overriding functions instead of default.
38736 */
38737 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
38738- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
38739- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
38740- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
38741+ pax_open_kernel();
38742+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
38743+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
38744+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
38745+ pax_close_kernel();
38746 }
38747
38748 /* It supports additional host capabilities if needed */
38749diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
38750index a4eb8b5..8c0628f 100644
38751--- a/drivers/mtd/devices/doc2000.c
38752+++ b/drivers/mtd/devices/doc2000.c
38753@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
38754
38755 /* The ECC will not be calculated correctly if less than 512 is written */
38756 /* DBB-
38757- if (len != 0x200 && eccbuf)
38758+ if (len != 0x200)
38759 printk(KERN_WARNING
38760 "ECC needs a full sector write (adr: %lx size %lx)\n",
38761 (long) to, (long) len);
38762diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
38763index 0c8bb6b..6f35deb 100644
38764--- a/drivers/mtd/nand/denali.c
38765+++ b/drivers/mtd/nand/denali.c
38766@@ -24,6 +24,7 @@
38767 #include <linux/slab.h>
38768 #include <linux/mtd/mtd.h>
38769 #include <linux/module.h>
38770+#include <linux/slab.h>
38771
38772 #include "denali.h"
38773
38774diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
38775index 51b9d6a..52af9a7 100644
38776--- a/drivers/mtd/nftlmount.c
38777+++ b/drivers/mtd/nftlmount.c
38778@@ -24,6 +24,7 @@
38779 #include <asm/errno.h>
38780 #include <linux/delay.h>
38781 #include <linux/slab.h>
38782+#include <linux/sched.h>
38783 #include <linux/mtd/mtd.h>
38784 #include <linux/mtd/nand.h>
38785 #include <linux/mtd/nftl.h>
38786diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
38787index 8dd6ba5..419cc1d 100644
38788--- a/drivers/mtd/sm_ftl.c
38789+++ b/drivers/mtd/sm_ftl.c
38790@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
38791 #define SM_CIS_VENDOR_OFFSET 0x59
38792 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
38793 {
38794- struct attribute_group *attr_group;
38795+ attribute_group_no_const *attr_group;
38796 struct attribute **attributes;
38797 struct sm_sysfs_attribute *vendor_attribute;
38798
38799diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
38800index b7d45f3..b5c89d9 100644
38801--- a/drivers/net/bonding/bond_main.c
38802+++ b/drivers/net/bonding/bond_main.c
38803@@ -4861,7 +4861,7 @@ static unsigned int bond_get_num_tx_queues(void)
38804 return tx_queues;
38805 }
38806
38807-static struct rtnl_link_ops bond_link_ops __read_mostly = {
38808+static struct rtnl_link_ops bond_link_ops = {
38809 .kind = "bond",
38810 .priv_size = sizeof(struct bonding),
38811 .setup = bond_setup,
38812diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
38813index 70dba5d..11a0919 100644
38814--- a/drivers/net/ethernet/8390/ax88796.c
38815+++ b/drivers/net/ethernet/8390/ax88796.c
38816@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
38817 if (ax->plat->reg_offsets)
38818 ei_local->reg_offset = ax->plat->reg_offsets;
38819 else {
38820+ resource_size_t _mem_size = mem_size;
38821+ do_div(_mem_size, 0x18);
38822 ei_local->reg_offset = ax->reg_offsets;
38823 for (ret = 0; ret < 0x18; ret++)
38824- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
38825+ ax->reg_offsets[ret] = _mem_size * ret;
38826 }
38827
38828 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
38829diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
38830index 0991534..8098e92 100644
38831--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
38832+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
38833@@ -1094,7 +1094,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
38834 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
38835 {
38836 /* RX_MODE controlling object */
38837- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
38838+ bnx2x_init_rx_mode_obj(bp);
38839
38840 /* multicast configuration controlling object */
38841 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
38842diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
38843index 09b625e..15b16fe 100644
38844--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
38845+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
38846@@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
38847 return rc;
38848 }
38849
38850-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
38851- struct bnx2x_rx_mode_obj *o)
38852+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
38853 {
38854 if (CHIP_IS_E1x(bp)) {
38855- o->wait_comp = bnx2x_empty_rx_mode_wait;
38856- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
38857+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
38858+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
38859 } else {
38860- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
38861- o->config_rx_mode = bnx2x_set_rx_mode_e2;
38862+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
38863+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
38864 }
38865 }
38866
38867diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
38868index adbd91b..58ec94a 100644
38869--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
38870+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
38871@@ -1293,8 +1293,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
38872
38873 /********************* RX MODE ****************/
38874
38875-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
38876- struct bnx2x_rx_mode_obj *o);
38877+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
38878
38879 /**
38880 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
38881diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
38882index d330e81..ce1fb9a 100644
38883--- a/drivers/net/ethernet/broadcom/tg3.h
38884+++ b/drivers/net/ethernet/broadcom/tg3.h
38885@@ -146,6 +146,7 @@
38886 #define CHIPREV_ID_5750_A0 0x4000
38887 #define CHIPREV_ID_5750_A1 0x4001
38888 #define CHIPREV_ID_5750_A3 0x4003
38889+#define CHIPREV_ID_5750_C1 0x4201
38890 #define CHIPREV_ID_5750_C2 0x4202
38891 #define CHIPREV_ID_5752_A0_HW 0x5000
38892 #define CHIPREV_ID_5752_A0 0x6000
38893diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
38894index 8cffcdf..aadf043 100644
38895--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
38896+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
38897@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
38898 */
38899 struct l2t_skb_cb {
38900 arp_failure_handler_func arp_failure_handler;
38901-};
38902+} __no_const;
38903
38904 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
38905
38906diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
38907index 4c83003..2a2a5b9 100644
38908--- a/drivers/net/ethernet/dec/tulip/de4x5.c
38909+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
38910@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38911 for (i=0; i<ETH_ALEN; i++) {
38912 tmp.addr[i] = dev->dev_addr[i];
38913 }
38914- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38915+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38916 break;
38917
38918 case DE4X5_SET_HWADDR: /* Set the hardware address */
38919@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38920 spin_lock_irqsave(&lp->lock, flags);
38921 memcpy(&statbuf, &lp->pktStats, ioc->len);
38922 spin_unlock_irqrestore(&lp->lock, flags);
38923- if (copy_to_user(ioc->data, &statbuf, ioc->len))
38924+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
38925 return -EFAULT;
38926 break;
38927 }
38928diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
38929index 4d6f3c5..6169e60 100644
38930--- a/drivers/net/ethernet/emulex/benet/be_main.c
38931+++ b/drivers/net/ethernet/emulex/benet/be_main.c
38932@@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
38933
38934 if (wrapped)
38935 newacc += 65536;
38936- ACCESS_ONCE(*acc) = newacc;
38937+ ACCESS_ONCE_RW(*acc) = newacc;
38938 }
38939
38940 void be_parse_stats(struct be_adapter *adapter)
38941diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
38942index 74d749e..eefb1bd 100644
38943--- a/drivers/net/ethernet/faraday/ftgmac100.c
38944+++ b/drivers/net/ethernet/faraday/ftgmac100.c
38945@@ -31,6 +31,8 @@
38946 #include <linux/netdevice.h>
38947 #include <linux/phy.h>
38948 #include <linux/platform_device.h>
38949+#include <linux/interrupt.h>
38950+#include <linux/irqreturn.h>
38951 #include <net/ip.h>
38952
38953 #include "ftgmac100.h"
38954diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
38955index b901a01..1ff32ee 100644
38956--- a/drivers/net/ethernet/faraday/ftmac100.c
38957+++ b/drivers/net/ethernet/faraday/ftmac100.c
38958@@ -31,6 +31,8 @@
38959 #include <linux/module.h>
38960 #include <linux/netdevice.h>
38961 #include <linux/platform_device.h>
38962+#include <linux/interrupt.h>
38963+#include <linux/irqreturn.h>
38964
38965 #include "ftmac100.h"
38966
38967diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
38968index bb9256a..56d8752 100644
38969--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
38970+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
38971@@ -806,7 +806,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
38972 }
38973
38974 /* update the base incval used to calculate frequency adjustment */
38975- ACCESS_ONCE(adapter->base_incval) = incval;
38976+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
38977 smp_mb();
38978
38979 /* need lock to prevent incorrect read while modifying cyclecounter */
38980diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
38981index fbe5363..266b4e3 100644
38982--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
38983+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
38984@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
38985 struct __vxge_hw_fifo *fifo;
38986 struct vxge_hw_fifo_config *config;
38987 u32 txdl_size, txdl_per_memblock;
38988- struct vxge_hw_mempool_cbs fifo_mp_callback;
38989+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
38990+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
38991+ };
38992+
38993 struct __vxge_hw_virtualpath *vpath;
38994
38995 if ((vp == NULL) || (attr == NULL)) {
38996@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
38997 goto exit;
38998 }
38999
39000- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
39001-
39002 fifo->mempool =
39003 __vxge_hw_mempool_create(vpath->hldev,
39004 fifo->config->memblock_size,
39005diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
39006index 998974f..ecd26db 100644
39007--- a/drivers/net/ethernet/realtek/r8169.c
39008+++ b/drivers/net/ethernet/realtek/r8169.c
39009@@ -741,22 +741,22 @@ struct rtl8169_private {
39010 struct mdio_ops {
39011 void (*write)(struct rtl8169_private *, int, int);
39012 int (*read)(struct rtl8169_private *, int);
39013- } mdio_ops;
39014+ } __no_const mdio_ops;
39015
39016 struct pll_power_ops {
39017 void (*down)(struct rtl8169_private *);
39018 void (*up)(struct rtl8169_private *);
39019- } pll_power_ops;
39020+ } __no_const pll_power_ops;
39021
39022 struct jumbo_ops {
39023 void (*enable)(struct rtl8169_private *);
39024 void (*disable)(struct rtl8169_private *);
39025- } jumbo_ops;
39026+ } __no_const jumbo_ops;
39027
39028 struct csi_ops {
39029 void (*write)(struct rtl8169_private *, int, int);
39030 u32 (*read)(struct rtl8169_private *, int);
39031- } csi_ops;
39032+ } __no_const csi_ops;
39033
39034 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
39035 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
39036diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
39037index 0767043f..08c2553 100644
39038--- a/drivers/net/ethernet/sfc/ptp.c
39039+++ b/drivers/net/ethernet/sfc/ptp.c
39040@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
39041 (u32)((u64)ptp->start.dma_addr >> 32));
39042
39043 /* Clear flag that signals MC ready */
39044- ACCESS_ONCE(*start) = 0;
39045+ ACCESS_ONCE_RW(*start) = 0;
39046 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
39047 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
39048
39049diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39050index 0c74a70..3bc6f68 100644
39051--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39052+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39053@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
39054
39055 writel(value, ioaddr + MMC_CNTRL);
39056
39057- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39058- MMC_CNTRL, value);
39059+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39060+// MMC_CNTRL, value);
39061 }
39062
39063 /* To mask all all interrupts.*/
39064diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
39065index e6fe0d8..2b7d752 100644
39066--- a/drivers/net/hyperv/hyperv_net.h
39067+++ b/drivers/net/hyperv/hyperv_net.h
39068@@ -101,7 +101,7 @@ struct rndis_device {
39069
39070 enum rndis_device_state state;
39071 bool link_state;
39072- atomic_t new_req_id;
39073+ atomic_unchecked_t new_req_id;
39074
39075 spinlock_t request_lock;
39076 struct list_head req_list;
39077diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
39078index 2b657d4..9903bc0 100644
39079--- a/drivers/net/hyperv/rndis_filter.c
39080+++ b/drivers/net/hyperv/rndis_filter.c
39081@@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
39082 * template
39083 */
39084 set = &rndis_msg->msg.set_req;
39085- set->req_id = atomic_inc_return(&dev->new_req_id);
39086+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39087
39088 /* Add to the request list */
39089 spin_lock_irqsave(&dev->request_lock, flags);
39090@@ -758,7 +758,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
39091
39092 /* Setup the rndis set */
39093 halt = &request->request_msg.msg.halt_req;
39094- halt->req_id = atomic_inc_return(&dev->new_req_id);
39095+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39096
39097 /* Ignore return since this msg is optional. */
39098 rndis_filter_send_request(dev, request);
39099diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
39100index 1e9cb0b..7839125 100644
39101--- a/drivers/net/ieee802154/fakehard.c
39102+++ b/drivers/net/ieee802154/fakehard.c
39103@@ -386,7 +386,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
39104 phy->transmit_power = 0xbf;
39105
39106 dev->netdev_ops = &fake_ops;
39107- dev->ml_priv = &fake_mlme;
39108+ dev->ml_priv = (void *)&fake_mlme;
39109
39110 priv = netdev_priv(dev);
39111 priv->phy = phy;
39112diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
39113index d3fb97d..19520c7 100644
39114--- a/drivers/net/macvlan.c
39115+++ b/drivers/net/macvlan.c
39116@@ -851,13 +851,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
39117 int macvlan_link_register(struct rtnl_link_ops *ops)
39118 {
39119 /* common fields */
39120- ops->priv_size = sizeof(struct macvlan_dev);
39121- ops->validate = macvlan_validate;
39122- ops->maxtype = IFLA_MACVLAN_MAX;
39123- ops->policy = macvlan_policy;
39124- ops->changelink = macvlan_changelink;
39125- ops->get_size = macvlan_get_size;
39126- ops->fill_info = macvlan_fill_info;
39127+ pax_open_kernel();
39128+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
39129+ *(void **)&ops->validate = macvlan_validate;
39130+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
39131+ *(const void **)&ops->policy = macvlan_policy;
39132+ *(void **)&ops->changelink = macvlan_changelink;
39133+ *(void **)&ops->get_size = macvlan_get_size;
39134+ *(void **)&ops->fill_info = macvlan_fill_info;
39135+ pax_close_kernel();
39136
39137 return rtnl_link_register(ops);
39138 };
39139@@ -913,7 +915,7 @@ static int macvlan_device_event(struct notifier_block *unused,
39140 return NOTIFY_DONE;
39141 }
39142
39143-static struct notifier_block macvlan_notifier_block __read_mostly = {
39144+static struct notifier_block macvlan_notifier_block = {
39145 .notifier_call = macvlan_device_event,
39146 };
39147
39148diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
39149index 0f0f9ce..0ca5819 100644
39150--- a/drivers/net/macvtap.c
39151+++ b/drivers/net/macvtap.c
39152@@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
39153 return NOTIFY_DONE;
39154 }
39155
39156-static struct notifier_block macvtap_notifier_block __read_mostly = {
39157+static struct notifier_block macvtap_notifier_block = {
39158 .notifier_call = macvtap_device_event,
39159 };
39160
39161diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
39162index daec9b0..6428fcb 100644
39163--- a/drivers/net/phy/mdio-bitbang.c
39164+++ b/drivers/net/phy/mdio-bitbang.c
39165@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
39166 struct mdiobb_ctrl *ctrl = bus->priv;
39167
39168 module_put(ctrl->ops->owner);
39169+ mdiobus_unregister(bus);
39170 mdiobus_free(bus);
39171 }
39172 EXPORT_SYMBOL(free_mdio_bitbang);
39173diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
39174index 508570e..f706dc7 100644
39175--- a/drivers/net/ppp/ppp_generic.c
39176+++ b/drivers/net/ppp/ppp_generic.c
39177@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39178 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
39179 struct ppp_stats stats;
39180 struct ppp_comp_stats cstats;
39181- char *vers;
39182
39183 switch (cmd) {
39184 case SIOCGPPPSTATS:
39185@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39186 break;
39187
39188 case SIOCGPPPVER:
39189- vers = PPP_VERSION;
39190- if (copy_to_user(addr, vers, strlen(vers) + 1))
39191+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
39192 break;
39193 err = 0;
39194 break;
39195diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
39196index ad86660..9fd0884 100644
39197--- a/drivers/net/team/team.c
39198+++ b/drivers/net/team/team.c
39199@@ -2601,7 +2601,7 @@ static int team_device_event(struct notifier_block *unused,
39200 return NOTIFY_DONE;
39201 }
39202
39203-static struct notifier_block team_notifier_block __read_mostly = {
39204+static struct notifier_block team_notifier_block = {
39205 .notifier_call = team_device_event,
39206 };
39207
39208diff --git a/drivers/net/tun.c b/drivers/net/tun.c
39209index 2917a86..edd463f 100644
39210--- a/drivers/net/tun.c
39211+++ b/drivers/net/tun.c
39212@@ -1836,7 +1836,7 @@ unlock:
39213 }
39214
39215 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
39216- unsigned long arg, int ifreq_len)
39217+ unsigned long arg, size_t ifreq_len)
39218 {
39219 struct tun_file *tfile = file->private_data;
39220 struct tun_struct *tun;
39221@@ -1848,6 +1848,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
39222 int vnet_hdr_sz;
39223 int ret;
39224
39225+ if (ifreq_len > sizeof ifr)
39226+ return -EFAULT;
39227+
39228 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
39229 if (copy_from_user(&ifr, argp, ifreq_len))
39230 return -EFAULT;
39231diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39232index cd8ccb2..cff5144 100644
39233--- a/drivers/net/usb/hso.c
39234+++ b/drivers/net/usb/hso.c
39235@@ -71,7 +71,7 @@
39236 #include <asm/byteorder.h>
39237 #include <linux/serial_core.h>
39238 #include <linux/serial.h>
39239-
39240+#include <asm/local.h>
39241
39242 #define MOD_AUTHOR "Option Wireless"
39243 #define MOD_DESCRIPTION "USB High Speed Option driver"
39244@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39245 struct urb *urb;
39246
39247 urb = serial->rx_urb[0];
39248- if (serial->port.count > 0) {
39249+ if (atomic_read(&serial->port.count) > 0) {
39250 count = put_rxbuf_data(urb, serial);
39251 if (count == -1)
39252 return;
39253@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39254 DUMP1(urb->transfer_buffer, urb->actual_length);
39255
39256 /* Anyone listening? */
39257- if (serial->port.count == 0)
39258+ if (atomic_read(&serial->port.count) == 0)
39259 return;
39260
39261 if (status == 0) {
39262@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39263 tty_port_tty_set(&serial->port, tty);
39264
39265 /* check for port already opened, if not set the termios */
39266- serial->port.count++;
39267- if (serial->port.count == 1) {
39268+ if (atomic_inc_return(&serial->port.count) == 1) {
39269 serial->rx_state = RX_IDLE;
39270 /* Force default termio settings */
39271 _hso_serial_set_termios(tty, NULL);
39272@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39273 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39274 if (result) {
39275 hso_stop_serial_device(serial->parent);
39276- serial->port.count--;
39277+ atomic_dec(&serial->port.count);
39278 kref_put(&serial->parent->ref, hso_serial_ref_free);
39279 }
39280 } else {
39281@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39282
39283 /* reset the rts and dtr */
39284 /* do the actual close */
39285- serial->port.count--;
39286+ atomic_dec(&serial->port.count);
39287
39288- if (serial->port.count <= 0) {
39289- serial->port.count = 0;
39290+ if (atomic_read(&serial->port.count) <= 0) {
39291+ atomic_set(&serial->port.count, 0);
39292 tty_port_tty_set(&serial->port, NULL);
39293 if (!usb_gone)
39294 hso_stop_serial_device(serial->parent);
39295@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39296
39297 /* the actual setup */
39298 spin_lock_irqsave(&serial->serial_lock, flags);
39299- if (serial->port.count)
39300+ if (atomic_read(&serial->port.count))
39301 _hso_serial_set_termios(tty, old);
39302 else
39303 tty->termios = *old;
39304@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
39305 D1("Pending read interrupt on port %d\n", i);
39306 spin_lock(&serial->serial_lock);
39307 if (serial->rx_state == RX_IDLE &&
39308- serial->port.count > 0) {
39309+ atomic_read(&serial->port.count) > 0) {
39310 /* Setup and send a ctrl req read on
39311 * port i */
39312 if (!serial->rx_urb_filled[0]) {
39313@@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
39314 /* Start all serial ports */
39315 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39316 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39317- if (dev2ser(serial_table[i])->port.count) {
39318+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
39319 result =
39320 hso_start_serial_device(serial_table[i], GFP_NOIO);
39321 hso_kick_transmit(dev2ser(serial_table[i]));
39322diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
39323index 656230e..15525a8 100644
39324--- a/drivers/net/vxlan.c
39325+++ b/drivers/net/vxlan.c
39326@@ -1428,7 +1428,7 @@ nla_put_failure:
39327 return -EMSGSIZE;
39328 }
39329
39330-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
39331+static struct rtnl_link_ops vxlan_link_ops = {
39332 .kind = "vxlan",
39333 .maxtype = IFLA_VXLAN_MAX,
39334 .policy = vxlan_policy,
39335diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39336index 8d78253..bebbb68 100644
39337--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39338+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39339@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39340 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
39341 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
39342
39343- ACCESS_ONCE(ads->ds_link) = i->link;
39344- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
39345+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
39346+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
39347
39348 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
39349 ctl6 = SM(i->keytype, AR_EncrType);
39350@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39351
39352 if ((i->is_first || i->is_last) &&
39353 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
39354- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
39355+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
39356 | set11nTries(i->rates, 1)
39357 | set11nTries(i->rates, 2)
39358 | set11nTries(i->rates, 3)
39359 | (i->dur_update ? AR_DurUpdateEna : 0)
39360 | SM(0, AR_BurstDur);
39361
39362- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
39363+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
39364 | set11nRate(i->rates, 1)
39365 | set11nRate(i->rates, 2)
39366 | set11nRate(i->rates, 3);
39367 } else {
39368- ACCESS_ONCE(ads->ds_ctl2) = 0;
39369- ACCESS_ONCE(ads->ds_ctl3) = 0;
39370+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
39371+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
39372 }
39373
39374 if (!i->is_first) {
39375- ACCESS_ONCE(ads->ds_ctl0) = 0;
39376- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
39377- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
39378+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
39379+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
39380+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
39381 return;
39382 }
39383
39384@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39385 break;
39386 }
39387
39388- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
39389+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
39390 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
39391 | SM(i->txpower, AR_XmitPower)
39392 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
39393@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39394 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
39395 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
39396
39397- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
39398- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
39399+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
39400+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
39401
39402 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
39403 return;
39404
39405- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
39406+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
39407 | set11nPktDurRTSCTS(i->rates, 1);
39408
39409- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
39410+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
39411 | set11nPktDurRTSCTS(i->rates, 3);
39412
39413- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
39414+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
39415 | set11nRateFlags(i->rates, 1)
39416 | set11nRateFlags(i->rates, 2)
39417 | set11nRateFlags(i->rates, 3)
39418diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39419index 301bf72..3f5654f 100644
39420--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39421+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39422@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39423 (i->qcu << AR_TxQcuNum_S) | desc_len;
39424
39425 checksum += val;
39426- ACCESS_ONCE(ads->info) = val;
39427+ ACCESS_ONCE_RW(ads->info) = val;
39428
39429 checksum += i->link;
39430- ACCESS_ONCE(ads->link) = i->link;
39431+ ACCESS_ONCE_RW(ads->link) = i->link;
39432
39433 checksum += i->buf_addr[0];
39434- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
39435+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
39436 checksum += i->buf_addr[1];
39437- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
39438+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
39439 checksum += i->buf_addr[2];
39440- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
39441+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
39442 checksum += i->buf_addr[3];
39443- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
39444+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
39445
39446 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
39447- ACCESS_ONCE(ads->ctl3) = val;
39448+ ACCESS_ONCE_RW(ads->ctl3) = val;
39449 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
39450- ACCESS_ONCE(ads->ctl5) = val;
39451+ ACCESS_ONCE_RW(ads->ctl5) = val;
39452 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
39453- ACCESS_ONCE(ads->ctl7) = val;
39454+ ACCESS_ONCE_RW(ads->ctl7) = val;
39455 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
39456- ACCESS_ONCE(ads->ctl9) = val;
39457+ ACCESS_ONCE_RW(ads->ctl9) = val;
39458
39459 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
39460- ACCESS_ONCE(ads->ctl10) = checksum;
39461+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
39462
39463 if (i->is_first || i->is_last) {
39464- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
39465+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
39466 | set11nTries(i->rates, 1)
39467 | set11nTries(i->rates, 2)
39468 | set11nTries(i->rates, 3)
39469 | (i->dur_update ? AR_DurUpdateEna : 0)
39470 | SM(0, AR_BurstDur);
39471
39472- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
39473+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
39474 | set11nRate(i->rates, 1)
39475 | set11nRate(i->rates, 2)
39476 | set11nRate(i->rates, 3);
39477 } else {
39478- ACCESS_ONCE(ads->ctl13) = 0;
39479- ACCESS_ONCE(ads->ctl14) = 0;
39480+ ACCESS_ONCE_RW(ads->ctl13) = 0;
39481+ ACCESS_ONCE_RW(ads->ctl14) = 0;
39482 }
39483
39484 ads->ctl20 = 0;
39485@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39486
39487 ctl17 = SM(i->keytype, AR_EncrType);
39488 if (!i->is_first) {
39489- ACCESS_ONCE(ads->ctl11) = 0;
39490- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
39491- ACCESS_ONCE(ads->ctl15) = 0;
39492- ACCESS_ONCE(ads->ctl16) = 0;
39493- ACCESS_ONCE(ads->ctl17) = ctl17;
39494- ACCESS_ONCE(ads->ctl18) = 0;
39495- ACCESS_ONCE(ads->ctl19) = 0;
39496+ ACCESS_ONCE_RW(ads->ctl11) = 0;
39497+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
39498+ ACCESS_ONCE_RW(ads->ctl15) = 0;
39499+ ACCESS_ONCE_RW(ads->ctl16) = 0;
39500+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
39501+ ACCESS_ONCE_RW(ads->ctl18) = 0;
39502+ ACCESS_ONCE_RW(ads->ctl19) = 0;
39503 return;
39504 }
39505
39506- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
39507+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
39508 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
39509 | SM(i->txpower, AR_XmitPower)
39510 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
39511@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39512 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
39513 ctl12 |= SM(val, AR_PAPRDChainMask);
39514
39515- ACCESS_ONCE(ads->ctl12) = ctl12;
39516- ACCESS_ONCE(ads->ctl17) = ctl17;
39517+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
39518+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
39519
39520- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
39521+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
39522 | set11nPktDurRTSCTS(i->rates, 1);
39523
39524- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
39525+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
39526 | set11nPktDurRTSCTS(i->rates, 3);
39527
39528- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
39529+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
39530 | set11nRateFlags(i->rates, 1)
39531 | set11nRateFlags(i->rates, 2)
39532 | set11nRateFlags(i->rates, 3)
39533 | SM(i->rtscts_rate, AR_RTSCTSRate);
39534
39535- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
39536+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
39537 }
39538
39539 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
39540diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
39541index 9d26fc5..60d9f14 100644
39542--- a/drivers/net/wireless/ath/ath9k/hw.h
39543+++ b/drivers/net/wireless/ath/ath9k/hw.h
39544@@ -658,7 +658,7 @@ struct ath_hw_private_ops {
39545
39546 /* ANI */
39547 void (*ani_cache_ini_regs)(struct ath_hw *ah);
39548-};
39549+} __no_const;
39550
39551 /**
39552 * struct ath_hw_ops - callbacks used by hardware code and driver code
39553@@ -688,7 +688,7 @@ struct ath_hw_ops {
39554 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
39555 struct ath_hw_antcomb_conf *antconf);
39556 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
39557-};
39558+} __no_const;
39559
39560 struct ath_nf_limits {
39561 s16 max;
39562diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
39563index 3726cd6..b655808 100644
39564--- a/drivers/net/wireless/iwlegacy/3945-mac.c
39565+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
39566@@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39567 */
39568 if (il3945_mod_params.disable_hw_scan) {
39569 D_INFO("Disabling hw_scan\n");
39570- il3945_mac_ops.hw_scan = NULL;
39571+ pax_open_kernel();
39572+ *(void **)&il3945_mac_ops.hw_scan = NULL;
39573+ pax_close_kernel();
39574 }
39575
39576 D_INFO("*** LOAD DRIVER ***\n");
39577diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
39578index 5b9533e..7733880 100644
39579--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
39580+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
39581@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
39582 {
39583 struct iwl_priv *priv = file->private_data;
39584 char buf[64];
39585- int buf_size;
39586+ size_t buf_size;
39587 u32 offset, len;
39588
39589 memset(buf, 0, sizeof(buf));
39590@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
39591 struct iwl_priv *priv = file->private_data;
39592
39593 char buf[8];
39594- int buf_size;
39595+ size_t buf_size;
39596 u32 reset_flag;
39597
39598 memset(buf, 0, sizeof(buf));
39599@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
39600 {
39601 struct iwl_priv *priv = file->private_data;
39602 char buf[8];
39603- int buf_size;
39604+ size_t buf_size;
39605 int ht40;
39606
39607 memset(buf, 0, sizeof(buf));
39608@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
39609 {
39610 struct iwl_priv *priv = file->private_data;
39611 char buf[8];
39612- int buf_size;
39613+ size_t buf_size;
39614 int value;
39615
39616 memset(buf, 0, sizeof(buf));
39617@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
39618 {
39619 struct iwl_priv *priv = file->private_data;
39620 char buf[8];
39621- int buf_size;
39622+ size_t buf_size;
39623 int clear;
39624
39625 memset(buf, 0, sizeof(buf));
39626@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
39627 {
39628 struct iwl_priv *priv = file->private_data;
39629 char buf[8];
39630- int buf_size;
39631+ size_t buf_size;
39632 int trace;
39633
39634 memset(buf, 0, sizeof(buf));
39635@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
39636 {
39637 struct iwl_priv *priv = file->private_data;
39638 char buf[8];
39639- int buf_size;
39640+ size_t buf_size;
39641 int missed;
39642
39643 memset(buf, 0, sizeof(buf));
39644@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
39645
39646 struct iwl_priv *priv = file->private_data;
39647 char buf[8];
39648- int buf_size;
39649+ size_t buf_size;
39650 int plcp;
39651
39652 memset(buf, 0, sizeof(buf));
39653@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
39654
39655 struct iwl_priv *priv = file->private_data;
39656 char buf[8];
39657- int buf_size;
39658+ size_t buf_size;
39659 int flush;
39660
39661 memset(buf, 0, sizeof(buf));
39662@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
39663
39664 struct iwl_priv *priv = file->private_data;
39665 char buf[8];
39666- int buf_size;
39667+ size_t buf_size;
39668 int rts;
39669
39670 if (!priv->cfg->ht_params)
39671@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
39672 {
39673 struct iwl_priv *priv = file->private_data;
39674 char buf[8];
39675- int buf_size;
39676+ size_t buf_size;
39677
39678 memset(buf, 0, sizeof(buf));
39679 buf_size = min(count, sizeof(buf) - 1);
39680@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
39681 struct iwl_priv *priv = file->private_data;
39682 u32 event_log_flag;
39683 char buf[8];
39684- int buf_size;
39685+ size_t buf_size;
39686
39687 /* check that the interface is up */
39688 if (!iwl_is_ready(priv))
39689@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
39690 struct iwl_priv *priv = file->private_data;
39691 char buf[8];
39692 u32 calib_disabled;
39693- int buf_size;
39694+ size_t buf_size;
39695
39696 memset(buf, 0, sizeof(buf));
39697 buf_size = min(count, sizeof(buf) - 1);
39698diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
39699index 35708b9..31f7754 100644
39700--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
39701+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
39702@@ -1100,7 +1100,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
39703 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
39704
39705 char buf[8];
39706- int buf_size;
39707+ size_t buf_size;
39708 u32 reset_flag;
39709
39710 memset(buf, 0, sizeof(buf));
39711@@ -1121,7 +1121,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
39712 {
39713 struct iwl_trans *trans = file->private_data;
39714 char buf[8];
39715- int buf_size;
39716+ size_t buf_size;
39717 int csr;
39718
39719 memset(buf, 0, sizeof(buf));
39720diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
39721index ff90855..e46d223 100644
39722--- a/drivers/net/wireless/mac80211_hwsim.c
39723+++ b/drivers/net/wireless/mac80211_hwsim.c
39724@@ -2062,25 +2062,19 @@ static int __init init_mac80211_hwsim(void)
39725
39726 if (channels > 1) {
39727 hwsim_if_comb.num_different_channels = channels;
39728- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
39729- mac80211_hwsim_ops.cancel_hw_scan =
39730- mac80211_hwsim_cancel_hw_scan;
39731- mac80211_hwsim_ops.sw_scan_start = NULL;
39732- mac80211_hwsim_ops.sw_scan_complete = NULL;
39733- mac80211_hwsim_ops.remain_on_channel =
39734- mac80211_hwsim_roc;
39735- mac80211_hwsim_ops.cancel_remain_on_channel =
39736- mac80211_hwsim_croc;
39737- mac80211_hwsim_ops.add_chanctx =
39738- mac80211_hwsim_add_chanctx;
39739- mac80211_hwsim_ops.remove_chanctx =
39740- mac80211_hwsim_remove_chanctx;
39741- mac80211_hwsim_ops.change_chanctx =
39742- mac80211_hwsim_change_chanctx;
39743- mac80211_hwsim_ops.assign_vif_chanctx =
39744- mac80211_hwsim_assign_vif_chanctx;
39745- mac80211_hwsim_ops.unassign_vif_chanctx =
39746- mac80211_hwsim_unassign_vif_chanctx;
39747+ pax_open_kernel();
39748+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
39749+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
39750+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
39751+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
39752+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
39753+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
39754+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
39755+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
39756+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
39757+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
39758+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
39759+ pax_close_kernel();
39760 }
39761
39762 spin_lock_init(&hwsim_radio_lock);
39763diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39764index abe1d03..fb02c22 100644
39765--- a/drivers/net/wireless/rndis_wlan.c
39766+++ b/drivers/net/wireless/rndis_wlan.c
39767@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39768
39769 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
39770
39771- if (rts_threshold < 0 || rts_threshold > 2347)
39772+ if (rts_threshold > 2347)
39773 rts_threshold = 2347;
39774
39775 tmp = cpu_to_le32(rts_threshold);
39776diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
39777index 0751b35..246ba3e 100644
39778--- a/drivers/net/wireless/rt2x00/rt2x00.h
39779+++ b/drivers/net/wireless/rt2x00/rt2x00.h
39780@@ -398,7 +398,7 @@ struct rt2x00_intf {
39781 * for hardware which doesn't support hardware
39782 * sequence counting.
39783 */
39784- atomic_t seqno;
39785+ atomic_unchecked_t seqno;
39786 };
39787
39788 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
39789diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
39790index e488b94..14b6a0c 100644
39791--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
39792+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
39793@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
39794 * sequence counter given by mac80211.
39795 */
39796 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
39797- seqno = atomic_add_return(0x10, &intf->seqno);
39798+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
39799 else
39800- seqno = atomic_read(&intf->seqno);
39801+ seqno = atomic_read_unchecked(&intf->seqno);
39802
39803 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
39804 hdr->seq_ctrl |= cpu_to_le16(seqno);
39805diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
39806index e57ee48..541cf6c 100644
39807--- a/drivers/net/wireless/ti/wl1251/sdio.c
39808+++ b/drivers/net/wireless/ti/wl1251/sdio.c
39809@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
39810
39811 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
39812
39813- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
39814- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
39815+ pax_open_kernel();
39816+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
39817+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
39818+ pax_close_kernel();
39819
39820 wl1251_info("using dedicated interrupt line");
39821 } else {
39822- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
39823- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
39824+ pax_open_kernel();
39825+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
39826+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
39827+ pax_close_kernel();
39828
39829 wl1251_info("using SDIO interrupt");
39830 }
39831diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
39832index e5f5f8f..fdf15b7 100644
39833--- a/drivers/net/wireless/ti/wl12xx/main.c
39834+++ b/drivers/net/wireless/ti/wl12xx/main.c
39835@@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
39836 sizeof(wl->conf.mem));
39837
39838 /* read data preparation is only needed by wl127x */
39839- wl->ops->prepare_read = wl127x_prepare_read;
39840+ pax_open_kernel();
39841+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
39842+ pax_close_kernel();
39843
39844 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
39845 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
39846@@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
39847 sizeof(wl->conf.mem));
39848
39849 /* read data preparation is only needed by wl127x */
39850- wl->ops->prepare_read = wl127x_prepare_read;
39851+ pax_open_kernel();
39852+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
39853+ pax_close_kernel();
39854
39855 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
39856 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
39857diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
39858index 8d8c1f8..e754844 100644
39859--- a/drivers/net/wireless/ti/wl18xx/main.c
39860+++ b/drivers/net/wireless/ti/wl18xx/main.c
39861@@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
39862 }
39863
39864 if (!checksum_param) {
39865- wl18xx_ops.set_rx_csum = NULL;
39866- wl18xx_ops.init_vif = NULL;
39867+ pax_open_kernel();
39868+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
39869+ *(void **)&wl18xx_ops.init_vif = NULL;
39870+ pax_close_kernel();
39871 }
39872
39873 /* Enable 11a Band only if we have 5G antennas */
39874diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39875index d93b2b6..ae50401 100644
39876--- a/drivers/oprofile/buffer_sync.c
39877+++ b/drivers/oprofile/buffer_sync.c
39878@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39879 if (cookie == NO_COOKIE)
39880 offset = pc;
39881 if (cookie == INVALID_COOKIE) {
39882- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39883+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39884 offset = pc;
39885 }
39886 if (cookie != last_cookie) {
39887@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39888 /* add userspace sample */
39889
39890 if (!mm) {
39891- atomic_inc(&oprofile_stats.sample_lost_no_mm);
39892+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39893 return 0;
39894 }
39895
39896 cookie = lookup_dcookie(mm, s->eip, &offset);
39897
39898 if (cookie == INVALID_COOKIE) {
39899- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39900+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39901 return 0;
39902 }
39903
39904@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
39905 /* ignore backtraces if failed to add a sample */
39906 if (state == sb_bt_start) {
39907 state = sb_bt_ignore;
39908- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39909+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39910 }
39911 }
39912 release_mm(mm);
39913diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39914index c0cc4e7..44d4e54 100644
39915--- a/drivers/oprofile/event_buffer.c
39916+++ b/drivers/oprofile/event_buffer.c
39917@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39918 }
39919
39920 if (buffer_pos == buffer_size) {
39921- atomic_inc(&oprofile_stats.event_lost_overflow);
39922+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39923 return;
39924 }
39925
39926diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39927index ed2c3ec..deda85a 100644
39928--- a/drivers/oprofile/oprof.c
39929+++ b/drivers/oprofile/oprof.c
39930@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39931 if (oprofile_ops.switch_events())
39932 return;
39933
39934- atomic_inc(&oprofile_stats.multiplex_counter);
39935+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39936 start_switch_worker();
39937 }
39938
39939diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39940index 917d28e..d62d981 100644
39941--- a/drivers/oprofile/oprofile_stats.c
39942+++ b/drivers/oprofile/oprofile_stats.c
39943@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39944 cpu_buf->sample_invalid_eip = 0;
39945 }
39946
39947- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39948- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39949- atomic_set(&oprofile_stats.event_lost_overflow, 0);
39950- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39951- atomic_set(&oprofile_stats.multiplex_counter, 0);
39952+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39953+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39954+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39955+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39956+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39957 }
39958
39959
39960diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39961index 38b6fc0..b5cbfce 100644
39962--- a/drivers/oprofile/oprofile_stats.h
39963+++ b/drivers/oprofile/oprofile_stats.h
39964@@ -13,11 +13,11 @@
39965 #include <linux/atomic.h>
39966
39967 struct oprofile_stat_struct {
39968- atomic_t sample_lost_no_mm;
39969- atomic_t sample_lost_no_mapping;
39970- atomic_t bt_lost_no_mapping;
39971- atomic_t event_lost_overflow;
39972- atomic_t multiplex_counter;
39973+ atomic_unchecked_t sample_lost_no_mm;
39974+ atomic_unchecked_t sample_lost_no_mapping;
39975+ atomic_unchecked_t bt_lost_no_mapping;
39976+ atomic_unchecked_t event_lost_overflow;
39977+ atomic_unchecked_t multiplex_counter;
39978 };
39979
39980 extern struct oprofile_stat_struct oprofile_stats;
39981diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
39982index 849357c..b83c1e0 100644
39983--- a/drivers/oprofile/oprofilefs.c
39984+++ b/drivers/oprofile/oprofilefs.c
39985@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
39986
39987
39988 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
39989- char const *name, atomic_t *val)
39990+ char const *name, atomic_unchecked_t *val)
39991 {
39992 return __oprofilefs_create_file(sb, root, name,
39993 &atomic_ro_fops, 0444, val);
39994diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
39995index 93404f7..4a313d8 100644
39996--- a/drivers/oprofile/timer_int.c
39997+++ b/drivers/oprofile/timer_int.c
39998@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
39999 return NOTIFY_OK;
40000 }
40001
40002-static struct notifier_block __refdata oprofile_cpu_notifier = {
40003+static struct notifier_block oprofile_cpu_notifier = {
40004 .notifier_call = oprofile_cpu_notify,
40005 };
40006
40007diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40008index 3f56bc0..707d642 100644
40009--- a/drivers/parport/procfs.c
40010+++ b/drivers/parport/procfs.c
40011@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40012
40013 *ppos += len;
40014
40015- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40016+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40017 }
40018
40019 #ifdef CONFIG_PARPORT_1284
40020@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40021
40022 *ppos += len;
40023
40024- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40025+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40026 }
40027 #endif /* IEEE1284.3 support. */
40028
40029diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
40030index c35e8ad..fc33beb 100644
40031--- a/drivers/pci/hotplug/acpiphp_ibm.c
40032+++ b/drivers/pci/hotplug/acpiphp_ibm.c
40033@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
40034 goto init_cleanup;
40035 }
40036
40037- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40038+ pax_open_kernel();
40039+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40040+ pax_close_kernel();
40041 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
40042
40043 return retval;
40044diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
40045index a6a71c4..c91097b 100644
40046--- a/drivers/pci/hotplug/cpcihp_generic.c
40047+++ b/drivers/pci/hotplug/cpcihp_generic.c
40048@@ -73,7 +73,6 @@ static u16 port;
40049 static unsigned int enum_bit;
40050 static u8 enum_mask;
40051
40052-static struct cpci_hp_controller_ops generic_hpc_ops;
40053 static struct cpci_hp_controller generic_hpc;
40054
40055 static int __init validate_parameters(void)
40056@@ -139,6 +138,10 @@ static int query_enum(void)
40057 return ((value & enum_mask) == enum_mask);
40058 }
40059
40060+static struct cpci_hp_controller_ops generic_hpc_ops = {
40061+ .query_enum = query_enum,
40062+};
40063+
40064 static int __init cpcihp_generic_init(void)
40065 {
40066 int status;
40067@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
40068 pci_dev_put(dev);
40069
40070 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
40071- generic_hpc_ops.query_enum = query_enum;
40072 generic_hpc.ops = &generic_hpc_ops;
40073
40074 status = cpci_hp_register_controller(&generic_hpc);
40075diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
40076index 449b4bb..257e2e8 100644
40077--- a/drivers/pci/hotplug/cpcihp_zt5550.c
40078+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
40079@@ -59,7 +59,6 @@
40080 /* local variables */
40081 static bool debug;
40082 static bool poll;
40083-static struct cpci_hp_controller_ops zt5550_hpc_ops;
40084 static struct cpci_hp_controller zt5550_hpc;
40085
40086 /* Primary cPCI bus bridge device */
40087@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
40088 return 0;
40089 }
40090
40091+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
40092+ .query_enum = zt5550_hc_query_enum,
40093+};
40094+
40095 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
40096 {
40097 int status;
40098@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
40099 dbg("returned from zt5550_hc_config");
40100
40101 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
40102- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
40103 zt5550_hpc.ops = &zt5550_hpc_ops;
40104 if(!poll) {
40105 zt5550_hpc.irq = hc_dev->irq;
40106 zt5550_hpc.irq_flags = IRQF_SHARED;
40107 zt5550_hpc.dev_id = hc_dev;
40108
40109- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
40110- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
40111- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
40112+ pax_open_kernel();
40113+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
40114+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
40115+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
40116+ pax_open_kernel();
40117 } else {
40118 info("using ENUM# polling mode");
40119 }
40120diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40121index 76ba8a1..20ca857 100644
40122--- a/drivers/pci/hotplug/cpqphp_nvram.c
40123+++ b/drivers/pci/hotplug/cpqphp_nvram.c
40124@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40125
40126 void compaq_nvram_init (void __iomem *rom_start)
40127 {
40128+
40129+#ifndef CONFIG_PAX_KERNEXEC
40130 if (rom_start) {
40131 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40132 }
40133+#endif
40134+
40135 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40136
40137 /* initialize our int15 lock */
40138diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
40139index 202f4a9..8ee47d0 100644
40140--- a/drivers/pci/hotplug/pci_hotplug_core.c
40141+++ b/drivers/pci/hotplug/pci_hotplug_core.c
40142@@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
40143 return -EINVAL;
40144 }
40145
40146- slot->ops->owner = owner;
40147- slot->ops->mod_name = mod_name;
40148+ pax_open_kernel();
40149+ *(struct module **)&slot->ops->owner = owner;
40150+ *(const char **)&slot->ops->mod_name = mod_name;
40151+ pax_close_kernel();
40152
40153 mutex_lock(&pci_hp_mutex);
40154 /*
40155diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
40156index 939bd1d..a1459c9 100644
40157--- a/drivers/pci/hotplug/pciehp_core.c
40158+++ b/drivers/pci/hotplug/pciehp_core.c
40159@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
40160 struct slot *slot = ctrl->slot;
40161 struct hotplug_slot *hotplug = NULL;
40162 struct hotplug_slot_info *info = NULL;
40163- struct hotplug_slot_ops *ops = NULL;
40164+ hotplug_slot_ops_no_const *ops = NULL;
40165 char name[SLOT_NAME_SIZE];
40166 int retval = -ENOMEM;
40167
40168diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
40169index 9c6e9bb..2916736 100644
40170--- a/drivers/pci/pci-sysfs.c
40171+++ b/drivers/pci/pci-sysfs.c
40172@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
40173 {
40174 /* allocate attribute structure, piggyback attribute name */
40175 int name_len = write_combine ? 13 : 10;
40176- struct bin_attribute *res_attr;
40177+ bin_attribute_no_const *res_attr;
40178 int retval;
40179
40180 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
40181@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
40182 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
40183 {
40184 int retval;
40185- struct bin_attribute *attr;
40186+ bin_attribute_no_const *attr;
40187
40188 /* If the device has VPD, try to expose it in sysfs. */
40189 if (dev->vpd) {
40190@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
40191 {
40192 int retval;
40193 int rom_size = 0;
40194- struct bin_attribute *attr;
40195+ bin_attribute_no_const *attr;
40196
40197 if (!sysfs_initialized)
40198 return -EACCES;
40199diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
40200index e851829..a1a7196 100644
40201--- a/drivers/pci/pci.h
40202+++ b/drivers/pci/pci.h
40203@@ -98,7 +98,7 @@ struct pci_vpd_ops {
40204 struct pci_vpd {
40205 unsigned int len;
40206 const struct pci_vpd_ops *ops;
40207- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
40208+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
40209 };
40210
40211 extern int pci_vpd_pci22_init(struct pci_dev *dev);
40212diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40213index 8474b6a..ee81993 100644
40214--- a/drivers/pci/pcie/aspm.c
40215+++ b/drivers/pci/pcie/aspm.c
40216@@ -27,9 +27,9 @@
40217 #define MODULE_PARAM_PREFIX "pcie_aspm."
40218
40219 /* Note: those are not register definitions */
40220-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40221-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40222-#define ASPM_STATE_L1 (4) /* L1 state */
40223+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40224+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40225+#define ASPM_STATE_L1 (4U) /* L1 state */
40226 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40227 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40228
40229diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40230index 6186f03..1a78714 100644
40231--- a/drivers/pci/probe.c
40232+++ b/drivers/pci/probe.c
40233@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
40234 struct pci_bus_region region;
40235 bool bar_too_big = false, bar_disabled = false;
40236
40237- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
40238+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
40239
40240 /* No printks while decoding is disabled! */
40241 if (!dev->mmio_always_on) {
40242diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40243index 9b8505c..f00870a 100644
40244--- a/drivers/pci/proc.c
40245+++ b/drivers/pci/proc.c
40246@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40247 static int __init pci_proc_init(void)
40248 {
40249 struct pci_dev *dev = NULL;
40250+
40251+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40252+#ifdef CONFIG_GRKERNSEC_PROC_USER
40253+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40254+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40255+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40256+#endif
40257+#else
40258 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40259+#endif
40260 proc_create("devices", 0, proc_bus_pci_dir,
40261 &proc_bus_pci_dev_operations);
40262 proc_initialized = 1;
40263diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40264index 2111dbb..79e434b 100644
40265--- a/drivers/platform/x86/msi-laptop.c
40266+++ b/drivers/platform/x86/msi-laptop.c
40267@@ -820,12 +820,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
40268 int result;
40269
40270 /* allow userland write sysfs file */
40271- dev_attr_bluetooth.store = store_bluetooth;
40272- dev_attr_wlan.store = store_wlan;
40273- dev_attr_threeg.store = store_threeg;
40274- dev_attr_bluetooth.attr.mode |= S_IWUSR;
40275- dev_attr_wlan.attr.mode |= S_IWUSR;
40276- dev_attr_threeg.attr.mode |= S_IWUSR;
40277+ pax_open_kernel();
40278+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
40279+ *(void **)&dev_attr_wlan.store = store_wlan;
40280+ *(void **)&dev_attr_threeg.store = store_threeg;
40281+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
40282+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
40283+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
40284+ pax_close_kernel();
40285
40286 /* disable hardware control by fn key */
40287 result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
40288diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40289index b8ad71f..3ec9bb4 100644
40290--- a/drivers/platform/x86/sony-laptop.c
40291+++ b/drivers/platform/x86/sony-laptop.c
40292@@ -2356,7 +2356,7 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
40293 }
40294
40295 /* High speed charging function */
40296-static struct device_attribute *hsc_handle;
40297+static device_attribute_no_const *hsc_handle;
40298
40299 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
40300 struct device_attribute *attr,
40301diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40302index f946ca7..f25c833 100644
40303--- a/drivers/platform/x86/thinkpad_acpi.c
40304+++ b/drivers/platform/x86/thinkpad_acpi.c
40305@@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
40306 return 0;
40307 }
40308
40309-void static hotkey_mask_warn_incomplete_mask(void)
40310+static void hotkey_mask_warn_incomplete_mask(void)
40311 {
40312 /* log only what the user can fix... */
40313 const u32 wantedmask = hotkey_driver_mask &
40314@@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
40315 }
40316 }
40317
40318-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40319- struct tp_nvram_state *newn,
40320- const u32 event_mask)
40321-{
40322-
40323 #define TPACPI_COMPARE_KEY(__scancode, __member) \
40324 do { \
40325 if ((event_mask & (1 << __scancode)) && \
40326@@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40327 tpacpi_hotkey_send_key(__scancode); \
40328 } while (0)
40329
40330- void issue_volchange(const unsigned int oldvol,
40331- const unsigned int newvol)
40332- {
40333- unsigned int i = oldvol;
40334+static void issue_volchange(const unsigned int oldvol,
40335+ const unsigned int newvol,
40336+ const u32 event_mask)
40337+{
40338+ unsigned int i = oldvol;
40339
40340- while (i > newvol) {
40341- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
40342- i--;
40343- }
40344- while (i < newvol) {
40345- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40346- i++;
40347- }
40348+ while (i > newvol) {
40349+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
40350+ i--;
40351 }
40352+ while (i < newvol) {
40353+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40354+ i++;
40355+ }
40356+}
40357
40358- void issue_brightnesschange(const unsigned int oldbrt,
40359- const unsigned int newbrt)
40360- {
40361- unsigned int i = oldbrt;
40362+static void issue_brightnesschange(const unsigned int oldbrt,
40363+ const unsigned int newbrt,
40364+ const u32 event_mask)
40365+{
40366+ unsigned int i = oldbrt;
40367
40368- while (i > newbrt) {
40369- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
40370- i--;
40371- }
40372- while (i < newbrt) {
40373- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40374- i++;
40375- }
40376+ while (i > newbrt) {
40377+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
40378+ i--;
40379+ }
40380+ while (i < newbrt) {
40381+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40382+ i++;
40383 }
40384+}
40385
40386+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40387+ struct tp_nvram_state *newn,
40388+ const u32 event_mask)
40389+{
40390 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
40391 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
40392 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
40393@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40394 oldn->volume_level != newn->volume_level) {
40395 /* recently muted, or repeated mute keypress, or
40396 * multiple presses ending in mute */
40397- issue_volchange(oldn->volume_level, newn->volume_level);
40398+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
40399 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
40400 }
40401 } else {
40402@@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40403 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40404 }
40405 if (oldn->volume_level != newn->volume_level) {
40406- issue_volchange(oldn->volume_level, newn->volume_level);
40407+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
40408 } else if (oldn->volume_toggle != newn->volume_toggle) {
40409 /* repeated vol up/down keypress at end of scale ? */
40410 if (newn->volume_level == 0)
40411@@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40412 /* handle brightness */
40413 if (oldn->brightness_level != newn->brightness_level) {
40414 issue_brightnesschange(oldn->brightness_level,
40415- newn->brightness_level);
40416+ newn->brightness_level,
40417+ event_mask);
40418 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
40419 /* repeated key presses that didn't change state */
40420 if (newn->brightness_level == 0)
40421@@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40422 && !tp_features.bright_unkfw)
40423 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40424 }
40425+}
40426
40427 #undef TPACPI_COMPARE_KEY
40428 #undef TPACPI_MAY_SEND_KEY
40429-}
40430
40431 /*
40432 * Polling driver
40433diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40434index 769d265..a3a05ca 100644
40435--- a/drivers/pnp/pnpbios/bioscalls.c
40436+++ b/drivers/pnp/pnpbios/bioscalls.c
40437@@ -58,7 +58,7 @@ do { \
40438 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40439 } while(0)
40440
40441-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40442+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40443 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40444
40445 /*
40446@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40447
40448 cpu = get_cpu();
40449 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40450+
40451+ pax_open_kernel();
40452 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40453+ pax_close_kernel();
40454
40455 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40456 spin_lock_irqsave(&pnp_bios_lock, flags);
40457@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40458 :"memory");
40459 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40460
40461+ pax_open_kernel();
40462 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40463+ pax_close_kernel();
40464+
40465 put_cpu();
40466
40467 /* If we get here and this is set then the PnP BIOS faulted on us. */
40468@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40469 return status;
40470 }
40471
40472-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40473+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40474 {
40475 int i;
40476
40477@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40478 pnp_bios_callpoint.offset = header->fields.pm16offset;
40479 pnp_bios_callpoint.segment = PNP_CS16;
40480
40481+ pax_open_kernel();
40482+
40483 for_each_possible_cpu(i) {
40484 struct desc_struct *gdt = get_cpu_gdt_table(i);
40485 if (!gdt)
40486@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40487 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40488 (unsigned long)__va(header->fields.pm16dseg));
40489 }
40490+
40491+ pax_close_kernel();
40492 }
40493diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40494index 3e6db1c..1fbbdae 100644
40495--- a/drivers/pnp/resource.c
40496+++ b/drivers/pnp/resource.c
40497@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40498 return 1;
40499
40500 /* check if the resource is valid */
40501- if (*irq < 0 || *irq > 15)
40502+ if (*irq > 15)
40503 return 0;
40504
40505 /* check if the resource is reserved */
40506@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40507 return 1;
40508
40509 /* check if the resource is valid */
40510- if (*dma < 0 || *dma == 4 || *dma > 7)
40511+ if (*dma == 4 || *dma > 7)
40512 return 0;
40513
40514 /* check if the resource is reserved */
40515diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
40516index 7df7c5f..bd48c47 100644
40517--- a/drivers/power/pda_power.c
40518+++ b/drivers/power/pda_power.c
40519@@ -37,7 +37,11 @@ static int polling;
40520
40521 #ifdef CONFIG_USB_OTG_UTILS
40522 static struct usb_phy *transceiver;
40523-static struct notifier_block otg_nb;
40524+static int otg_handle_notification(struct notifier_block *nb,
40525+ unsigned long event, void *unused);
40526+static struct notifier_block otg_nb = {
40527+ .notifier_call = otg_handle_notification
40528+};
40529 #endif
40530
40531 static struct regulator *ac_draw;
40532@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
40533
40534 #ifdef CONFIG_USB_OTG_UTILS
40535 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
40536- otg_nb.notifier_call = otg_handle_notification;
40537 ret = usb_register_notifier(transceiver, &otg_nb);
40538 if (ret) {
40539 dev_err(dev, "failure to register otg notifier\n");
40540diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
40541index cc439fd..8fa30df 100644
40542--- a/drivers/power/power_supply.h
40543+++ b/drivers/power/power_supply.h
40544@@ -16,12 +16,12 @@ struct power_supply;
40545
40546 #ifdef CONFIG_SYSFS
40547
40548-extern void power_supply_init_attrs(struct device_type *dev_type);
40549+extern void power_supply_init_attrs(void);
40550 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
40551
40552 #else
40553
40554-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
40555+static inline void power_supply_init_attrs(void) {}
40556 #define power_supply_uevent NULL
40557
40558 #endif /* CONFIG_SYSFS */
40559diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
40560index 8a7cfb3..493e0a2 100644
40561--- a/drivers/power/power_supply_core.c
40562+++ b/drivers/power/power_supply_core.c
40563@@ -24,7 +24,10 @@
40564 struct class *power_supply_class;
40565 EXPORT_SYMBOL_GPL(power_supply_class);
40566
40567-static struct device_type power_supply_dev_type;
40568+extern const struct attribute_group *power_supply_attr_groups[];
40569+static struct device_type power_supply_dev_type = {
40570+ .groups = power_supply_attr_groups,
40571+};
40572
40573 static int __power_supply_changed_work(struct device *dev, void *data)
40574 {
40575@@ -393,7 +396,6 @@ static int __init power_supply_class_init(void)
40576 return PTR_ERR(power_supply_class);
40577
40578 power_supply_class->dev_uevent = power_supply_uevent;
40579- power_supply_init_attrs(&power_supply_dev_type);
40580
40581 return 0;
40582 }
40583diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
40584index 40fa3b7..d9c2e0e 100644
40585--- a/drivers/power/power_supply_sysfs.c
40586+++ b/drivers/power/power_supply_sysfs.c
40587@@ -229,17 +229,15 @@ static struct attribute_group power_supply_attr_group = {
40588 .is_visible = power_supply_attr_is_visible,
40589 };
40590
40591-static const struct attribute_group *power_supply_attr_groups[] = {
40592+const struct attribute_group *power_supply_attr_groups[] = {
40593 &power_supply_attr_group,
40594 NULL,
40595 };
40596
40597-void power_supply_init_attrs(struct device_type *dev_type)
40598+void power_supply_init_attrs(void)
40599 {
40600 int i;
40601
40602- dev_type->groups = power_supply_attr_groups;
40603-
40604 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
40605 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
40606 }
40607diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
40608index 4d7c635..9860196 100644
40609--- a/drivers/regulator/max8660.c
40610+++ b/drivers/regulator/max8660.c
40611@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
40612 max8660->shadow_regs[MAX8660_OVER1] = 5;
40613 } else {
40614 /* Otherwise devices can be toggled via software */
40615- max8660_dcdc_ops.enable = max8660_dcdc_enable;
40616- max8660_dcdc_ops.disable = max8660_dcdc_disable;
40617+ pax_open_kernel();
40618+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
40619+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
40620+ pax_close_kernel();
40621 }
40622
40623 /*
40624diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
40625index 9a8ea91..c483dd9 100644
40626--- a/drivers/regulator/max8973-regulator.c
40627+++ b/drivers/regulator/max8973-regulator.c
40628@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
40629 if (!pdata->enable_ext_control) {
40630 max->desc.enable_reg = MAX8973_VOUT;
40631 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
40632- max8973_dcdc_ops.enable = regulator_enable_regmap;
40633- max8973_dcdc_ops.disable = regulator_disable_regmap;
40634- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
40635+ pax_open_kernel();
40636+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
40637+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
40638+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
40639+ pax_close_kernel();
40640 }
40641
40642 max->enable_external_control = pdata->enable_ext_control;
40643diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
40644index 0d84b1f..c2da6ac 100644
40645--- a/drivers/regulator/mc13892-regulator.c
40646+++ b/drivers/regulator/mc13892-regulator.c
40647@@ -540,10 +540,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
40648 }
40649 mc13xxx_unlock(mc13892);
40650
40651- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
40652+ pax_open_kernel();
40653+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
40654 = mc13892_vcam_set_mode;
40655- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
40656+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
40657 = mc13892_vcam_get_mode;
40658+ pax_close_kernel();
40659
40660 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
40661 ARRAY_SIZE(mc13892_regulators));
40662diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
40663index 16630aa..6afc992 100644
40664--- a/drivers/rtc/rtc-cmos.c
40665+++ b/drivers/rtc/rtc-cmos.c
40666@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
40667 hpet_rtc_timer_init();
40668
40669 /* export at least the first block of NVRAM */
40670- nvram.size = address_space - NVRAM_OFFSET;
40671+ pax_open_kernel();
40672+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
40673+ pax_close_kernel();
40674 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
40675 if (retval < 0) {
40676 dev_dbg(dev, "can't create nvram file? %d\n", retval);
40677diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40678index 9a86b4b..3a383dc 100644
40679--- a/drivers/rtc/rtc-dev.c
40680+++ b/drivers/rtc/rtc-dev.c
40681@@ -14,6 +14,7 @@
40682 #include <linux/module.h>
40683 #include <linux/rtc.h>
40684 #include <linux/sched.h>
40685+#include <linux/grsecurity.h>
40686 #include "rtc-core.h"
40687
40688 static dev_t rtc_devt;
40689@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
40690 if (copy_from_user(&tm, uarg, sizeof(tm)))
40691 return -EFAULT;
40692
40693+ gr_log_timechange();
40694+
40695 return rtc_set_time(rtc, &tm);
40696
40697 case RTC_PIE_ON:
40698diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
40699index e0d0ba4..3c65868 100644
40700--- a/drivers/rtc/rtc-ds1307.c
40701+++ b/drivers/rtc/rtc-ds1307.c
40702@@ -106,7 +106,7 @@ struct ds1307 {
40703 u8 offset; /* register's offset */
40704 u8 regs[11];
40705 u16 nvram_offset;
40706- struct bin_attribute *nvram;
40707+ bin_attribute_no_const *nvram;
40708 enum ds_type type;
40709 unsigned long flags;
40710 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
40711diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
40712index 130f29a..6179d03 100644
40713--- a/drivers/rtc/rtc-m48t59.c
40714+++ b/drivers/rtc/rtc-m48t59.c
40715@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
40716 goto out;
40717 }
40718
40719- m48t59_nvram_attr.size = pdata->offset;
40720+ pax_open_kernel();
40721+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
40722+ pax_close_kernel();
40723
40724 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
40725 if (ret) {
40726diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
40727index e693af6..2e525b6 100644
40728--- a/drivers/scsi/bfa/bfa_fcpim.h
40729+++ b/drivers/scsi/bfa/bfa_fcpim.h
40730@@ -36,7 +36,7 @@ struct bfa_iotag_s {
40731
40732 struct bfa_itn_s {
40733 bfa_isr_func_t isr;
40734-};
40735+} __no_const;
40736
40737 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
40738 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
40739diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40740index 23a90e7..9cf04ee 100644
40741--- a/drivers/scsi/bfa/bfa_ioc.h
40742+++ b/drivers/scsi/bfa/bfa_ioc.h
40743@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
40744 bfa_ioc_disable_cbfn_t disable_cbfn;
40745 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40746 bfa_ioc_reset_cbfn_t reset_cbfn;
40747-};
40748+} __no_const;
40749
40750 /*
40751 * IOC event notification mechanism.
40752@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
40753 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
40754 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
40755 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
40756-};
40757+} __no_const;
40758
40759 /*
40760 * Queue element to wait for room in request queue. FIFO order is
40761diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40762index 593085a..47aa999 100644
40763--- a/drivers/scsi/hosts.c
40764+++ b/drivers/scsi/hosts.c
40765@@ -42,7 +42,7 @@
40766 #include "scsi_logging.h"
40767
40768
40769-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
40770+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
40771
40772
40773 static void scsi_host_cls_release(struct device *dev)
40774@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40775 * subtract one because we increment first then return, but we need to
40776 * know what the next host number was before increment
40777 */
40778- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40779+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40780 shost->dma_channel = 0xff;
40781
40782 /* These three are default values which can be overridden */
40783diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
40784index 4f33806..afd6f60 100644
40785--- a/drivers/scsi/hpsa.c
40786+++ b/drivers/scsi/hpsa.c
40787@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
40788 unsigned long flags;
40789
40790 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
40791- return h->access.command_completed(h, q);
40792+ return h->access->command_completed(h, q);
40793
40794 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
40795 a = rq->head[rq->current_entry];
40796@@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
40797 while (!list_empty(&h->reqQ)) {
40798 c = list_entry(h->reqQ.next, struct CommandList, list);
40799 /* can't do anything if fifo is full */
40800- if ((h->access.fifo_full(h))) {
40801+ if ((h->access->fifo_full(h))) {
40802 dev_warn(&h->pdev->dev, "fifo full\n");
40803 break;
40804 }
40805@@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
40806
40807 /* Tell the controller execute command */
40808 spin_unlock_irqrestore(&h->lock, flags);
40809- h->access.submit_command(h, c);
40810+ h->access->submit_command(h, c);
40811 spin_lock_irqsave(&h->lock, flags);
40812 }
40813 spin_unlock_irqrestore(&h->lock, flags);
40814@@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
40815
40816 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
40817 {
40818- return h->access.command_completed(h, q);
40819+ return h->access->command_completed(h, q);
40820 }
40821
40822 static inline bool interrupt_pending(struct ctlr_info *h)
40823 {
40824- return h->access.intr_pending(h);
40825+ return h->access->intr_pending(h);
40826 }
40827
40828 static inline long interrupt_not_for_us(struct ctlr_info *h)
40829 {
40830- return (h->access.intr_pending(h) == 0) ||
40831+ return (h->access->intr_pending(h) == 0) ||
40832 (h->interrupts_enabled == 0);
40833 }
40834
40835@@ -4316,7 +4316,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
40836 if (prod_index < 0)
40837 return -ENODEV;
40838 h->product_name = products[prod_index].product_name;
40839- h->access = *(products[prod_index].access);
40840+ h->access = products[prod_index].access;
40841
40842 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
40843 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
40844@@ -4598,7 +4598,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
40845
40846 assert_spin_locked(&lockup_detector_lock);
40847 remove_ctlr_from_lockup_detector_list(h);
40848- h->access.set_intr_mask(h, HPSA_INTR_OFF);
40849+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
40850 spin_lock_irqsave(&h->lock, flags);
40851 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
40852 spin_unlock_irqrestore(&h->lock, flags);
40853@@ -4775,7 +4775,7 @@ reinit_after_soft_reset:
40854 }
40855
40856 /* make sure the board interrupts are off */
40857- h->access.set_intr_mask(h, HPSA_INTR_OFF);
40858+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
40859
40860 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
40861 goto clean2;
40862@@ -4809,7 +4809,7 @@ reinit_after_soft_reset:
40863 * fake ones to scoop up any residual completions.
40864 */
40865 spin_lock_irqsave(&h->lock, flags);
40866- h->access.set_intr_mask(h, HPSA_INTR_OFF);
40867+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
40868 spin_unlock_irqrestore(&h->lock, flags);
40869 free_irqs(h);
40870 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
40871@@ -4828,9 +4828,9 @@ reinit_after_soft_reset:
40872 dev_info(&h->pdev->dev, "Board READY.\n");
40873 dev_info(&h->pdev->dev,
40874 "Waiting for stale completions to drain.\n");
40875- h->access.set_intr_mask(h, HPSA_INTR_ON);
40876+ h->access->set_intr_mask(h, HPSA_INTR_ON);
40877 msleep(10000);
40878- h->access.set_intr_mask(h, HPSA_INTR_OFF);
40879+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
40880
40881 rc = controller_reset_failed(h->cfgtable);
40882 if (rc)
40883@@ -4851,7 +4851,7 @@ reinit_after_soft_reset:
40884 }
40885
40886 /* Turn the interrupts on so we can service requests */
40887- h->access.set_intr_mask(h, HPSA_INTR_ON);
40888+ h->access->set_intr_mask(h, HPSA_INTR_ON);
40889
40890 hpsa_hba_inquiry(h);
40891 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
40892@@ -4903,7 +4903,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
40893 * To write all data in the battery backed cache to disks
40894 */
40895 hpsa_flush_cache(h);
40896- h->access.set_intr_mask(h, HPSA_INTR_OFF);
40897+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
40898 hpsa_free_irqs_and_disable_msix(h);
40899 }
40900
40901@@ -5071,7 +5071,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
40902 return;
40903 }
40904 /* Change the access methods to the performant access methods */
40905- h->access = SA5_performant_access;
40906+ h->access = &SA5_performant_access;
40907 h->transMethod = CFGTBL_Trans_Performant;
40908 }
40909
40910diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
40911index 9816479..c5d4e97 100644
40912--- a/drivers/scsi/hpsa.h
40913+++ b/drivers/scsi/hpsa.h
40914@@ -79,7 +79,7 @@ struct ctlr_info {
40915 unsigned int msix_vector;
40916 unsigned int msi_vector;
40917 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
40918- struct access_method access;
40919+ struct access_method *access;
40920
40921 /* queue and queue Info */
40922 struct list_head reqQ;
40923diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40924index c772d8d..35c362c 100644
40925--- a/drivers/scsi/libfc/fc_exch.c
40926+++ b/drivers/scsi/libfc/fc_exch.c
40927@@ -100,12 +100,12 @@ struct fc_exch_mgr {
40928 u16 pool_max_index;
40929
40930 struct {
40931- atomic_t no_free_exch;
40932- atomic_t no_free_exch_xid;
40933- atomic_t xid_not_found;
40934- atomic_t xid_busy;
40935- atomic_t seq_not_found;
40936- atomic_t non_bls_resp;
40937+ atomic_unchecked_t no_free_exch;
40938+ atomic_unchecked_t no_free_exch_xid;
40939+ atomic_unchecked_t xid_not_found;
40940+ atomic_unchecked_t xid_busy;
40941+ atomic_unchecked_t seq_not_found;
40942+ atomic_unchecked_t non_bls_resp;
40943 } stats;
40944 };
40945
40946@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40947 /* allocate memory for exchange */
40948 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40949 if (!ep) {
40950- atomic_inc(&mp->stats.no_free_exch);
40951+ atomic_inc_unchecked(&mp->stats.no_free_exch);
40952 goto out;
40953 }
40954 memset(ep, 0, sizeof(*ep));
40955@@ -786,7 +786,7 @@ out:
40956 return ep;
40957 err:
40958 spin_unlock_bh(&pool->lock);
40959- atomic_inc(&mp->stats.no_free_exch_xid);
40960+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40961 mempool_free(ep, mp->ep_pool);
40962 return NULL;
40963 }
40964@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40965 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40966 ep = fc_exch_find(mp, xid);
40967 if (!ep) {
40968- atomic_inc(&mp->stats.xid_not_found);
40969+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40970 reject = FC_RJT_OX_ID;
40971 goto out;
40972 }
40973@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40974 ep = fc_exch_find(mp, xid);
40975 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40976 if (ep) {
40977- atomic_inc(&mp->stats.xid_busy);
40978+ atomic_inc_unchecked(&mp->stats.xid_busy);
40979 reject = FC_RJT_RX_ID;
40980 goto rel;
40981 }
40982@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40983 }
40984 xid = ep->xid; /* get our XID */
40985 } else if (!ep) {
40986- atomic_inc(&mp->stats.xid_not_found);
40987+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40988 reject = FC_RJT_RX_ID; /* XID not found */
40989 goto out;
40990 }
40991@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40992 } else {
40993 sp = &ep->seq;
40994 if (sp->id != fh->fh_seq_id) {
40995- atomic_inc(&mp->stats.seq_not_found);
40996+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40997 if (f_ctl & FC_FC_END_SEQ) {
40998 /*
40999 * Update sequence_id based on incoming last
41000@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41001
41002 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41003 if (!ep) {
41004- atomic_inc(&mp->stats.xid_not_found);
41005+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41006 goto out;
41007 }
41008 if (ep->esb_stat & ESB_ST_COMPLETE) {
41009- atomic_inc(&mp->stats.xid_not_found);
41010+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41011 goto rel;
41012 }
41013 if (ep->rxid == FC_XID_UNKNOWN)
41014 ep->rxid = ntohs(fh->fh_rx_id);
41015 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41016- atomic_inc(&mp->stats.xid_not_found);
41017+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41018 goto rel;
41019 }
41020 if (ep->did != ntoh24(fh->fh_s_id) &&
41021 ep->did != FC_FID_FLOGI) {
41022- atomic_inc(&mp->stats.xid_not_found);
41023+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41024 goto rel;
41025 }
41026 sof = fr_sof(fp);
41027@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41028 sp->ssb_stat |= SSB_ST_RESP;
41029 sp->id = fh->fh_seq_id;
41030 } else if (sp->id != fh->fh_seq_id) {
41031- atomic_inc(&mp->stats.seq_not_found);
41032+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41033 goto rel;
41034 }
41035
41036@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41037 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41038
41039 if (!sp)
41040- atomic_inc(&mp->stats.xid_not_found);
41041+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41042 else
41043- atomic_inc(&mp->stats.non_bls_resp);
41044+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
41045
41046 fc_frame_free(fp);
41047 }
41048@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
41049
41050 list_for_each_entry(ema, &lport->ema_list, ema_list) {
41051 mp = ema->mp;
41052- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
41053+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
41054 st->fc_no_free_exch_xid +=
41055- atomic_read(&mp->stats.no_free_exch_xid);
41056- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
41057- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
41058- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
41059- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
41060+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
41061+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
41062+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
41063+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
41064+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
41065 }
41066 }
41067 EXPORT_SYMBOL(fc_exch_update_stats);
41068diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41069index bdb81cd..d3c7c2c 100644
41070--- a/drivers/scsi/libsas/sas_ata.c
41071+++ b/drivers/scsi/libsas/sas_ata.c
41072@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
41073 .postreset = ata_std_postreset,
41074 .error_handler = ata_std_error_handler,
41075 .post_internal_cmd = sas_ata_post_internal,
41076- .qc_defer = ata_std_qc_defer,
41077+ .qc_defer = ata_std_qc_defer,
41078 .qc_prep = ata_noop_qc_prep,
41079 .qc_issue = sas_ata_qc_issue,
41080 .qc_fill_rtf = sas_ata_qc_fill_rtf,
41081diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41082index df4c13a..a51e90c 100644
41083--- a/drivers/scsi/lpfc/lpfc.h
41084+++ b/drivers/scsi/lpfc/lpfc.h
41085@@ -424,7 +424,7 @@ struct lpfc_vport {
41086 struct dentry *debug_nodelist;
41087 struct dentry *vport_debugfs_root;
41088 struct lpfc_debugfs_trc *disc_trc;
41089- atomic_t disc_trc_cnt;
41090+ atomic_unchecked_t disc_trc_cnt;
41091 #endif
41092 uint8_t stat_data_enabled;
41093 uint8_t stat_data_blocked;
41094@@ -842,8 +842,8 @@ struct lpfc_hba {
41095 struct timer_list fabric_block_timer;
41096 unsigned long bit_flags;
41097 #define FABRIC_COMANDS_BLOCKED 0
41098- atomic_t num_rsrc_err;
41099- atomic_t num_cmd_success;
41100+ atomic_unchecked_t num_rsrc_err;
41101+ atomic_unchecked_t num_cmd_success;
41102 unsigned long last_rsrc_error_time;
41103 unsigned long last_ramp_down_time;
41104 unsigned long last_ramp_up_time;
41105@@ -879,7 +879,7 @@ struct lpfc_hba {
41106
41107 struct dentry *debug_slow_ring_trc;
41108 struct lpfc_debugfs_trc *slow_ring_trc;
41109- atomic_t slow_ring_trc_cnt;
41110+ atomic_unchecked_t slow_ring_trc_cnt;
41111 /* iDiag debugfs sub-directory */
41112 struct dentry *idiag_root;
41113 struct dentry *idiag_pci_cfg;
41114diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41115index f63f5ff..de29189 100644
41116--- a/drivers/scsi/lpfc/lpfc_debugfs.c
41117+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41118@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
41119
41120 #include <linux/debugfs.h>
41121
41122-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41123+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41124 static unsigned long lpfc_debugfs_start_time = 0L;
41125
41126 /* iDiag */
41127@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41128 lpfc_debugfs_enable = 0;
41129
41130 len = 0;
41131- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41132+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41133 (lpfc_debugfs_max_disc_trc - 1);
41134 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41135 dtp = vport->disc_trc + i;
41136@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41137 lpfc_debugfs_enable = 0;
41138
41139 len = 0;
41140- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41141+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41142 (lpfc_debugfs_max_slow_ring_trc - 1);
41143 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41144 dtp = phba->slow_ring_trc + i;
41145@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41146 !vport || !vport->disc_trc)
41147 return;
41148
41149- index = atomic_inc_return(&vport->disc_trc_cnt) &
41150+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41151 (lpfc_debugfs_max_disc_trc - 1);
41152 dtp = vport->disc_trc + index;
41153 dtp->fmt = fmt;
41154 dtp->data1 = data1;
41155 dtp->data2 = data2;
41156 dtp->data3 = data3;
41157- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41158+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41159 dtp->jif = jiffies;
41160 #endif
41161 return;
41162@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41163 !phba || !phba->slow_ring_trc)
41164 return;
41165
41166- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41167+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41168 (lpfc_debugfs_max_slow_ring_trc - 1);
41169 dtp = phba->slow_ring_trc + index;
41170 dtp->fmt = fmt;
41171 dtp->data1 = data1;
41172 dtp->data2 = data2;
41173 dtp->data3 = data3;
41174- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41175+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41176 dtp->jif = jiffies;
41177 #endif
41178 return;
41179@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41180 "slow_ring buffer\n");
41181 goto debug_failed;
41182 }
41183- atomic_set(&phba->slow_ring_trc_cnt, 0);
41184+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41185 memset(phba->slow_ring_trc, 0,
41186 (sizeof(struct lpfc_debugfs_trc) *
41187 lpfc_debugfs_max_slow_ring_trc));
41188@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41189 "buffer\n");
41190 goto debug_failed;
41191 }
41192- atomic_set(&vport->disc_trc_cnt, 0);
41193+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41194
41195 snprintf(name, sizeof(name), "discovery_trace");
41196 vport->debug_disc_trc =
41197diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41198index 89ad558..76956c4 100644
41199--- a/drivers/scsi/lpfc/lpfc_init.c
41200+++ b/drivers/scsi/lpfc/lpfc_init.c
41201@@ -10618,8 +10618,10 @@ lpfc_init(void)
41202 "misc_register returned with status %d", error);
41203
41204 if (lpfc_enable_npiv) {
41205- lpfc_transport_functions.vport_create = lpfc_vport_create;
41206- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41207+ pax_open_kernel();
41208+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41209+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41210+ pax_close_kernel();
41211 }
41212 lpfc_transport_template =
41213 fc_attach_transport(&lpfc_transport_functions);
41214diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41215index 60e5a17..ff7a793 100644
41216--- a/drivers/scsi/lpfc/lpfc_scsi.c
41217+++ b/drivers/scsi/lpfc/lpfc_scsi.c
41218@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41219 uint32_t evt_posted;
41220
41221 spin_lock_irqsave(&phba->hbalock, flags);
41222- atomic_inc(&phba->num_rsrc_err);
41223+ atomic_inc_unchecked(&phba->num_rsrc_err);
41224 phba->last_rsrc_error_time = jiffies;
41225
41226 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41227@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41228 unsigned long flags;
41229 struct lpfc_hba *phba = vport->phba;
41230 uint32_t evt_posted;
41231- atomic_inc(&phba->num_cmd_success);
41232+ atomic_inc_unchecked(&phba->num_cmd_success);
41233
41234 if (vport->cfg_lun_queue_depth <= queue_depth)
41235 return;
41236@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41237 unsigned long num_rsrc_err, num_cmd_success;
41238 int i;
41239
41240- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41241- num_cmd_success = atomic_read(&phba->num_cmd_success);
41242+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41243+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41244
41245 /*
41246 * The error and success command counters are global per
41247@@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41248 }
41249 }
41250 lpfc_destroy_vport_work_array(phba, vports);
41251- atomic_set(&phba->num_rsrc_err, 0);
41252- atomic_set(&phba->num_cmd_success, 0);
41253+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41254+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41255 }
41256
41257 /**
41258@@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41259 }
41260 }
41261 lpfc_destroy_vport_work_array(phba, vports);
41262- atomic_set(&phba->num_rsrc_err, 0);
41263- atomic_set(&phba->num_cmd_success, 0);
41264+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41265+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41266 }
41267
41268 /**
41269diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41270index b46f5e9..c4c4ccb 100644
41271--- a/drivers/scsi/pmcraid.c
41272+++ b/drivers/scsi/pmcraid.c
41273@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41274 res->scsi_dev = scsi_dev;
41275 scsi_dev->hostdata = res;
41276 res->change_detected = 0;
41277- atomic_set(&res->read_failures, 0);
41278- atomic_set(&res->write_failures, 0);
41279+ atomic_set_unchecked(&res->read_failures, 0);
41280+ atomic_set_unchecked(&res->write_failures, 0);
41281 rc = 0;
41282 }
41283 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41284@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41285
41286 /* If this was a SCSI read/write command keep count of errors */
41287 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41288- atomic_inc(&res->read_failures);
41289+ atomic_inc_unchecked(&res->read_failures);
41290 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41291- atomic_inc(&res->write_failures);
41292+ atomic_inc_unchecked(&res->write_failures);
41293
41294 if (!RES_IS_GSCSI(res->cfg_entry) &&
41295 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41296@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
41297 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
41298 * hrrq_id assigned here in queuecommand
41299 */
41300- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
41301+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
41302 pinstance->num_hrrq;
41303 cmd->cmd_done = pmcraid_io_done;
41304
41305@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
41306 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
41307 * hrrq_id assigned here in queuecommand
41308 */
41309- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
41310+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
41311 pinstance->num_hrrq;
41312
41313 if (request_size) {
41314@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41315
41316 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41317 /* add resources only after host is added into system */
41318- if (!atomic_read(&pinstance->expose_resources))
41319+ if (!atomic_read_unchecked(&pinstance->expose_resources))
41320 return;
41321
41322 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
41323@@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
41324 init_waitqueue_head(&pinstance->reset_wait_q);
41325
41326 atomic_set(&pinstance->outstanding_cmds, 0);
41327- atomic_set(&pinstance->last_message_id, 0);
41328- atomic_set(&pinstance->expose_resources, 0);
41329+ atomic_set_unchecked(&pinstance->last_message_id, 0);
41330+ atomic_set_unchecked(&pinstance->expose_resources, 0);
41331
41332 INIT_LIST_HEAD(&pinstance->free_res_q);
41333 INIT_LIST_HEAD(&pinstance->used_res_q);
41334@@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
41335 /* Schedule worker thread to handle CCN and take care of adding and
41336 * removing devices to OS
41337 */
41338- atomic_set(&pinstance->expose_resources, 1);
41339+ atomic_set_unchecked(&pinstance->expose_resources, 1);
41340 schedule_work(&pinstance->worker_q);
41341 return rc;
41342
41343diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41344index e1d150f..6c6df44 100644
41345--- a/drivers/scsi/pmcraid.h
41346+++ b/drivers/scsi/pmcraid.h
41347@@ -748,7 +748,7 @@ struct pmcraid_instance {
41348 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
41349
41350 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
41351- atomic_t last_message_id;
41352+ atomic_unchecked_t last_message_id;
41353
41354 /* configuration table */
41355 struct pmcraid_config_table *cfg_table;
41356@@ -777,7 +777,7 @@ struct pmcraid_instance {
41357 atomic_t outstanding_cmds;
41358
41359 /* should add/delete resources to mid-layer now ?*/
41360- atomic_t expose_resources;
41361+ atomic_unchecked_t expose_resources;
41362
41363
41364
41365@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
41366 struct pmcraid_config_table_entry_ext cfg_entry_ext;
41367 };
41368 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41369- atomic_t read_failures; /* count of failed READ commands */
41370- atomic_t write_failures; /* count of failed WRITE commands */
41371+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41372+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41373
41374 /* To indicate add/delete/modify during CCN */
41375 u8 change_detected;
41376diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
41377index 83d7984..a27d947 100644
41378--- a/drivers/scsi/qla2xxx/qla_attr.c
41379+++ b/drivers/scsi/qla2xxx/qla_attr.c
41380@@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
41381 return 0;
41382 }
41383
41384-struct fc_function_template qla2xxx_transport_functions = {
41385+fc_function_template_no_const qla2xxx_transport_functions = {
41386
41387 .show_host_node_name = 1,
41388 .show_host_port_name = 1,
41389@@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
41390 .bsg_timeout = qla24xx_bsg_timeout,
41391 };
41392
41393-struct fc_function_template qla2xxx_transport_vport_functions = {
41394+fc_function_template_no_const qla2xxx_transport_vport_functions = {
41395
41396 .show_host_node_name = 1,
41397 .show_host_port_name = 1,
41398diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
41399index 2411d1a..4673766 100644
41400--- a/drivers/scsi/qla2xxx/qla_gbl.h
41401+++ b/drivers/scsi/qla2xxx/qla_gbl.h
41402@@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
41403 struct device_attribute;
41404 extern struct device_attribute *qla2x00_host_attrs[];
41405 struct fc_function_template;
41406-extern struct fc_function_template qla2xxx_transport_functions;
41407-extern struct fc_function_template qla2xxx_transport_vport_functions;
41408+extern fc_function_template_no_const qla2xxx_transport_functions;
41409+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
41410 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
41411 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
41412 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
41413diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
41414index 10d23f8..a7d5d4c 100644
41415--- a/drivers/scsi/qla2xxx/qla_os.c
41416+++ b/drivers/scsi/qla2xxx/qla_os.c
41417@@ -1472,8 +1472,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
41418 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
41419 /* Ok, a 64bit DMA mask is applicable. */
41420 ha->flags.enable_64bit_addressing = 1;
41421- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
41422- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
41423+ pax_open_kernel();
41424+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
41425+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
41426+ pax_close_kernel();
41427 return;
41428 }
41429 }
41430diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41431index 329d553..f20d31d 100644
41432--- a/drivers/scsi/qla4xxx/ql4_def.h
41433+++ b/drivers/scsi/qla4xxx/ql4_def.h
41434@@ -273,7 +273,7 @@ struct ddb_entry {
41435 * (4000 only) */
41436 atomic_t relogin_timer; /* Max Time to wait for
41437 * relogin to complete */
41438- atomic_t relogin_retry_count; /* Num of times relogin has been
41439+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41440 * retried */
41441 uint32_t default_time2wait; /* Default Min time between
41442 * relogins (+aens) */
41443diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41444index 4cec123..7c1329f 100644
41445--- a/drivers/scsi/qla4xxx/ql4_os.c
41446+++ b/drivers/scsi/qla4xxx/ql4_os.c
41447@@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
41448 */
41449 if (!iscsi_is_session_online(cls_sess)) {
41450 /* Reset retry relogin timer */
41451- atomic_inc(&ddb_entry->relogin_retry_count);
41452+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41453 DEBUG2(ql4_printk(KERN_INFO, ha,
41454 "%s: index[%d] relogin timed out-retrying"
41455 " relogin (%d), retry (%d)\n", __func__,
41456 ddb_entry->fw_ddb_index,
41457- atomic_read(&ddb_entry->relogin_retry_count),
41458+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
41459 ddb_entry->default_time2wait + 4));
41460 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
41461 atomic_set(&ddb_entry->retry_relogin_timer,
41462@@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
41463
41464 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41465 atomic_set(&ddb_entry->relogin_timer, 0);
41466- atomic_set(&ddb_entry->relogin_retry_count, 0);
41467+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41468 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
41469 ddb_entry->default_relogin_timeout =
41470 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
41471diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41472index 2c0d0ec..4e8681a 100644
41473--- a/drivers/scsi/scsi.c
41474+++ b/drivers/scsi/scsi.c
41475@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41476 unsigned long timeout;
41477 int rtn = 0;
41478
41479- atomic_inc(&cmd->device->iorequest_cnt);
41480+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41481
41482 /* check if the device is still usable */
41483 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41484diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41485index f1bf5af..f67e943 100644
41486--- a/drivers/scsi/scsi_lib.c
41487+++ b/drivers/scsi/scsi_lib.c
41488@@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41489 shost = sdev->host;
41490 scsi_init_cmd_errh(cmd);
41491 cmd->result = DID_NO_CONNECT << 16;
41492- atomic_inc(&cmd->device->iorequest_cnt);
41493+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41494
41495 /*
41496 * SCSI request completion path will do scsi_device_unbusy(),
41497@@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
41498
41499 INIT_LIST_HEAD(&cmd->eh_entry);
41500
41501- atomic_inc(&cmd->device->iodone_cnt);
41502+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41503 if (cmd->result)
41504- atomic_inc(&cmd->device->ioerr_cnt);
41505+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41506
41507 disposition = scsi_decide_disposition(cmd);
41508 if (disposition != SUCCESS &&
41509diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41510index 931a7d9..0c2a754 100644
41511--- a/drivers/scsi/scsi_sysfs.c
41512+++ b/drivers/scsi/scsi_sysfs.c
41513@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41514 char *buf) \
41515 { \
41516 struct scsi_device *sdev = to_scsi_device(dev); \
41517- unsigned long long count = atomic_read(&sdev->field); \
41518+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41519 return snprintf(buf, 20, "0x%llx\n", count); \
41520 } \
41521 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41522diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41523index 84a1fdf..693b0d6 100644
41524--- a/drivers/scsi/scsi_tgt_lib.c
41525+++ b/drivers/scsi/scsi_tgt_lib.c
41526@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41527 int err;
41528
41529 dprintk("%lx %u\n", uaddr, len);
41530- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41531+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41532 if (err) {
41533 /*
41534 * TODO: need to fixup sg_tablesize, max_segment_size,
41535diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41536index e894ca7..de9d7660 100644
41537--- a/drivers/scsi/scsi_transport_fc.c
41538+++ b/drivers/scsi/scsi_transport_fc.c
41539@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
41540 * Netlink Infrastructure
41541 */
41542
41543-static atomic_t fc_event_seq;
41544+static atomic_unchecked_t fc_event_seq;
41545
41546 /**
41547 * fc_get_event_number - Obtain the next sequential FC event number
41548@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
41549 u32
41550 fc_get_event_number(void)
41551 {
41552- return atomic_add_return(1, &fc_event_seq);
41553+ return atomic_add_return_unchecked(1, &fc_event_seq);
41554 }
41555 EXPORT_SYMBOL(fc_get_event_number);
41556
41557@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
41558 {
41559 int error;
41560
41561- atomic_set(&fc_event_seq, 0);
41562+ atomic_set_unchecked(&fc_event_seq, 0);
41563
41564 error = transport_class_register(&fc_host_class);
41565 if (error)
41566@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
41567 char *cp;
41568
41569 *val = simple_strtoul(buf, &cp, 0);
41570- if ((*cp && (*cp != '\n')) || (*val < 0))
41571+ if (*cp && (*cp != '\n'))
41572 return -EINVAL;
41573 /*
41574 * Check for overflow; dev_loss_tmo is u32
41575diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41576index 31969f2..2b348f0 100644
41577--- a/drivers/scsi/scsi_transport_iscsi.c
41578+++ b/drivers/scsi/scsi_transport_iscsi.c
41579@@ -79,7 +79,7 @@ struct iscsi_internal {
41580 struct transport_container session_cont;
41581 };
41582
41583-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41584+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41585 static struct workqueue_struct *iscsi_eh_timer_workq;
41586
41587 static DEFINE_IDA(iscsi_sess_ida);
41588@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41589 int err;
41590
41591 ihost = shost->shost_data;
41592- session->sid = atomic_add_return(1, &iscsi_session_nr);
41593+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41594
41595 if (target_id == ISCSI_MAX_TARGET) {
41596 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
41597@@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
41598 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41599 ISCSI_TRANSPORT_VERSION);
41600
41601- atomic_set(&iscsi_session_nr, 0);
41602+ atomic_set_unchecked(&iscsi_session_nr, 0);
41603
41604 err = class_register(&iscsi_transport_class);
41605 if (err)
41606diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41607index f379c7f..e8fc69c 100644
41608--- a/drivers/scsi/scsi_transport_srp.c
41609+++ b/drivers/scsi/scsi_transport_srp.c
41610@@ -33,7 +33,7 @@
41611 #include "scsi_transport_srp_internal.h"
41612
41613 struct srp_host_attrs {
41614- atomic_t next_port_id;
41615+ atomic_unchecked_t next_port_id;
41616 };
41617 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41618
41619@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41620 struct Scsi_Host *shost = dev_to_shost(dev);
41621 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41622
41623- atomic_set(&srp_host->next_port_id, 0);
41624+ atomic_set_unchecked(&srp_host->next_port_id, 0);
41625 return 0;
41626 }
41627
41628@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41629 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41630 rport->roles = ids->roles;
41631
41632- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41633+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41634 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41635
41636 transport_setup_device(&rport->dev);
41637diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
41638index 7992635..609faf8 100644
41639--- a/drivers/scsi/sd.c
41640+++ b/drivers/scsi/sd.c
41641@@ -2909,7 +2909,7 @@ static int sd_probe(struct device *dev)
41642 sdkp->disk = gd;
41643 sdkp->index = index;
41644 atomic_set(&sdkp->openers, 0);
41645- atomic_set(&sdkp->device->ioerr_cnt, 0);
41646+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
41647
41648 if (!sdp->request_queue->rq_timeout) {
41649 if (sdp->type != TYPE_MOD)
41650diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41651index be2c9a6..275525c 100644
41652--- a/drivers/scsi/sg.c
41653+++ b/drivers/scsi/sg.c
41654@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
41655 sdp->disk->disk_name,
41656 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41657 NULL,
41658- (char *)arg);
41659+ (char __user *)arg);
41660 case BLKTRACESTART:
41661 return blk_trace_startstop(sdp->device->request_queue, 1);
41662 case BLKTRACESTOP:
41663diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41664index 19ee901..6e8c2ef 100644
41665--- a/drivers/spi/spi.c
41666+++ b/drivers/spi/spi.c
41667@@ -1616,7 +1616,7 @@ int spi_bus_unlock(struct spi_master *master)
41668 EXPORT_SYMBOL_GPL(spi_bus_unlock);
41669
41670 /* portable code must never pass more than 32 bytes */
41671-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41672+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
41673
41674 static u8 *buf;
41675
41676diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
41677index c7a5f97..71ecd35 100644
41678--- a/drivers/staging/iio/iio_hwmon.c
41679+++ b/drivers/staging/iio/iio_hwmon.c
41680@@ -72,7 +72,7 @@ static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
41681 static int iio_hwmon_probe(struct platform_device *pdev)
41682 {
41683 struct iio_hwmon_state *st;
41684- struct sensor_device_attribute *a;
41685+ sensor_device_attribute_no_const *a;
41686 int ret, i;
41687 int in_i = 1, temp_i = 1, curr_i = 1;
41688 enum iio_chan_type type;
41689diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
41690index 34afc16..ffe44dd 100644
41691--- a/drivers/staging/octeon/ethernet-rx.c
41692+++ b/drivers/staging/octeon/ethernet-rx.c
41693@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
41694 /* Increment RX stats for virtual ports */
41695 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
41696 #ifdef CONFIG_64BIT
41697- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
41698- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
41699+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
41700+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
41701 #else
41702- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
41703- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
41704+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
41705+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
41706 #endif
41707 }
41708 netif_receive_skb(skb);
41709@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
41710 dev->name);
41711 */
41712 #ifdef CONFIG_64BIT
41713- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
41714+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
41715 #else
41716- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
41717+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
41718 #endif
41719 dev_kfree_skb_irq(skb);
41720 }
41721diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
41722index ef32dc1..a159d68 100644
41723--- a/drivers/staging/octeon/ethernet.c
41724+++ b/drivers/staging/octeon/ethernet.c
41725@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
41726 * since the RX tasklet also increments it.
41727 */
41728 #ifdef CONFIG_64BIT
41729- atomic64_add(rx_status.dropped_packets,
41730- (atomic64_t *)&priv->stats.rx_dropped);
41731+ atomic64_add_unchecked(rx_status.dropped_packets,
41732+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
41733 #else
41734- atomic_add(rx_status.dropped_packets,
41735- (atomic_t *)&priv->stats.rx_dropped);
41736+ atomic_add_unchecked(rx_status.dropped_packets,
41737+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
41738 #endif
41739 }
41740
41741diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
41742index a2b7e03..aaf3630 100644
41743--- a/drivers/staging/ramster/tmem.c
41744+++ b/drivers/staging/ramster/tmem.c
41745@@ -50,25 +50,25 @@
41746 * A tmem host implementation must use this function to register callbacks
41747 * for memory allocation.
41748 */
41749-static struct tmem_hostops tmem_hostops;
41750+static struct tmem_hostops *tmem_hostops;
41751
41752 static void tmem_objnode_tree_init(void);
41753
41754 void tmem_register_hostops(struct tmem_hostops *m)
41755 {
41756 tmem_objnode_tree_init();
41757- tmem_hostops = *m;
41758+ tmem_hostops = m;
41759 }
41760
41761 /*
41762 * A tmem host implementation must use this function to register
41763 * callbacks for a page-accessible memory (PAM) implementation.
41764 */
41765-static struct tmem_pamops tmem_pamops;
41766+static struct tmem_pamops *tmem_pamops;
41767
41768 void tmem_register_pamops(struct tmem_pamops *m)
41769 {
41770- tmem_pamops = *m;
41771+ tmem_pamops = m;
41772 }
41773
41774 /*
41775@@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
41776 obj->pampd_count = 0;
41777 #ifdef CONFIG_RAMSTER
41778 if (tmem_pamops.new_obj != NULL)
41779- (*tmem_pamops.new_obj)(obj);
41780+ (tmem_pamops->new_obj)(obj);
41781 #endif
41782 SET_SENTINEL(obj, OBJ);
41783
41784@@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
41785 rbnode = rb_next(rbnode);
41786 tmem_pampd_destroy_all_in_obj(obj, true);
41787 tmem_obj_free(obj, hb);
41788- (*tmem_hostops.obj_free)(obj, pool);
41789+ (tmem_hostops->obj_free)(obj, pool);
41790 }
41791 spin_unlock(&hb->lock);
41792 }
41793@@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
41794 ASSERT_SENTINEL(obj, OBJ);
41795 BUG_ON(obj->pool == NULL);
41796 ASSERT_SENTINEL(obj->pool, POOL);
41797- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
41798+ objnode = (tmem_hostops->objnode_alloc)(obj->pool);
41799 if (unlikely(objnode == NULL))
41800 goto out;
41801 objnode->obj = obj;
41802@@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
41803 ASSERT_SENTINEL(pool, POOL);
41804 objnode->obj->objnode_count--;
41805 objnode->obj = NULL;
41806- (*tmem_hostops.objnode_free)(objnode, pool);
41807+ (tmem_hostops->objnode_free)(objnode, pool);
41808 }
41809
41810 /*
41811@@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
41812 void *old_pampd = *(void **)slot;
41813 *(void **)slot = new_pampd;
41814 if (!no_free)
41815- (*tmem_pamops.free)(old_pampd, obj->pool,
41816+ (tmem_pamops->free)(old_pampd, obj->pool,
41817 NULL, 0, false);
41818 ret = new_pampd;
41819 }
41820@@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
41821 if (objnode->slots[i]) {
41822 if (ht == 1) {
41823 obj->pampd_count--;
41824- (*tmem_pamops.free)(objnode->slots[i],
41825+ (tmem_pamops->free)(objnode->slots[i],
41826 obj->pool, NULL, 0, true);
41827 objnode->slots[i] = NULL;
41828 continue;
41829@@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
41830 return;
41831 if (obj->objnode_tree_height == 0) {
41832 obj->pampd_count--;
41833- (*tmem_pamops.free)(obj->objnode_tree_root,
41834+ (tmem_pamops->free)(obj->objnode_tree_root,
41835 obj->pool, NULL, 0, true);
41836 } else {
41837 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
41838@@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
41839 obj->objnode_tree_root = NULL;
41840 #ifdef CONFIG_RAMSTER
41841 if (tmem_pamops.free_obj != NULL)
41842- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
41843+ (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
41844 #endif
41845 }
41846
41847@@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
41848 /* if found, is a dup put, flush the old one */
41849 pampd_del = tmem_pampd_delete_from_obj(obj, index);
41850 BUG_ON(pampd_del != pampd);
41851- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
41852+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
41853 if (obj->pampd_count == 0) {
41854 objnew = obj;
41855 objfound = NULL;
41856@@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
41857 pampd = NULL;
41858 }
41859 } else {
41860- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
41861+ obj = objnew = (tmem_hostops->obj_alloc)(pool);
41862 if (unlikely(obj == NULL)) {
41863 ret = -ENOMEM;
41864 goto out;
41865@@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
41866 if (unlikely(ret == -ENOMEM))
41867 /* may have partially built objnode tree ("stump") */
41868 goto delete_and_free;
41869- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
41870+ (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
41871 goto out;
41872
41873 delete_and_free:
41874 (void)tmem_pampd_delete_from_obj(obj, index);
41875 if (pampd)
41876- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
41877+ (tmem_pamops->free)(pampd, pool, NULL, 0, true);
41878 if (objnew) {
41879 tmem_obj_free(objnew, hb);
41880- (*tmem_hostops.obj_free)(objnew, pool);
41881+ (tmem_hostops->obj_free)(objnew, pool);
41882 }
41883 out:
41884 spin_unlock(&hb->lock);
41885@@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
41886 if (pampd != NULL) {
41887 BUG_ON(obj == NULL);
41888 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
41889- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
41890+ (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
41891 } else if (delete) {
41892 BUG_ON(obj == NULL);
41893 (void)tmem_pampd_delete_from_obj(obj, index);
41894@@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
41895 int ret = 0;
41896
41897 if (!is_ephemeral(pool))
41898- new_pampd = (*tmem_pamops.repatriate_preload)(
41899+ new_pampd = (tmem_pamops->repatriate_preload)(
41900 old_pampd, pool, oidp, index, &intransit);
41901 if (intransit)
41902 ret = -EAGAIN;
41903@@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
41904 /* must release the hb->lock else repatriate can't sleep */
41905 spin_unlock(&hb->lock);
41906 if (!intransit)
41907- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
41908+ ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
41909 oidp, index, free, data);
41910 if (ret == -EAGAIN) {
41911 /* rare I think, but should cond_resched()??? */
41912@@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
41913 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
41914 /* if we bug here, pamops wasn't properly set up for ramster */
41915 BUG_ON(tmem_pamops.replace_in_obj == NULL);
41916- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
41917+ ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
41918 out:
41919 spin_unlock(&hb->lock);
41920 return ret;
41921@@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
41922 if (free) {
41923 if (obj->pampd_count == 0) {
41924 tmem_obj_free(obj, hb);
41925- (*tmem_hostops.obj_free)(obj, pool);
41926+ (tmem_hostops->obj_free)(obj, pool);
41927 obj = NULL;
41928 }
41929 }
41930 if (free)
41931- ret = (*tmem_pamops.get_data_and_free)(
41932+ ret = (tmem_pamops->get_data_and_free)(
41933 data, sizep, raw, pampd, pool, oidp, index);
41934 else
41935- ret = (*tmem_pamops.get_data)(
41936+ ret = (tmem_pamops->get_data)(
41937 data, sizep, raw, pampd, pool, oidp, index);
41938 if (ret < 0)
41939 goto out;
41940@@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
41941 pampd = tmem_pampd_delete_from_obj(obj, index);
41942 if (pampd == NULL)
41943 goto out;
41944- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
41945+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
41946 if (obj->pampd_count == 0) {
41947 tmem_obj_free(obj, hb);
41948- (*tmem_hostops.obj_free)(obj, pool);
41949+ (tmem_hostops->obj_free)(obj, pool);
41950 }
41951 ret = 0;
41952
41953@@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
41954 goto out;
41955 tmem_pampd_destroy_all_in_obj(obj, false);
41956 tmem_obj_free(obj, hb);
41957- (*tmem_hostops.obj_free)(obj, pool);
41958+ (tmem_hostops->obj_free)(obj, pool);
41959 ret = 0;
41960
41961 out:
41962diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
41963index dc23395..cf7e9b1 100644
41964--- a/drivers/staging/rtl8712/rtl871x_io.h
41965+++ b/drivers/staging/rtl8712/rtl871x_io.h
41966@@ -108,7 +108,7 @@ struct _io_ops {
41967 u8 *pmem);
41968 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
41969 u8 *pmem);
41970-};
41971+} __no_const;
41972
41973 struct io_req {
41974 struct list_head list;
41975diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
41976index 1f5088b..0e59820 100644
41977--- a/drivers/staging/sbe-2t3e3/netdev.c
41978+++ b/drivers/staging/sbe-2t3e3/netdev.c
41979@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
41980 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
41981
41982 if (rlen)
41983- if (copy_to_user(data, &resp, rlen))
41984+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
41985 return -EFAULT;
41986
41987 return 0;
41988diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
41989index 5dddc4d..34fcb2f 100644
41990--- a/drivers/staging/usbip/vhci.h
41991+++ b/drivers/staging/usbip/vhci.h
41992@@ -83,7 +83,7 @@ struct vhci_hcd {
41993 unsigned resuming:1;
41994 unsigned long re_timeout;
41995
41996- atomic_t seqnum;
41997+ atomic_unchecked_t seqnum;
41998
41999 /*
42000 * NOTE:
42001diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42002index c3aa219..bf8b3de 100644
42003--- a/drivers/staging/usbip/vhci_hcd.c
42004+++ b/drivers/staging/usbip/vhci_hcd.c
42005@@ -451,7 +451,7 @@ static void vhci_tx_urb(struct urb *urb)
42006 return;
42007 }
42008
42009- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42010+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42011 if (priv->seqnum == 0xffff)
42012 dev_info(&urb->dev->dev, "seqnum max\n");
42013
42014@@ -703,7 +703,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42015 return -ENOMEM;
42016 }
42017
42018- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42019+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42020 if (unlink->seqnum == 0xffff)
42021 pr_info("seqnum max\n");
42022
42023@@ -907,7 +907,7 @@ static int vhci_start(struct usb_hcd *hcd)
42024 vdev->rhport = rhport;
42025 }
42026
42027- atomic_set(&vhci->seqnum, 0);
42028+ atomic_set_unchecked(&vhci->seqnum, 0);
42029 spin_lock_init(&vhci->lock);
42030
42031 hcd->power_budget = 0; /* no limit */
42032diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42033index ba5f1c0..11d8122 100644
42034--- a/drivers/staging/usbip/vhci_rx.c
42035+++ b/drivers/staging/usbip/vhci_rx.c
42036@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42037 if (!urb) {
42038 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
42039 pr_info("max seqnum %d\n",
42040- atomic_read(&the_controller->seqnum));
42041+ atomic_read_unchecked(&the_controller->seqnum));
42042 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42043 return;
42044 }
42045diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42046index 5f13890..36a044b 100644
42047--- a/drivers/staging/vt6655/hostap.c
42048+++ b/drivers/staging/vt6655/hostap.c
42049@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
42050 *
42051 */
42052
42053+static net_device_ops_no_const apdev_netdev_ops;
42054+
42055 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42056 {
42057 PSDevice apdev_priv;
42058 struct net_device *dev = pDevice->dev;
42059 int ret;
42060- const struct net_device_ops apdev_netdev_ops = {
42061- .ndo_start_xmit = pDevice->tx_80211,
42062- };
42063
42064 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42065
42066@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42067 *apdev_priv = *pDevice;
42068 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42069
42070+ /* only half broken now */
42071+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
42072 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
42073
42074 pDevice->apdev->type = ARPHRD_IEEE80211;
42075diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42076index 26a7d0e..897b083 100644
42077--- a/drivers/staging/vt6656/hostap.c
42078+++ b/drivers/staging/vt6656/hostap.c
42079@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
42080 *
42081 */
42082
42083+static net_device_ops_no_const apdev_netdev_ops;
42084+
42085 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42086 {
42087 PSDevice apdev_priv;
42088 struct net_device *dev = pDevice->dev;
42089 int ret;
42090- const struct net_device_ops apdev_netdev_ops = {
42091- .ndo_start_xmit = pDevice->tx_80211,
42092- };
42093
42094 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42095
42096@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42097 *apdev_priv = *pDevice;
42098 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42099
42100+ /* only half broken now */
42101+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
42102 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
42103
42104 pDevice->apdev->type = ARPHRD_IEEE80211;
42105diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
42106index 56c8e60..1920c63 100644
42107--- a/drivers/staging/zcache/tmem.c
42108+++ b/drivers/staging/zcache/tmem.c
42109@@ -39,7 +39,7 @@
42110 * A tmem host implementation must use this function to register callbacks
42111 * for memory allocation.
42112 */
42113-static struct tmem_hostops tmem_hostops;
42114+static tmem_hostops_no_const tmem_hostops;
42115
42116 static void tmem_objnode_tree_init(void);
42117
42118@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
42119 * A tmem host implementation must use this function to register
42120 * callbacks for a page-accessible memory (PAM) implementation
42121 */
42122-static struct tmem_pamops tmem_pamops;
42123+static tmem_pamops_no_const tmem_pamops;
42124
42125 void tmem_register_pamops(struct tmem_pamops *m)
42126 {
42127diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
42128index 0d4aa82..f7832d4 100644
42129--- a/drivers/staging/zcache/tmem.h
42130+++ b/drivers/staging/zcache/tmem.h
42131@@ -180,6 +180,7 @@ struct tmem_pamops {
42132 void (*new_obj)(struct tmem_obj *);
42133 int (*replace_in_obj)(void *, struct tmem_obj *);
42134 };
42135+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
42136 extern void tmem_register_pamops(struct tmem_pamops *m);
42137
42138 /* memory allocation methods provided by the host implementation */
42139@@ -189,6 +190,7 @@ struct tmem_hostops {
42140 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
42141 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
42142 };
42143+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
42144 extern void tmem_register_hostops(struct tmem_hostops *m);
42145
42146 /* core tmem accessor functions */
42147diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
42148index 96f4981..4daaa7e 100644
42149--- a/drivers/target/target_core_device.c
42150+++ b/drivers/target/target_core_device.c
42151@@ -1370,7 +1370,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
42152 spin_lock_init(&dev->se_port_lock);
42153 spin_lock_init(&dev->se_tmr_lock);
42154 spin_lock_init(&dev->qf_cmd_lock);
42155- atomic_set(&dev->dev_ordered_id, 0);
42156+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
42157 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
42158 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
42159 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
42160diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
42161index bd587b7..173daf3 100644
42162--- a/drivers/target/target_core_transport.c
42163+++ b/drivers/target/target_core_transport.c
42164@@ -1077,7 +1077,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
42165 * Used to determine when ORDERED commands should go from
42166 * Dormant to Active status.
42167 */
42168- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
42169+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
42170 smp_mb__after_atomic_inc();
42171 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
42172 cmd->se_ordered_id, cmd->sam_task_attr,
42173diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
42174index b09c8d1f..c4225c0 100644
42175--- a/drivers/tty/cyclades.c
42176+++ b/drivers/tty/cyclades.c
42177@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
42178 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
42179 info->port.count);
42180 #endif
42181- info->port.count++;
42182+ atomic_inc(&info->port.count);
42183 #ifdef CY_DEBUG_COUNT
42184 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
42185- current->pid, info->port.count);
42186+ current->pid, atomic_read(&info->port.count));
42187 #endif
42188
42189 /*
42190@@ -3991,7 +3991,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
42191 for (j = 0; j < cy_card[i].nports; j++) {
42192 info = &cy_card[i].ports[j];
42193
42194- if (info->port.count) {
42195+ if (atomic_read(&info->port.count)) {
42196 /* XXX is the ldisc num worth this? */
42197 struct tty_struct *tty;
42198 struct tty_ldisc *ld;
42199diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
42200index 13ee53b..418d164 100644
42201--- a/drivers/tty/hvc/hvc_console.c
42202+++ b/drivers/tty/hvc/hvc_console.c
42203@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
42204
42205 spin_lock_irqsave(&hp->port.lock, flags);
42206 /* Check and then increment for fast path open. */
42207- if (hp->port.count++ > 0) {
42208+ if (atomic_inc_return(&hp->port.count) > 1) {
42209 spin_unlock_irqrestore(&hp->port.lock, flags);
42210 hvc_kick();
42211 return 0;
42212@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
42213
42214 spin_lock_irqsave(&hp->port.lock, flags);
42215
42216- if (--hp->port.count == 0) {
42217+ if (atomic_dec_return(&hp->port.count) == 0) {
42218 spin_unlock_irqrestore(&hp->port.lock, flags);
42219 /* We are done with the tty pointer now. */
42220 tty_port_tty_set(&hp->port, NULL);
42221@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
42222 */
42223 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
42224 } else {
42225- if (hp->port.count < 0)
42226+ if (atomic_read(&hp->port.count) < 0)
42227 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
42228- hp->vtermno, hp->port.count);
42229+ hp->vtermno, atomic_read(&hp->port.count));
42230 spin_unlock_irqrestore(&hp->port.lock, flags);
42231 }
42232 }
42233@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
42234 * open->hangup case this can be called after the final close so prevent
42235 * that from happening for now.
42236 */
42237- if (hp->port.count <= 0) {
42238+ if (atomic_read(&hp->port.count) <= 0) {
42239 spin_unlock_irqrestore(&hp->port.lock, flags);
42240 return;
42241 }
42242
42243- hp->port.count = 0;
42244+ atomic_set(&hp->port.count, 0);
42245 spin_unlock_irqrestore(&hp->port.lock, flags);
42246 tty_port_tty_set(&hp->port, NULL);
42247
42248@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
42249 return -EPIPE;
42250
42251 /* FIXME what's this (unprotected) check for? */
42252- if (hp->port.count <= 0)
42253+ if (atomic_read(&hp->port.count) <= 0)
42254 return -EIO;
42255
42256 spin_lock_irqsave(&hp->lock, flags);
42257diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
42258index 8776357..b2d4afd 100644
42259--- a/drivers/tty/hvc/hvcs.c
42260+++ b/drivers/tty/hvc/hvcs.c
42261@@ -83,6 +83,7 @@
42262 #include <asm/hvcserver.h>
42263 #include <asm/uaccess.h>
42264 #include <asm/vio.h>
42265+#include <asm/local.h>
42266
42267 /*
42268 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
42269@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
42270
42271 spin_lock_irqsave(&hvcsd->lock, flags);
42272
42273- if (hvcsd->port.count > 0) {
42274+ if (atomic_read(&hvcsd->port.count) > 0) {
42275 spin_unlock_irqrestore(&hvcsd->lock, flags);
42276 printk(KERN_INFO "HVCS: vterm state unchanged. "
42277 "The hvcs device node is still in use.\n");
42278@@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
42279 }
42280 }
42281
42282- hvcsd->port.count = 0;
42283+ atomic_set(&hvcsd->port.count, 0);
42284 hvcsd->port.tty = tty;
42285 tty->driver_data = hvcsd;
42286
42287@@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
42288 unsigned long flags;
42289
42290 spin_lock_irqsave(&hvcsd->lock, flags);
42291- hvcsd->port.count++;
42292+ atomic_inc(&hvcsd->port.count);
42293 hvcsd->todo_mask |= HVCS_SCHED_READ;
42294 spin_unlock_irqrestore(&hvcsd->lock, flags);
42295
42296@@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
42297 hvcsd = tty->driver_data;
42298
42299 spin_lock_irqsave(&hvcsd->lock, flags);
42300- if (--hvcsd->port.count == 0) {
42301+ if (atomic_dec_and_test(&hvcsd->port.count)) {
42302
42303 vio_disable_interrupts(hvcsd->vdev);
42304
42305@@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
42306
42307 free_irq(irq, hvcsd);
42308 return;
42309- } else if (hvcsd->port.count < 0) {
42310+ } else if (atomic_read(&hvcsd->port.count) < 0) {
42311 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
42312 " is missmanaged.\n",
42313- hvcsd->vdev->unit_address, hvcsd->port.count);
42314+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
42315 }
42316
42317 spin_unlock_irqrestore(&hvcsd->lock, flags);
42318@@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
42319
42320 spin_lock_irqsave(&hvcsd->lock, flags);
42321 /* Preserve this so that we know how many kref refs to put */
42322- temp_open_count = hvcsd->port.count;
42323+ temp_open_count = atomic_read(&hvcsd->port.count);
42324
42325 /*
42326 * Don't kref put inside the spinlock because the destruction
42327@@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
42328 tty->driver_data = NULL;
42329 hvcsd->port.tty = NULL;
42330
42331- hvcsd->port.count = 0;
42332+ atomic_set(&hvcsd->port.count, 0);
42333
42334 /* This will drop any buffered data on the floor which is OK in a hangup
42335 * scenario. */
42336@@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
42337 * the middle of a write operation? This is a crummy place to do this
42338 * but we want to keep it all in the spinlock.
42339 */
42340- if (hvcsd->port.count <= 0) {
42341+ if (atomic_read(&hvcsd->port.count) <= 0) {
42342 spin_unlock_irqrestore(&hvcsd->lock, flags);
42343 return -ENODEV;
42344 }
42345@@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
42346 {
42347 struct hvcs_struct *hvcsd = tty->driver_data;
42348
42349- if (!hvcsd || hvcsd->port.count <= 0)
42350+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
42351 return 0;
42352
42353 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
42354diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
42355index 2cde13d..645d78f 100644
42356--- a/drivers/tty/ipwireless/tty.c
42357+++ b/drivers/tty/ipwireless/tty.c
42358@@ -29,6 +29,7 @@
42359 #include <linux/tty_driver.h>
42360 #include <linux/tty_flip.h>
42361 #include <linux/uaccess.h>
42362+#include <asm/local.h>
42363
42364 #include "tty.h"
42365 #include "network.h"
42366@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
42367 mutex_unlock(&tty->ipw_tty_mutex);
42368 return -ENODEV;
42369 }
42370- if (tty->port.count == 0)
42371+ if (atomic_read(&tty->port.count) == 0)
42372 tty->tx_bytes_queued = 0;
42373
42374- tty->port.count++;
42375+ atomic_inc(&tty->port.count);
42376
42377 tty->port.tty = linux_tty;
42378 linux_tty->driver_data = tty;
42379@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
42380
42381 static void do_ipw_close(struct ipw_tty *tty)
42382 {
42383- tty->port.count--;
42384-
42385- if (tty->port.count == 0) {
42386+ if (atomic_dec_return(&tty->port.count) == 0) {
42387 struct tty_struct *linux_tty = tty->port.tty;
42388
42389 if (linux_tty != NULL) {
42390@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
42391 return;
42392
42393 mutex_lock(&tty->ipw_tty_mutex);
42394- if (tty->port.count == 0) {
42395+ if (atomic_read(&tty->port.count) == 0) {
42396 mutex_unlock(&tty->ipw_tty_mutex);
42397 return;
42398 }
42399@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
42400 return;
42401 }
42402
42403- if (!tty->port.count) {
42404+ if (!atomic_read(&tty->port.count)) {
42405 mutex_unlock(&tty->ipw_tty_mutex);
42406 return;
42407 }
42408@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
42409 return -ENODEV;
42410
42411 mutex_lock(&tty->ipw_tty_mutex);
42412- if (!tty->port.count) {
42413+ if (!atomic_read(&tty->port.count)) {
42414 mutex_unlock(&tty->ipw_tty_mutex);
42415 return -EINVAL;
42416 }
42417@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
42418 if (!tty)
42419 return -ENODEV;
42420
42421- if (!tty->port.count)
42422+ if (!atomic_read(&tty->port.count))
42423 return -EINVAL;
42424
42425 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
42426@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
42427 if (!tty)
42428 return 0;
42429
42430- if (!tty->port.count)
42431+ if (!atomic_read(&tty->port.count))
42432 return 0;
42433
42434 return tty->tx_bytes_queued;
42435@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
42436 if (!tty)
42437 return -ENODEV;
42438
42439- if (!tty->port.count)
42440+ if (!atomic_read(&tty->port.count))
42441 return -EINVAL;
42442
42443 return get_control_lines(tty);
42444@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
42445 if (!tty)
42446 return -ENODEV;
42447
42448- if (!tty->port.count)
42449+ if (!atomic_read(&tty->port.count))
42450 return -EINVAL;
42451
42452 return set_control_lines(tty, set, clear);
42453@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
42454 if (!tty)
42455 return -ENODEV;
42456
42457- if (!tty->port.count)
42458+ if (!atomic_read(&tty->port.count))
42459 return -EINVAL;
42460
42461 /* FIXME: Exactly how is the tty object locked here .. */
42462@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
42463 * are gone */
42464 mutex_lock(&ttyj->ipw_tty_mutex);
42465 }
42466- while (ttyj->port.count)
42467+ while (atomic_read(&ttyj->port.count))
42468 do_ipw_close(ttyj);
42469 ipwireless_disassociate_network_ttys(network,
42470 ttyj->channel_idx);
42471diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
42472index f9d2850..b006f04 100644
42473--- a/drivers/tty/moxa.c
42474+++ b/drivers/tty/moxa.c
42475@@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
42476 }
42477
42478 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
42479- ch->port.count++;
42480+ atomic_inc(&ch->port.count);
42481 tty->driver_data = ch;
42482 tty_port_tty_set(&ch->port, tty);
42483 mutex_lock(&ch->port.mutex);
42484diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
42485index bfd6771..e0d93c4 100644
42486--- a/drivers/tty/n_gsm.c
42487+++ b/drivers/tty/n_gsm.c
42488@@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
42489 spin_lock_init(&dlci->lock);
42490 mutex_init(&dlci->mutex);
42491 dlci->fifo = &dlci->_fifo;
42492- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
42493+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
42494 kfree(dlci);
42495 return NULL;
42496 }
42497@@ -2936,7 +2936,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
42498 struct gsm_dlci *dlci = tty->driver_data;
42499 struct tty_port *port = &dlci->port;
42500
42501- port->count++;
42502+ atomic_inc(&port->count);
42503 dlci_get(dlci);
42504 dlci_get(dlci->gsm->dlci[0]);
42505 mux_get(dlci->gsm);
42506diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
42507index 19083ef..6e34e97 100644
42508--- a/drivers/tty/n_tty.c
42509+++ b/drivers/tty/n_tty.c
42510@@ -2196,6 +2196,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
42511 {
42512 *ops = tty_ldisc_N_TTY;
42513 ops->owner = NULL;
42514- ops->refcount = ops->flags = 0;
42515+ atomic_set(&ops->refcount, 0);
42516+ ops->flags = 0;
42517 }
42518 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
42519diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
42520index 79ff3a5..1fe9399 100644
42521--- a/drivers/tty/pty.c
42522+++ b/drivers/tty/pty.c
42523@@ -791,8 +791,10 @@ static void __init unix98_pty_init(void)
42524 panic("Couldn't register Unix98 pts driver");
42525
42526 /* Now create the /dev/ptmx special device */
42527+ pax_open_kernel();
42528 tty_default_fops(&ptmx_fops);
42529- ptmx_fops.open = ptmx_open;
42530+ *(void **)&ptmx_fops.open = ptmx_open;
42531+ pax_close_kernel();
42532
42533 cdev_init(&ptmx_cdev, &ptmx_fops);
42534 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
42535diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
42536index e42009a..566a036 100644
42537--- a/drivers/tty/rocket.c
42538+++ b/drivers/tty/rocket.c
42539@@ -925,7 +925,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
42540 tty->driver_data = info;
42541 tty_port_tty_set(port, tty);
42542
42543- if (port->count++ == 0) {
42544+ if (atomic_inc_return(&port->count) == 1) {
42545 atomic_inc(&rp_num_ports_open);
42546
42547 #ifdef ROCKET_DEBUG_OPEN
42548@@ -934,7 +934,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
42549 #endif
42550 }
42551 #ifdef ROCKET_DEBUG_OPEN
42552- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
42553+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
42554 #endif
42555
42556 /*
42557@@ -1529,7 +1529,7 @@ static void rp_hangup(struct tty_struct *tty)
42558 spin_unlock_irqrestore(&info->port.lock, flags);
42559 return;
42560 }
42561- if (info->port.count)
42562+ if (atomic_read(&info->port.count))
42563 atomic_dec(&rp_num_ports_open);
42564 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
42565 spin_unlock_irqrestore(&info->port.lock, flags);
42566diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
42567index 1002054..dd644a8 100644
42568--- a/drivers/tty/serial/kgdboc.c
42569+++ b/drivers/tty/serial/kgdboc.c
42570@@ -24,8 +24,9 @@
42571 #define MAX_CONFIG_LEN 40
42572
42573 static struct kgdb_io kgdboc_io_ops;
42574+static struct kgdb_io kgdboc_io_ops_console;
42575
42576-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
42577+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
42578 static int configured = -1;
42579
42580 static char config[MAX_CONFIG_LEN];
42581@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
42582 kgdboc_unregister_kbd();
42583 if (configured == 1)
42584 kgdb_unregister_io_module(&kgdboc_io_ops);
42585+ else if (configured == 2)
42586+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
42587 }
42588
42589 static int configure_kgdboc(void)
42590@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
42591 int err;
42592 char *cptr = config;
42593 struct console *cons;
42594+ int is_console = 0;
42595
42596 err = kgdboc_option_setup(config);
42597 if (err || !strlen(config) || isspace(config[0]))
42598 goto noconfig;
42599
42600 err = -ENODEV;
42601- kgdboc_io_ops.is_console = 0;
42602 kgdb_tty_driver = NULL;
42603
42604 kgdboc_use_kms = 0;
42605@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
42606 int idx;
42607 if (cons->device && cons->device(cons, &idx) == p &&
42608 idx == tty_line) {
42609- kgdboc_io_ops.is_console = 1;
42610+ is_console = 1;
42611 break;
42612 }
42613 cons = cons->next;
42614@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
42615 kgdb_tty_line = tty_line;
42616
42617 do_register:
42618- err = kgdb_register_io_module(&kgdboc_io_ops);
42619+ if (is_console) {
42620+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
42621+ configured = 2;
42622+ } else {
42623+ err = kgdb_register_io_module(&kgdboc_io_ops);
42624+ configured = 1;
42625+ }
42626 if (err)
42627 goto noconfig;
42628
42629@@ -205,8 +214,6 @@ do_register:
42630 if (err)
42631 goto nmi_con_failed;
42632
42633- configured = 1;
42634-
42635 return 0;
42636
42637 nmi_con_failed:
42638@@ -223,7 +230,7 @@ noconfig:
42639 static int __init init_kgdboc(void)
42640 {
42641 /* Already configured? */
42642- if (configured == 1)
42643+ if (configured >= 1)
42644 return 0;
42645
42646 return configure_kgdboc();
42647@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
42648 if (config[len - 1] == '\n')
42649 config[len - 1] = '\0';
42650
42651- if (configured == 1)
42652+ if (configured >= 1)
42653 cleanup_kgdboc();
42654
42655 /* Go and configure with the new params. */
42656@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
42657 .post_exception = kgdboc_post_exp_handler,
42658 };
42659
42660+static struct kgdb_io kgdboc_io_ops_console = {
42661+ .name = "kgdboc",
42662+ .read_char = kgdboc_get_char,
42663+ .write_char = kgdboc_put_char,
42664+ .pre_exception = kgdboc_pre_exp_handler,
42665+ .post_exception = kgdboc_post_exp_handler,
42666+ .is_console = 1
42667+};
42668+
42669 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
42670 /* This is only available if kgdboc is a built in for early debugging */
42671 static int __init kgdboc_early_init(char *opt)
42672diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
42673index e514b3a..c73d614 100644
42674--- a/drivers/tty/serial/samsung.c
42675+++ b/drivers/tty/serial/samsung.c
42676@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
42677 }
42678 }
42679
42680+static int s3c64xx_serial_startup(struct uart_port *port);
42681 static int s3c24xx_serial_startup(struct uart_port *port)
42682 {
42683 struct s3c24xx_uart_port *ourport = to_ourport(port);
42684 int ret;
42685
42686+ /* Startup sequence is different for s3c64xx and higher SoC's */
42687+ if (s3c24xx_serial_has_interrupt_mask(port))
42688+ return s3c64xx_serial_startup(port);
42689+
42690 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
42691 port->mapbase, port->membase);
42692
42693@@ -1122,10 +1127,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
42694 /* setup info for port */
42695 port->dev = &platdev->dev;
42696
42697- /* Startup sequence is different for s3c64xx and higher SoC's */
42698- if (s3c24xx_serial_has_interrupt_mask(port))
42699- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
42700-
42701 port->uartclk = 1;
42702
42703 if (cfg->uart_flags & UPF_CONS_FLOW) {
42704diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
42705index 2c7230a..2104f16 100644
42706--- a/drivers/tty/serial/serial_core.c
42707+++ b/drivers/tty/serial/serial_core.c
42708@@ -1455,7 +1455,7 @@ static void uart_hangup(struct tty_struct *tty)
42709 uart_flush_buffer(tty);
42710 uart_shutdown(tty, state);
42711 spin_lock_irqsave(&port->lock, flags);
42712- port->count = 0;
42713+ atomic_set(&port->count, 0);
42714 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
42715 spin_unlock_irqrestore(&port->lock, flags);
42716 tty_port_tty_set(port, NULL);
42717@@ -1551,7 +1551,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
42718 goto end;
42719 }
42720
42721- port->count++;
42722+ atomic_inc(&port->count);
42723 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
42724 retval = -ENXIO;
42725 goto err_dec_count;
42726@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
42727 /*
42728 * Make sure the device is in D0 state.
42729 */
42730- if (port->count == 1)
42731+ if (atomic_read(&port->count) == 1)
42732 uart_change_pm(state, 0);
42733
42734 /*
42735@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
42736 end:
42737 return retval;
42738 err_dec_count:
42739- port->count--;
42740+ atomic_dec(&port->count);
42741 mutex_unlock(&port->mutex);
42742 goto end;
42743 }
42744diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
42745index 9e071f6..f30ae69 100644
42746--- a/drivers/tty/synclink.c
42747+++ b/drivers/tty/synclink.c
42748@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
42749
42750 if (debug_level >= DEBUG_LEVEL_INFO)
42751 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
42752- __FILE__,__LINE__, info->device_name, info->port.count);
42753+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
42754
42755 if (tty_port_close_start(&info->port, tty, filp) == 0)
42756 goto cleanup;
42757@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
42758 cleanup:
42759 if (debug_level >= DEBUG_LEVEL_INFO)
42760 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
42761- tty->driver->name, info->port.count);
42762+ tty->driver->name, atomic_read(&info->port.count));
42763
42764 } /* end of mgsl_close() */
42765
42766@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
42767
42768 mgsl_flush_buffer(tty);
42769 shutdown(info);
42770-
42771- info->port.count = 0;
42772+
42773+ atomic_set(&info->port.count, 0);
42774 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
42775 info->port.tty = NULL;
42776
42777@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
42778
42779 if (debug_level >= DEBUG_LEVEL_INFO)
42780 printk("%s(%d):block_til_ready before block on %s count=%d\n",
42781- __FILE__,__LINE__, tty->driver->name, port->count );
42782+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
42783
42784 spin_lock_irqsave(&info->irq_spinlock, flags);
42785 if (!tty_hung_up_p(filp)) {
42786 extra_count = true;
42787- port->count--;
42788+ atomic_dec(&port->count);
42789 }
42790 spin_unlock_irqrestore(&info->irq_spinlock, flags);
42791 port->blocked_open++;
42792@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
42793
42794 if (debug_level >= DEBUG_LEVEL_INFO)
42795 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
42796- __FILE__,__LINE__, tty->driver->name, port->count );
42797+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
42798
42799 tty_unlock(tty);
42800 schedule();
42801@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
42802
42803 /* FIXME: Racy on hangup during close wait */
42804 if (extra_count)
42805- port->count++;
42806+ atomic_inc(&port->count);
42807 port->blocked_open--;
42808
42809 if (debug_level >= DEBUG_LEVEL_INFO)
42810 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
42811- __FILE__,__LINE__, tty->driver->name, port->count );
42812+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
42813
42814 if (!retval)
42815 port->flags |= ASYNC_NORMAL_ACTIVE;
42816@@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
42817
42818 if (debug_level >= DEBUG_LEVEL_INFO)
42819 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
42820- __FILE__,__LINE__,tty->driver->name, info->port.count);
42821+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
42822
42823 /* If port is closing, signal caller to try again */
42824 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
42825@@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
42826 spin_unlock_irqrestore(&info->netlock, flags);
42827 goto cleanup;
42828 }
42829- info->port.count++;
42830+ atomic_inc(&info->port.count);
42831 spin_unlock_irqrestore(&info->netlock, flags);
42832
42833- if (info->port.count == 1) {
42834+ if (atomic_read(&info->port.count) == 1) {
42835 /* 1st open on this device, init hardware */
42836 retval = startup(info);
42837 if (retval < 0)
42838@@ -3451,8 +3451,8 @@ cleanup:
42839 if (retval) {
42840 if (tty->count == 1)
42841 info->port.tty = NULL; /* tty layer will release tty struct */
42842- if(info->port.count)
42843- info->port.count--;
42844+ if (atomic_read(&info->port.count))
42845+ atomic_dec(&info->port.count);
42846 }
42847
42848 return retval;
42849@@ -7662,7 +7662,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
42850 unsigned short new_crctype;
42851
42852 /* return error if TTY interface open */
42853- if (info->port.count)
42854+ if (atomic_read(&info->port.count))
42855 return -EBUSY;
42856
42857 switch (encoding)
42858@@ -7757,7 +7757,7 @@ static int hdlcdev_open(struct net_device *dev)
42859
42860 /* arbitrate between network and tty opens */
42861 spin_lock_irqsave(&info->netlock, flags);
42862- if (info->port.count != 0 || info->netcount != 0) {
42863+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
42864 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
42865 spin_unlock_irqrestore(&info->netlock, flags);
42866 return -EBUSY;
42867@@ -7843,7 +7843,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42868 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
42869
42870 /* return error if TTY interface open */
42871- if (info->port.count)
42872+ if (atomic_read(&info->port.count))
42873 return -EBUSY;
42874
42875 if (cmd != SIOCWANDEV)
42876diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
42877index aba1e59..877ac33 100644
42878--- a/drivers/tty/synclink_gt.c
42879+++ b/drivers/tty/synclink_gt.c
42880@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
42881 tty->driver_data = info;
42882 info->port.tty = tty;
42883
42884- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
42885+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
42886
42887 /* If port is closing, signal caller to try again */
42888 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
42889@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
42890 mutex_unlock(&info->port.mutex);
42891 goto cleanup;
42892 }
42893- info->port.count++;
42894+ atomic_inc(&info->port.count);
42895 spin_unlock_irqrestore(&info->netlock, flags);
42896
42897- if (info->port.count == 1) {
42898+ if (atomic_read(&info->port.count) == 1) {
42899 /* 1st open on this device, init hardware */
42900 retval = startup(info);
42901 if (retval < 0) {
42902@@ -716,8 +716,8 @@ cleanup:
42903 if (retval) {
42904 if (tty->count == 1)
42905 info->port.tty = NULL; /* tty layer will release tty struct */
42906- if(info->port.count)
42907- info->port.count--;
42908+ if(atomic_read(&info->port.count))
42909+ atomic_dec(&info->port.count);
42910 }
42911
42912 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
42913@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
42914
42915 if (sanity_check(info, tty->name, "close"))
42916 return;
42917- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
42918+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
42919
42920 if (tty_port_close_start(&info->port, tty, filp) == 0)
42921 goto cleanup;
42922@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
42923 tty_port_close_end(&info->port, tty);
42924 info->port.tty = NULL;
42925 cleanup:
42926- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
42927+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
42928 }
42929
42930 static void hangup(struct tty_struct *tty)
42931@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
42932 shutdown(info);
42933
42934 spin_lock_irqsave(&info->port.lock, flags);
42935- info->port.count = 0;
42936+ atomic_set(&info->port.count, 0);
42937 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
42938 info->port.tty = NULL;
42939 spin_unlock_irqrestore(&info->port.lock, flags);
42940@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
42941 unsigned short new_crctype;
42942
42943 /* return error if TTY interface open */
42944- if (info->port.count)
42945+ if (atomic_read(&info->port.count))
42946 return -EBUSY;
42947
42948 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
42949@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
42950
42951 /* arbitrate between network and tty opens */
42952 spin_lock_irqsave(&info->netlock, flags);
42953- if (info->port.count != 0 || info->netcount != 0) {
42954+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
42955 DBGINFO(("%s hdlc_open busy\n", dev->name));
42956 spin_unlock_irqrestore(&info->netlock, flags);
42957 return -EBUSY;
42958@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42959 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
42960
42961 /* return error if TTY interface open */
42962- if (info->port.count)
42963+ if (atomic_read(&info->port.count))
42964 return -EBUSY;
42965
42966 if (cmd != SIOCWANDEV)
42967@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
42968 if (port == NULL)
42969 continue;
42970 spin_lock(&port->lock);
42971- if ((port->port.count || port->netcount) &&
42972+ if ((atomic_read(&port->port.count) || port->netcount) &&
42973 port->pending_bh && !port->bh_running &&
42974 !port->bh_requested) {
42975 DBGISR(("%s bh queued\n", port->device_name));
42976@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
42977 spin_lock_irqsave(&info->lock, flags);
42978 if (!tty_hung_up_p(filp)) {
42979 extra_count = true;
42980- port->count--;
42981+ atomic_dec(&port->count);
42982 }
42983 spin_unlock_irqrestore(&info->lock, flags);
42984 port->blocked_open++;
42985@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
42986 remove_wait_queue(&port->open_wait, &wait);
42987
42988 if (extra_count)
42989- port->count++;
42990+ atomic_inc(&port->count);
42991 port->blocked_open--;
42992
42993 if (!retval)
42994diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
42995index fd43fb6..34704ad 100644
42996--- a/drivers/tty/synclinkmp.c
42997+++ b/drivers/tty/synclinkmp.c
42998@@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
42999
43000 if (debug_level >= DEBUG_LEVEL_INFO)
43001 printk("%s(%d):%s open(), old ref count = %d\n",
43002- __FILE__,__LINE__,tty->driver->name, info->port.count);
43003+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43004
43005 /* If port is closing, signal caller to try again */
43006 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43007@@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43008 spin_unlock_irqrestore(&info->netlock, flags);
43009 goto cleanup;
43010 }
43011- info->port.count++;
43012+ atomic_inc(&info->port.count);
43013 spin_unlock_irqrestore(&info->netlock, flags);
43014
43015- if (info->port.count == 1) {
43016+ if (atomic_read(&info->port.count) == 1) {
43017 /* 1st open on this device, init hardware */
43018 retval = startup(info);
43019 if (retval < 0)
43020@@ -797,8 +797,8 @@ cleanup:
43021 if (retval) {
43022 if (tty->count == 1)
43023 info->port.tty = NULL; /* tty layer will release tty struct */
43024- if(info->port.count)
43025- info->port.count--;
43026+ if(atomic_read(&info->port.count))
43027+ atomic_dec(&info->port.count);
43028 }
43029
43030 return retval;
43031@@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43032
43033 if (debug_level >= DEBUG_LEVEL_INFO)
43034 printk("%s(%d):%s close() entry, count=%d\n",
43035- __FILE__,__LINE__, info->device_name, info->port.count);
43036+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43037
43038 if (tty_port_close_start(&info->port, tty, filp) == 0)
43039 goto cleanup;
43040@@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43041 cleanup:
43042 if (debug_level >= DEBUG_LEVEL_INFO)
43043 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
43044- tty->driver->name, info->port.count);
43045+ tty->driver->name, atomic_read(&info->port.count));
43046 }
43047
43048 /* Called by tty_hangup() when a hangup is signaled.
43049@@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
43050 shutdown(info);
43051
43052 spin_lock_irqsave(&info->port.lock, flags);
43053- info->port.count = 0;
43054+ atomic_set(&info->port.count, 0);
43055 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43056 info->port.tty = NULL;
43057 spin_unlock_irqrestore(&info->port.lock, flags);
43058@@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43059 unsigned short new_crctype;
43060
43061 /* return error if TTY interface open */
43062- if (info->port.count)
43063+ if (atomic_read(&info->port.count))
43064 return -EBUSY;
43065
43066 switch (encoding)
43067@@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
43068
43069 /* arbitrate between network and tty opens */
43070 spin_lock_irqsave(&info->netlock, flags);
43071- if (info->port.count != 0 || info->netcount != 0) {
43072+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43073 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43074 spin_unlock_irqrestore(&info->netlock, flags);
43075 return -EBUSY;
43076@@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43077 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43078
43079 /* return error if TTY interface open */
43080- if (info->port.count)
43081+ if (atomic_read(&info->port.count))
43082 return -EBUSY;
43083
43084 if (cmd != SIOCWANDEV)
43085@@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
43086 * do not request bottom half processing if the
43087 * device is not open in a normal mode.
43088 */
43089- if ( port && (port->port.count || port->netcount) &&
43090+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
43091 port->pending_bh && !port->bh_running &&
43092 !port->bh_requested ) {
43093 if ( debug_level >= DEBUG_LEVEL_ISR )
43094@@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43095
43096 if (debug_level >= DEBUG_LEVEL_INFO)
43097 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
43098- __FILE__,__LINE__, tty->driver->name, port->count );
43099+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43100
43101 spin_lock_irqsave(&info->lock, flags);
43102 if (!tty_hung_up_p(filp)) {
43103 extra_count = true;
43104- port->count--;
43105+ atomic_dec(&port->count);
43106 }
43107 spin_unlock_irqrestore(&info->lock, flags);
43108 port->blocked_open++;
43109@@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43110
43111 if (debug_level >= DEBUG_LEVEL_INFO)
43112 printk("%s(%d):%s block_til_ready() count=%d\n",
43113- __FILE__,__LINE__, tty->driver->name, port->count );
43114+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43115
43116 tty_unlock(tty);
43117 schedule();
43118@@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43119 remove_wait_queue(&port->open_wait, &wait);
43120
43121 if (extra_count)
43122- port->count++;
43123+ atomic_inc(&port->count);
43124 port->blocked_open--;
43125
43126 if (debug_level >= DEBUG_LEVEL_INFO)
43127 printk("%s(%d):%s block_til_ready() after, count=%d\n",
43128- __FILE__,__LINE__, tty->driver->name, port->count );
43129+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43130
43131 if (!retval)
43132 port->flags |= ASYNC_NORMAL_ACTIVE;
43133diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
43134index b3c4a25..723916f 100644
43135--- a/drivers/tty/sysrq.c
43136+++ b/drivers/tty/sysrq.c
43137@@ -867,7 +867,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
43138 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
43139 size_t count, loff_t *ppos)
43140 {
43141- if (count) {
43142+ if (count && capable(CAP_SYS_ADMIN)) {
43143 char c;
43144
43145 if (get_user(c, buf))
43146diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
43147index da9fde8..c07975f 100644
43148--- a/drivers/tty/tty_io.c
43149+++ b/drivers/tty/tty_io.c
43150@@ -3391,7 +3391,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
43151
43152 void tty_default_fops(struct file_operations *fops)
43153 {
43154- *fops = tty_fops;
43155+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
43156 }
43157
43158 /*
43159diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
43160index c578229..45aa9ee 100644
43161--- a/drivers/tty/tty_ldisc.c
43162+++ b/drivers/tty/tty_ldisc.c
43163@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
43164 if (atomic_dec_and_test(&ld->users)) {
43165 struct tty_ldisc_ops *ldo = ld->ops;
43166
43167- ldo->refcount--;
43168+ atomic_dec(&ldo->refcount);
43169 module_put(ldo->owner);
43170 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43171
43172@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
43173 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43174 tty_ldiscs[disc] = new_ldisc;
43175 new_ldisc->num = disc;
43176- new_ldisc->refcount = 0;
43177+ atomic_set(&new_ldisc->refcount, 0);
43178 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43179
43180 return ret;
43181@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
43182 return -EINVAL;
43183
43184 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43185- if (tty_ldiscs[disc]->refcount)
43186+ if (atomic_read(&tty_ldiscs[disc]->refcount))
43187 ret = -EBUSY;
43188 else
43189 tty_ldiscs[disc] = NULL;
43190@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
43191 if (ldops) {
43192 ret = ERR_PTR(-EAGAIN);
43193 if (try_module_get(ldops->owner)) {
43194- ldops->refcount++;
43195+ atomic_inc(&ldops->refcount);
43196 ret = ldops;
43197 }
43198 }
43199@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
43200 unsigned long flags;
43201
43202 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43203- ldops->refcount--;
43204+ atomic_dec(&ldops->refcount);
43205 module_put(ldops->owner);
43206 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43207 }
43208diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
43209index b7ff59d..7c6105e 100644
43210--- a/drivers/tty/tty_port.c
43211+++ b/drivers/tty/tty_port.c
43212@@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
43213 unsigned long flags;
43214
43215 spin_lock_irqsave(&port->lock, flags);
43216- port->count = 0;
43217+ atomic_set(&port->count, 0);
43218 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43219 if (port->tty) {
43220 set_bit(TTY_IO_ERROR, &port->tty->flags);
43221@@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
43222 /* The port lock protects the port counts */
43223 spin_lock_irqsave(&port->lock, flags);
43224 if (!tty_hung_up_p(filp))
43225- port->count--;
43226+ atomic_dec(&port->count);
43227 port->blocked_open++;
43228 spin_unlock_irqrestore(&port->lock, flags);
43229
43230@@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
43231 we must not mess that up further */
43232 spin_lock_irqsave(&port->lock, flags);
43233 if (!tty_hung_up_p(filp))
43234- port->count++;
43235+ atomic_inc(&port->count);
43236 port->blocked_open--;
43237 if (retval == 0)
43238 port->flags |= ASYNC_NORMAL_ACTIVE;
43239@@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
43240 return 0;
43241 }
43242
43243- if (tty->count == 1 && port->count != 1) {
43244+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
43245 printk(KERN_WARNING
43246 "tty_port_close_start: tty->count = 1 port count = %d.\n",
43247- port->count);
43248- port->count = 1;
43249+ atomic_read(&port->count));
43250+ atomic_set(&port->count, 1);
43251 }
43252- if (--port->count < 0) {
43253+ if (atomic_dec_return(&port->count) < 0) {
43254 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
43255- port->count);
43256- port->count = 0;
43257+ atomic_read(&port->count));
43258+ atomic_set(&port->count, 0);
43259 }
43260
43261- if (port->count) {
43262+ if (atomic_read(&port->count)) {
43263 spin_unlock_irqrestore(&port->lock, flags);
43264 if (port->ops->drop)
43265 port->ops->drop(port);
43266@@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
43267 {
43268 spin_lock_irq(&port->lock);
43269 if (!tty_hung_up_p(filp))
43270- ++port->count;
43271+ atomic_inc(&port->count);
43272 spin_unlock_irq(&port->lock);
43273 tty_port_tty_set(port, tty);
43274
43275diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
43276index 681765b..d3ccdf2 100644
43277--- a/drivers/tty/vt/keyboard.c
43278+++ b/drivers/tty/vt/keyboard.c
43279@@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
43280 kbd->kbdmode == VC_OFF) &&
43281 value != KVAL(K_SAK))
43282 return; /* SAK is allowed even in raw mode */
43283+
43284+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43285+ {
43286+ void *func = fn_handler[value];
43287+ if (func == fn_show_state || func == fn_show_ptregs ||
43288+ func == fn_show_mem)
43289+ return;
43290+ }
43291+#endif
43292+
43293 fn_handler[value](vc);
43294 }
43295
43296@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
43297 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
43298 return -EFAULT;
43299
43300- if (!capable(CAP_SYS_TTY_CONFIG))
43301- perm = 0;
43302-
43303 switch (cmd) {
43304 case KDGKBENT:
43305 /* Ensure another thread doesn't free it under us */
43306@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
43307 spin_unlock_irqrestore(&kbd_event_lock, flags);
43308 return put_user(val, &user_kbe->kb_value);
43309 case KDSKBENT:
43310+ if (!capable(CAP_SYS_TTY_CONFIG))
43311+ perm = 0;
43312+
43313 if (!perm)
43314 return -EPERM;
43315 if (!i && v == K_NOSUCHMAP) {
43316@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
43317 int i, j, k;
43318 int ret;
43319
43320- if (!capable(CAP_SYS_TTY_CONFIG))
43321- perm = 0;
43322-
43323 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
43324 if (!kbs) {
43325 ret = -ENOMEM;
43326@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
43327 kfree(kbs);
43328 return ((p && *p) ? -EOVERFLOW : 0);
43329 case KDSKBSENT:
43330+ if (!capable(CAP_SYS_TTY_CONFIG))
43331+ perm = 0;
43332+
43333 if (!perm) {
43334 ret = -EPERM;
43335 goto reterr;
43336diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
43337index 5110f36..8dc0a74 100644
43338--- a/drivers/uio/uio.c
43339+++ b/drivers/uio/uio.c
43340@@ -25,6 +25,7 @@
43341 #include <linux/kobject.h>
43342 #include <linux/cdev.h>
43343 #include <linux/uio_driver.h>
43344+#include <asm/local.h>
43345
43346 #define UIO_MAX_DEVICES (1U << MINORBITS)
43347
43348@@ -32,10 +33,10 @@ struct uio_device {
43349 struct module *owner;
43350 struct device *dev;
43351 int minor;
43352- atomic_t event;
43353+ atomic_unchecked_t event;
43354 struct fasync_struct *async_queue;
43355 wait_queue_head_t wait;
43356- int vma_count;
43357+ local_t vma_count;
43358 struct uio_info *info;
43359 struct kobject *map_dir;
43360 struct kobject *portio_dir;
43361@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
43362 struct device_attribute *attr, char *buf)
43363 {
43364 struct uio_device *idev = dev_get_drvdata(dev);
43365- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
43366+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
43367 }
43368
43369 static struct device_attribute uio_class_attributes[] = {
43370@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
43371 {
43372 struct uio_device *idev = info->uio_dev;
43373
43374- atomic_inc(&idev->event);
43375+ atomic_inc_unchecked(&idev->event);
43376 wake_up_interruptible(&idev->wait);
43377 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
43378 }
43379@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
43380 }
43381
43382 listener->dev = idev;
43383- listener->event_count = atomic_read(&idev->event);
43384+ listener->event_count = atomic_read_unchecked(&idev->event);
43385 filep->private_data = listener;
43386
43387 if (idev->info->open) {
43388@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
43389 return -EIO;
43390
43391 poll_wait(filep, &idev->wait, wait);
43392- if (listener->event_count != atomic_read(&idev->event))
43393+ if (listener->event_count != atomic_read_unchecked(&idev->event))
43394 return POLLIN | POLLRDNORM;
43395 return 0;
43396 }
43397@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
43398 do {
43399 set_current_state(TASK_INTERRUPTIBLE);
43400
43401- event_count = atomic_read(&idev->event);
43402+ event_count = atomic_read_unchecked(&idev->event);
43403 if (event_count != listener->event_count) {
43404 if (copy_to_user(buf, &event_count, count))
43405 retval = -EFAULT;
43406@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
43407 static void uio_vma_open(struct vm_area_struct *vma)
43408 {
43409 struct uio_device *idev = vma->vm_private_data;
43410- idev->vma_count++;
43411+ local_inc(&idev->vma_count);
43412 }
43413
43414 static void uio_vma_close(struct vm_area_struct *vma)
43415 {
43416 struct uio_device *idev = vma->vm_private_data;
43417- idev->vma_count--;
43418+ local_dec(&idev->vma_count);
43419 }
43420
43421 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
43422@@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
43423 idev->owner = owner;
43424 idev->info = info;
43425 init_waitqueue_head(&idev->wait);
43426- atomic_set(&idev->event, 0);
43427+ atomic_set_unchecked(&idev->event, 0);
43428
43429 ret = uio_get_minor(idev);
43430 if (ret)
43431diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
43432index b7eb86a..36d28af 100644
43433--- a/drivers/usb/atm/cxacru.c
43434+++ b/drivers/usb/atm/cxacru.c
43435@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
43436 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
43437 if (ret < 2)
43438 return -EINVAL;
43439- if (index < 0 || index > 0x7f)
43440+ if (index > 0x7f)
43441 return -EINVAL;
43442 pos += tmp;
43443
43444diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
43445index 35f10bf..6a38a0b 100644
43446--- a/drivers/usb/atm/usbatm.c
43447+++ b/drivers/usb/atm/usbatm.c
43448@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43449 if (printk_ratelimit())
43450 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
43451 __func__, vpi, vci);
43452- atomic_inc(&vcc->stats->rx_err);
43453+ atomic_inc_unchecked(&vcc->stats->rx_err);
43454 return;
43455 }
43456
43457@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43458 if (length > ATM_MAX_AAL5_PDU) {
43459 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
43460 __func__, length, vcc);
43461- atomic_inc(&vcc->stats->rx_err);
43462+ atomic_inc_unchecked(&vcc->stats->rx_err);
43463 goto out;
43464 }
43465
43466@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43467 if (sarb->len < pdu_length) {
43468 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
43469 __func__, pdu_length, sarb->len, vcc);
43470- atomic_inc(&vcc->stats->rx_err);
43471+ atomic_inc_unchecked(&vcc->stats->rx_err);
43472 goto out;
43473 }
43474
43475 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
43476 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
43477 __func__, vcc);
43478- atomic_inc(&vcc->stats->rx_err);
43479+ atomic_inc_unchecked(&vcc->stats->rx_err);
43480 goto out;
43481 }
43482
43483@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43484 if (printk_ratelimit())
43485 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
43486 __func__, length);
43487- atomic_inc(&vcc->stats->rx_drop);
43488+ atomic_inc_unchecked(&vcc->stats->rx_drop);
43489 goto out;
43490 }
43491
43492@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43493
43494 vcc->push(vcc, skb);
43495
43496- atomic_inc(&vcc->stats->rx);
43497+ atomic_inc_unchecked(&vcc->stats->rx);
43498 out:
43499 skb_trim(sarb, 0);
43500 }
43501@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
43502 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
43503
43504 usbatm_pop(vcc, skb);
43505- atomic_inc(&vcc->stats->tx);
43506+ atomic_inc_unchecked(&vcc->stats->tx);
43507
43508 skb = skb_dequeue(&instance->sndqueue);
43509 }
43510@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
43511 if (!left--)
43512 return sprintf(page,
43513 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
43514- atomic_read(&atm_dev->stats.aal5.tx),
43515- atomic_read(&atm_dev->stats.aal5.tx_err),
43516- atomic_read(&atm_dev->stats.aal5.rx),
43517- atomic_read(&atm_dev->stats.aal5.rx_err),
43518- atomic_read(&atm_dev->stats.aal5.rx_drop));
43519+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
43520+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
43521+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
43522+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
43523+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
43524
43525 if (!left--) {
43526 if (instance->disconnected)
43527diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
43528index cbacea9..246cccd 100644
43529--- a/drivers/usb/core/devices.c
43530+++ b/drivers/usb/core/devices.c
43531@@ -126,7 +126,7 @@ static const char format_endpt[] =
43532 * time it gets called.
43533 */
43534 static struct device_connect_event {
43535- atomic_t count;
43536+ atomic_unchecked_t count;
43537 wait_queue_head_t wait;
43538 } device_event = {
43539 .count = ATOMIC_INIT(1),
43540@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
43541
43542 void usbfs_conn_disc_event(void)
43543 {
43544- atomic_add(2, &device_event.count);
43545+ atomic_add_unchecked(2, &device_event.count);
43546 wake_up(&device_event.wait);
43547 }
43548
43549@@ -645,7 +645,7 @@ static unsigned int usb_device_poll(struct file *file,
43550
43551 poll_wait(file, &device_event.wait, wait);
43552
43553- event_count = atomic_read(&device_event.count);
43554+ event_count = atomic_read_unchecked(&device_event.count);
43555 if (file->f_version != event_count) {
43556 file->f_version = event_count;
43557 return POLLIN | POLLRDNORM;
43558diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
43559index 8e64adf..9a33a3c 100644
43560--- a/drivers/usb/core/hcd.c
43561+++ b/drivers/usb/core/hcd.c
43562@@ -1522,7 +1522,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
43563 */
43564 usb_get_urb(urb);
43565 atomic_inc(&urb->use_count);
43566- atomic_inc(&urb->dev->urbnum);
43567+ atomic_inc_unchecked(&urb->dev->urbnum);
43568 usbmon_urb_submit(&hcd->self, urb);
43569
43570 /* NOTE requirements on root-hub callers (usbfs and the hub
43571@@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
43572 urb->hcpriv = NULL;
43573 INIT_LIST_HEAD(&urb->urb_list);
43574 atomic_dec(&urb->use_count);
43575- atomic_dec(&urb->dev->urbnum);
43576+ atomic_dec_unchecked(&urb->dev->urbnum);
43577 if (atomic_read(&urb->reject))
43578 wake_up(&usb_kill_urb_queue);
43579 usb_put_urb(urb);
43580diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
43581index 818e4a0..0fc9589 100644
43582--- a/drivers/usb/core/sysfs.c
43583+++ b/drivers/usb/core/sysfs.c
43584@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
43585 struct usb_device *udev;
43586
43587 udev = to_usb_device(dev);
43588- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
43589+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
43590 }
43591 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
43592
43593diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
43594index f81b925..78d22ec 100644
43595--- a/drivers/usb/core/usb.c
43596+++ b/drivers/usb/core/usb.c
43597@@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
43598 set_dev_node(&dev->dev, dev_to_node(bus->controller));
43599 dev->state = USB_STATE_ATTACHED;
43600 dev->lpm_disable_count = 1;
43601- atomic_set(&dev->urbnum, 0);
43602+ atomic_set_unchecked(&dev->urbnum, 0);
43603
43604 INIT_LIST_HEAD(&dev->ep0.urb_list);
43605 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
43606diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
43607index 5e29dde..eca992f 100644
43608--- a/drivers/usb/early/ehci-dbgp.c
43609+++ b/drivers/usb/early/ehci-dbgp.c
43610@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
43611
43612 #ifdef CONFIG_KGDB
43613 static struct kgdb_io kgdbdbgp_io_ops;
43614-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
43615+static struct kgdb_io kgdbdbgp_io_ops_console;
43616+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
43617 #else
43618 #define dbgp_kgdb_mode (0)
43619 #endif
43620@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
43621 .write_char = kgdbdbgp_write_char,
43622 };
43623
43624+static struct kgdb_io kgdbdbgp_io_ops_console = {
43625+ .name = "kgdbdbgp",
43626+ .read_char = kgdbdbgp_read_char,
43627+ .write_char = kgdbdbgp_write_char,
43628+ .is_console = 1
43629+};
43630+
43631 static int kgdbdbgp_wait_time;
43632
43633 static int __init kgdbdbgp_parse_config(char *str)
43634@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
43635 ptr++;
43636 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
43637 }
43638- kgdb_register_io_module(&kgdbdbgp_io_ops);
43639- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
43640+ if (early_dbgp_console.index != -1)
43641+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
43642+ else
43643+ kgdb_register_io_module(&kgdbdbgp_io_ops);
43644
43645 return 0;
43646 }
43647diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
43648index 598dcc1..032dd4f 100644
43649--- a/drivers/usb/gadget/u_serial.c
43650+++ b/drivers/usb/gadget/u_serial.c
43651@@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
43652 spin_lock_irq(&port->port_lock);
43653
43654 /* already open? Great. */
43655- if (port->port.count) {
43656+ if (atomic_read(&port->port.count)) {
43657 status = 0;
43658- port->port.count++;
43659+ atomic_inc(&port->port.count);
43660
43661 /* currently opening/closing? wait ... */
43662 } else if (port->openclose) {
43663@@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
43664 tty->driver_data = port;
43665 port->port.tty = tty;
43666
43667- port->port.count = 1;
43668+ atomic_set(&port->port.count, 1);
43669 port->openclose = false;
43670
43671 /* if connected, start the I/O stream */
43672@@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
43673
43674 spin_lock_irq(&port->port_lock);
43675
43676- if (port->port.count != 1) {
43677- if (port->port.count == 0)
43678+ if (atomic_read(&port->port.count) != 1) {
43679+ if (atomic_read(&port->port.count) == 0)
43680 WARN_ON(1);
43681 else
43682- --port->port.count;
43683+ atomic_dec(&port->port.count);
43684 goto exit;
43685 }
43686
43687@@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
43688 * and sleep if necessary
43689 */
43690 port->openclose = true;
43691- port->port.count = 0;
43692+ atomic_set(&port->port.count, 0);
43693
43694 gser = port->port_usb;
43695 if (gser && gser->disconnect)
43696@@ -1159,7 +1159,7 @@ static int gs_closed(struct gs_port *port)
43697 int cond;
43698
43699 spin_lock_irq(&port->port_lock);
43700- cond = (port->port.count == 0) && !port->openclose;
43701+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
43702 spin_unlock_irq(&port->port_lock);
43703 return cond;
43704 }
43705@@ -1273,7 +1273,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
43706 /* if it's already open, start I/O ... and notify the serial
43707 * protocol about open/close status (connect/disconnect).
43708 */
43709- if (port->port.count) {
43710+ if (atomic_read(&port->port.count)) {
43711 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
43712 gs_start_io(port);
43713 if (gser->connect)
43714@@ -1320,7 +1320,7 @@ void gserial_disconnect(struct gserial *gser)
43715
43716 port->port_usb = NULL;
43717 gser->ioport = NULL;
43718- if (port->port.count > 0 || port->openclose) {
43719+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
43720 wake_up_interruptible(&port->drain_wait);
43721 if (port->port.tty)
43722 tty_hangup(port->port.tty);
43723@@ -1336,7 +1336,7 @@ void gserial_disconnect(struct gserial *gser)
43724
43725 /* finally, free any unused/unusable I/O buffers */
43726 spin_lock_irqsave(&port->port_lock, flags);
43727- if (port->port.count == 0 && !port->openclose)
43728+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
43729 gs_buf_free(&port->port_write_buf);
43730 gs_free_requests(gser->out, &port->read_pool, NULL);
43731 gs_free_requests(gser->out, &port->read_queue, NULL);
43732diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
43733index 5f3bcd3..bfca43f 100644
43734--- a/drivers/usb/serial/console.c
43735+++ b/drivers/usb/serial/console.c
43736@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
43737
43738 info->port = port;
43739
43740- ++port->port.count;
43741+ atomic_inc(&port->port.count);
43742 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
43743 if (serial->type->set_termios) {
43744 /*
43745@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
43746 }
43747 /* Now that any required fake tty operations are completed restore
43748 * the tty port count */
43749- --port->port.count;
43750+ atomic_dec(&port->port.count);
43751 /* The console is special in terms of closing the device so
43752 * indicate this port is now acting as a system console. */
43753 port->port.console = 1;
43754@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
43755 free_tty:
43756 kfree(tty);
43757 reset_open_count:
43758- port->port.count = 0;
43759+ atomic_set(&port->port.count, 0);
43760 usb_autopm_put_interface(serial->interface);
43761 error_get_interface:
43762 usb_serial_put(serial);
43763diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
43764index 75f70f0..d467e1a 100644
43765--- a/drivers/usb/storage/usb.h
43766+++ b/drivers/usb/storage/usb.h
43767@@ -63,7 +63,7 @@ struct us_unusual_dev {
43768 __u8 useProtocol;
43769 __u8 useTransport;
43770 int (*initFunction)(struct us_data *);
43771-};
43772+} __do_const;
43773
43774
43775 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
43776diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
43777index d6bea3e..60b250e 100644
43778--- a/drivers/usb/wusbcore/wa-hc.h
43779+++ b/drivers/usb/wusbcore/wa-hc.h
43780@@ -192,7 +192,7 @@ struct wahc {
43781 struct list_head xfer_delayed_list;
43782 spinlock_t xfer_list_lock;
43783 struct work_struct xfer_work;
43784- atomic_t xfer_id_count;
43785+ atomic_unchecked_t xfer_id_count;
43786 };
43787
43788
43789@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
43790 INIT_LIST_HEAD(&wa->xfer_delayed_list);
43791 spin_lock_init(&wa->xfer_list_lock);
43792 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
43793- atomic_set(&wa->xfer_id_count, 1);
43794+ atomic_set_unchecked(&wa->xfer_id_count, 1);
43795 }
43796
43797 /**
43798diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
43799index 57c01ab..8a05959 100644
43800--- a/drivers/usb/wusbcore/wa-xfer.c
43801+++ b/drivers/usb/wusbcore/wa-xfer.c
43802@@ -296,7 +296,7 @@ out:
43803 */
43804 static void wa_xfer_id_init(struct wa_xfer *xfer)
43805 {
43806- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
43807+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
43808 }
43809
43810 /*
43811diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
43812index 8c55011..eed4ae1a 100644
43813--- a/drivers/video/aty/aty128fb.c
43814+++ b/drivers/video/aty/aty128fb.c
43815@@ -149,7 +149,7 @@ enum {
43816 };
43817
43818 /* Must match above enum */
43819-static char * const r128_family[] = {
43820+static const char * const r128_family[] = {
43821 "AGP",
43822 "PCI",
43823 "PRO AGP",
43824diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
43825index 4f27fdc..d3537e6 100644
43826--- a/drivers/video/aty/atyfb_base.c
43827+++ b/drivers/video/aty/atyfb_base.c
43828@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
43829 par->accel_flags = var->accel_flags; /* hack */
43830
43831 if (var->accel_flags) {
43832- info->fbops->fb_sync = atyfb_sync;
43833+ pax_open_kernel();
43834+ *(void **)&info->fbops->fb_sync = atyfb_sync;
43835+ pax_close_kernel();
43836 info->flags &= ~FBINFO_HWACCEL_DISABLED;
43837 } else {
43838- info->fbops->fb_sync = NULL;
43839+ pax_open_kernel();
43840+ *(void **)&info->fbops->fb_sync = NULL;
43841+ pax_close_kernel();
43842 info->flags |= FBINFO_HWACCEL_DISABLED;
43843 }
43844
43845diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
43846index 95ec042..ae33e7a 100644
43847--- a/drivers/video/aty/mach64_cursor.c
43848+++ b/drivers/video/aty/mach64_cursor.c
43849@@ -208,7 +208,9 @@ int aty_init_cursor(struct fb_info *info)
43850 info->sprite.buf_align = 16; /* and 64 lines tall. */
43851 info->sprite.flags = FB_PIXMAP_IO;
43852
43853- info->fbops->fb_cursor = atyfb_cursor;
43854+ pax_open_kernel();
43855+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
43856+ pax_close_kernel();
43857
43858 return 0;
43859 }
43860diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43861index 6c5ed6b..b727c88 100644
43862--- a/drivers/video/backlight/kb3886_bl.c
43863+++ b/drivers/video/backlight/kb3886_bl.c
43864@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
43865 static unsigned long kb3886bl_flags;
43866 #define KB3886BL_SUSPENDED 0x01
43867
43868-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
43869+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
43870 {
43871 .ident = "Sahara Touch-iT",
43872 .matches = {
43873diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
43874index 88cad6b..dd746c7 100644
43875--- a/drivers/video/fb_defio.c
43876+++ b/drivers/video/fb_defio.c
43877@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
43878
43879 BUG_ON(!fbdefio);
43880 mutex_init(&fbdefio->lock);
43881- info->fbops->fb_mmap = fb_deferred_io_mmap;
43882+ pax_open_kernel();
43883+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
43884+ pax_close_kernel();
43885 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
43886 INIT_LIST_HEAD(&fbdefio->pagelist);
43887 if (fbdefio->delay == 0) /* set a default of 1 s */
43888@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
43889 page->mapping = NULL;
43890 }
43891
43892- info->fbops->fb_mmap = NULL;
43893+ *(void **)&info->fbops->fb_mmap = NULL;
43894 mutex_destroy(&fbdefio->lock);
43895 }
43896 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
43897diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43898index 5c3960d..15cf8fc 100644
43899--- a/drivers/video/fbcmap.c
43900+++ b/drivers/video/fbcmap.c
43901@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43902 rc = -ENODEV;
43903 goto out;
43904 }
43905- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43906- !info->fbops->fb_setcmap)) {
43907+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43908 rc = -EINVAL;
43909 goto out1;
43910 }
43911diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43912index dc61c12..e29796e 100644
43913--- a/drivers/video/fbmem.c
43914+++ b/drivers/video/fbmem.c
43915@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43916 image->dx += image->width + 8;
43917 }
43918 } else if (rotate == FB_ROTATE_UD) {
43919- for (x = 0; x < num && image->dx >= 0; x++) {
43920+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43921 info->fbops->fb_imageblit(info, image);
43922 image->dx -= image->width + 8;
43923 }
43924@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43925 image->dy += image->height + 8;
43926 }
43927 } else if (rotate == FB_ROTATE_CCW) {
43928- for (x = 0; x < num && image->dy >= 0; x++) {
43929+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43930 info->fbops->fb_imageblit(info, image);
43931 image->dy -= image->height + 8;
43932 }
43933@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43934 return -EFAULT;
43935 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43936 return -EINVAL;
43937- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43938+ if (con2fb.framebuffer >= FB_MAX)
43939 return -EINVAL;
43940 if (!registered_fb[con2fb.framebuffer])
43941 request_module("fb%d", con2fb.framebuffer);
43942diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43943index 7672d2e..b56437f 100644
43944--- a/drivers/video/i810/i810_accel.c
43945+++ b/drivers/video/i810/i810_accel.c
43946@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43947 }
43948 }
43949 printk("ringbuffer lockup!!!\n");
43950+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43951 i810_report_error(mmio);
43952 par->dev_flags |= LOCKUP;
43953 info->pixmap.scan_align = 1;
43954diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43955index 3c14e43..eafa544 100644
43956--- a/drivers/video/logo/logo_linux_clut224.ppm
43957+++ b/drivers/video/logo/logo_linux_clut224.ppm
43958@@ -1,1604 +1,1123 @@
43959 P3
43960-# Standard 224-color Linux logo
43961 80 80
43962 255
43963- 0 0 0 0 0 0 0 0 0 0 0 0
43964- 0 0 0 0 0 0 0 0 0 0 0 0
43965- 0 0 0 0 0 0 0 0 0 0 0 0
43966- 0 0 0 0 0 0 0 0 0 0 0 0
43967- 0 0 0 0 0 0 0 0 0 0 0 0
43968- 0 0 0 0 0 0 0 0 0 0 0 0
43969- 0 0 0 0 0 0 0 0 0 0 0 0
43970- 0 0 0 0 0 0 0 0 0 0 0 0
43971- 0 0 0 0 0 0 0 0 0 0 0 0
43972- 6 6 6 6 6 6 10 10 10 10 10 10
43973- 10 10 10 6 6 6 6 6 6 6 6 6
43974- 0 0 0 0 0 0 0 0 0 0 0 0
43975- 0 0 0 0 0 0 0 0 0 0 0 0
43976- 0 0 0 0 0 0 0 0 0 0 0 0
43977- 0 0 0 0 0 0 0 0 0 0 0 0
43978- 0 0 0 0 0 0 0 0 0 0 0 0
43979- 0 0 0 0 0 0 0 0 0 0 0 0
43980- 0 0 0 0 0 0 0 0 0 0 0 0
43981- 0 0 0 0 0 0 0 0 0 0 0 0
43982- 0 0 0 0 0 0 0 0 0 0 0 0
43983- 0 0 0 0 0 0 0 0 0 0 0 0
43984- 0 0 0 0 0 0 0 0 0 0 0 0
43985- 0 0 0 0 0 0 0 0 0 0 0 0
43986- 0 0 0 0 0 0 0 0 0 0 0 0
43987- 0 0 0 0 0 0 0 0 0 0 0 0
43988- 0 0 0 0 0 0 0 0 0 0 0 0
43989- 0 0 0 0 0 0 0 0 0 0 0 0
43990- 0 0 0 0 0 0 0 0 0 0 0 0
43991- 0 0 0 6 6 6 10 10 10 14 14 14
43992- 22 22 22 26 26 26 30 30 30 34 34 34
43993- 30 30 30 30 30 30 26 26 26 18 18 18
43994- 14 14 14 10 10 10 6 6 6 0 0 0
43995- 0 0 0 0 0 0 0 0 0 0 0 0
43996- 0 0 0 0 0 0 0 0 0 0 0 0
43997- 0 0 0 0 0 0 0 0 0 0 0 0
43998- 0 0 0 0 0 0 0 0 0 0 0 0
43999- 0 0 0 0 0 0 0 0 0 0 0 0
44000- 0 0 0 0 0 0 0 0 0 0 0 0
44001- 0 0 0 0 0 0 0 0 0 0 0 0
44002- 0 0 0 0 0 0 0 0 0 0 0 0
44003- 0 0 0 0 0 0 0 0 0 0 0 0
44004- 0 0 0 0 0 1 0 0 1 0 0 0
44005- 0 0 0 0 0 0 0 0 0 0 0 0
44006- 0 0 0 0 0 0 0 0 0 0 0 0
44007- 0 0 0 0 0 0 0 0 0 0 0 0
44008- 0 0 0 0 0 0 0 0 0 0 0 0
44009- 0 0 0 0 0 0 0 0 0 0 0 0
44010- 0 0 0 0 0 0 0 0 0 0 0 0
44011- 6 6 6 14 14 14 26 26 26 42 42 42
44012- 54 54 54 66 66 66 78 78 78 78 78 78
44013- 78 78 78 74 74 74 66 66 66 54 54 54
44014- 42 42 42 26 26 26 18 18 18 10 10 10
44015- 6 6 6 0 0 0 0 0 0 0 0 0
44016- 0 0 0 0 0 0 0 0 0 0 0 0
44017- 0 0 0 0 0 0 0 0 0 0 0 0
44018- 0 0 0 0 0 0 0 0 0 0 0 0
44019- 0 0 0 0 0 0 0 0 0 0 0 0
44020- 0 0 0 0 0 0 0 0 0 0 0 0
44021- 0 0 0 0 0 0 0 0 0 0 0 0
44022- 0 0 0 0 0 0 0 0 0 0 0 0
44023- 0 0 0 0 0 0 0 0 0 0 0 0
44024- 0 0 1 0 0 0 0 0 0 0 0 0
44025- 0 0 0 0 0 0 0 0 0 0 0 0
44026- 0 0 0 0 0 0 0 0 0 0 0 0
44027- 0 0 0 0 0 0 0 0 0 0 0 0
44028- 0 0 0 0 0 0 0 0 0 0 0 0
44029- 0 0 0 0 0 0 0 0 0 0 0 0
44030- 0 0 0 0 0 0 0 0 0 10 10 10
44031- 22 22 22 42 42 42 66 66 66 86 86 86
44032- 66 66 66 38 38 38 38 38 38 22 22 22
44033- 26 26 26 34 34 34 54 54 54 66 66 66
44034- 86 86 86 70 70 70 46 46 46 26 26 26
44035- 14 14 14 6 6 6 0 0 0 0 0 0
44036- 0 0 0 0 0 0 0 0 0 0 0 0
44037- 0 0 0 0 0 0 0 0 0 0 0 0
44038- 0 0 0 0 0 0 0 0 0 0 0 0
44039- 0 0 0 0 0 0 0 0 0 0 0 0
44040- 0 0 0 0 0 0 0 0 0 0 0 0
44041- 0 0 0 0 0 0 0 0 0 0 0 0
44042- 0 0 0 0 0 0 0 0 0 0 0 0
44043- 0 0 0 0 0 0 0 0 0 0 0 0
44044- 0 0 1 0 0 1 0 0 1 0 0 0
44045- 0 0 0 0 0 0 0 0 0 0 0 0
44046- 0 0 0 0 0 0 0 0 0 0 0 0
44047- 0 0 0 0 0 0 0 0 0 0 0 0
44048- 0 0 0 0 0 0 0 0 0 0 0 0
44049- 0 0 0 0 0 0 0 0 0 0 0 0
44050- 0 0 0 0 0 0 10 10 10 26 26 26
44051- 50 50 50 82 82 82 58 58 58 6 6 6
44052- 2 2 6 2 2 6 2 2 6 2 2 6
44053- 2 2 6 2 2 6 2 2 6 2 2 6
44054- 6 6 6 54 54 54 86 86 86 66 66 66
44055- 38 38 38 18 18 18 6 6 6 0 0 0
44056- 0 0 0 0 0 0 0 0 0 0 0 0
44057- 0 0 0 0 0 0 0 0 0 0 0 0
44058- 0 0 0 0 0 0 0 0 0 0 0 0
44059- 0 0 0 0 0 0 0 0 0 0 0 0
44060- 0 0 0 0 0 0 0 0 0 0 0 0
44061- 0 0 0 0 0 0 0 0 0 0 0 0
44062- 0 0 0 0 0 0 0 0 0 0 0 0
44063- 0 0 0 0 0 0 0 0 0 0 0 0
44064- 0 0 0 0 0 0 0 0 0 0 0 0
44065- 0 0 0 0 0 0 0 0 0 0 0 0
44066- 0 0 0 0 0 0 0 0 0 0 0 0
44067- 0 0 0 0 0 0 0 0 0 0 0 0
44068- 0 0 0 0 0 0 0 0 0 0 0 0
44069- 0 0 0 0 0 0 0 0 0 0 0 0
44070- 0 0 0 6 6 6 22 22 22 50 50 50
44071- 78 78 78 34 34 34 2 2 6 2 2 6
44072- 2 2 6 2 2 6 2 2 6 2 2 6
44073- 2 2 6 2 2 6 2 2 6 2 2 6
44074- 2 2 6 2 2 6 6 6 6 70 70 70
44075- 78 78 78 46 46 46 22 22 22 6 6 6
44076- 0 0 0 0 0 0 0 0 0 0 0 0
44077- 0 0 0 0 0 0 0 0 0 0 0 0
44078- 0 0 0 0 0 0 0 0 0 0 0 0
44079- 0 0 0 0 0 0 0 0 0 0 0 0
44080- 0 0 0 0 0 0 0 0 0 0 0 0
44081- 0 0 0 0 0 0 0 0 0 0 0 0
44082- 0 0 0 0 0 0 0 0 0 0 0 0
44083- 0 0 0 0 0 0 0 0 0 0 0 0
44084- 0 0 1 0 0 1 0 0 1 0 0 0
44085- 0 0 0 0 0 0 0 0 0 0 0 0
44086- 0 0 0 0 0 0 0 0 0 0 0 0
44087- 0 0 0 0 0 0 0 0 0 0 0 0
44088- 0 0 0 0 0 0 0 0 0 0 0 0
44089- 0 0 0 0 0 0 0 0 0 0 0 0
44090- 6 6 6 18 18 18 42 42 42 82 82 82
44091- 26 26 26 2 2 6 2 2 6 2 2 6
44092- 2 2 6 2 2 6 2 2 6 2 2 6
44093- 2 2 6 2 2 6 2 2 6 14 14 14
44094- 46 46 46 34 34 34 6 6 6 2 2 6
44095- 42 42 42 78 78 78 42 42 42 18 18 18
44096- 6 6 6 0 0 0 0 0 0 0 0 0
44097- 0 0 0 0 0 0 0 0 0 0 0 0
44098- 0 0 0 0 0 0 0 0 0 0 0 0
44099- 0 0 0 0 0 0 0 0 0 0 0 0
44100- 0 0 0 0 0 0 0 0 0 0 0 0
44101- 0 0 0 0 0 0 0 0 0 0 0 0
44102- 0 0 0 0 0 0 0 0 0 0 0 0
44103- 0 0 0 0 0 0 0 0 0 0 0 0
44104- 0 0 1 0 0 0 0 0 1 0 0 0
44105- 0 0 0 0 0 0 0 0 0 0 0 0
44106- 0 0 0 0 0 0 0 0 0 0 0 0
44107- 0 0 0 0 0 0 0 0 0 0 0 0
44108- 0 0 0 0 0 0 0 0 0 0 0 0
44109- 0 0 0 0 0 0 0 0 0 0 0 0
44110- 10 10 10 30 30 30 66 66 66 58 58 58
44111- 2 2 6 2 2 6 2 2 6 2 2 6
44112- 2 2 6 2 2 6 2 2 6 2 2 6
44113- 2 2 6 2 2 6 2 2 6 26 26 26
44114- 86 86 86 101 101 101 46 46 46 10 10 10
44115- 2 2 6 58 58 58 70 70 70 34 34 34
44116- 10 10 10 0 0 0 0 0 0 0 0 0
44117- 0 0 0 0 0 0 0 0 0 0 0 0
44118- 0 0 0 0 0 0 0 0 0 0 0 0
44119- 0 0 0 0 0 0 0 0 0 0 0 0
44120- 0 0 0 0 0 0 0 0 0 0 0 0
44121- 0 0 0 0 0 0 0 0 0 0 0 0
44122- 0 0 0 0 0 0 0 0 0 0 0 0
44123- 0 0 0 0 0 0 0 0 0 0 0 0
44124- 0 0 1 0 0 1 0 0 1 0 0 0
44125- 0 0 0 0 0 0 0 0 0 0 0 0
44126- 0 0 0 0 0 0 0 0 0 0 0 0
44127- 0 0 0 0 0 0 0 0 0 0 0 0
44128- 0 0 0 0 0 0 0 0 0 0 0 0
44129- 0 0 0 0 0 0 0 0 0 0 0 0
44130- 14 14 14 42 42 42 86 86 86 10 10 10
44131- 2 2 6 2 2 6 2 2 6 2 2 6
44132- 2 2 6 2 2 6 2 2 6 2 2 6
44133- 2 2 6 2 2 6 2 2 6 30 30 30
44134- 94 94 94 94 94 94 58 58 58 26 26 26
44135- 2 2 6 6 6 6 78 78 78 54 54 54
44136- 22 22 22 6 6 6 0 0 0 0 0 0
44137- 0 0 0 0 0 0 0 0 0 0 0 0
44138- 0 0 0 0 0 0 0 0 0 0 0 0
44139- 0 0 0 0 0 0 0 0 0 0 0 0
44140- 0 0 0 0 0 0 0 0 0 0 0 0
44141- 0 0 0 0 0 0 0 0 0 0 0 0
44142- 0 0 0 0 0 0 0 0 0 0 0 0
44143- 0 0 0 0 0 0 0 0 0 0 0 0
44144- 0 0 0 0 0 0 0 0 0 0 0 0
44145- 0 0 0 0 0 0 0 0 0 0 0 0
44146- 0 0 0 0 0 0 0 0 0 0 0 0
44147- 0 0 0 0 0 0 0 0 0 0 0 0
44148- 0 0 0 0 0 0 0 0 0 0 0 0
44149- 0 0 0 0 0 0 0 0 0 6 6 6
44150- 22 22 22 62 62 62 62 62 62 2 2 6
44151- 2 2 6 2 2 6 2 2 6 2 2 6
44152- 2 2 6 2 2 6 2 2 6 2 2 6
44153- 2 2 6 2 2 6 2 2 6 26 26 26
44154- 54 54 54 38 38 38 18 18 18 10 10 10
44155- 2 2 6 2 2 6 34 34 34 82 82 82
44156- 38 38 38 14 14 14 0 0 0 0 0 0
44157- 0 0 0 0 0 0 0 0 0 0 0 0
44158- 0 0 0 0 0 0 0 0 0 0 0 0
44159- 0 0 0 0 0 0 0 0 0 0 0 0
44160- 0 0 0 0 0 0 0 0 0 0 0 0
44161- 0 0 0 0 0 0 0 0 0 0 0 0
44162- 0 0 0 0 0 0 0 0 0 0 0 0
44163- 0 0 0 0 0 0 0 0 0 0 0 0
44164- 0 0 0 0 0 1 0 0 1 0 0 0
44165- 0 0 0 0 0 0 0 0 0 0 0 0
44166- 0 0 0 0 0 0 0 0 0 0 0 0
44167- 0 0 0 0 0 0 0 0 0 0 0 0
44168- 0 0 0 0 0 0 0 0 0 0 0 0
44169- 0 0 0 0 0 0 0 0 0 6 6 6
44170- 30 30 30 78 78 78 30 30 30 2 2 6
44171- 2 2 6 2 2 6 2 2 6 2 2 6
44172- 2 2 6 2 2 6 2 2 6 2 2 6
44173- 2 2 6 2 2 6 2 2 6 10 10 10
44174- 10 10 10 2 2 6 2 2 6 2 2 6
44175- 2 2 6 2 2 6 2 2 6 78 78 78
44176- 50 50 50 18 18 18 6 6 6 0 0 0
44177- 0 0 0 0 0 0 0 0 0 0 0 0
44178- 0 0 0 0 0 0 0 0 0 0 0 0
44179- 0 0 0 0 0 0 0 0 0 0 0 0
44180- 0 0 0 0 0 0 0 0 0 0 0 0
44181- 0 0 0 0 0 0 0 0 0 0 0 0
44182- 0 0 0 0 0 0 0 0 0 0 0 0
44183- 0 0 0 0 0 0 0 0 0 0 0 0
44184- 0 0 1 0 0 0 0 0 0 0 0 0
44185- 0 0 0 0 0 0 0 0 0 0 0 0
44186- 0 0 0 0 0 0 0 0 0 0 0 0
44187- 0 0 0 0 0 0 0 0 0 0 0 0
44188- 0 0 0 0 0 0 0 0 0 0 0 0
44189- 0 0 0 0 0 0 0 0 0 10 10 10
44190- 38 38 38 86 86 86 14 14 14 2 2 6
44191- 2 2 6 2 2 6 2 2 6 2 2 6
44192- 2 2 6 2 2 6 2 2 6 2 2 6
44193- 2 2 6 2 2 6 2 2 6 2 2 6
44194- 2 2 6 2 2 6 2 2 6 2 2 6
44195- 2 2 6 2 2 6 2 2 6 54 54 54
44196- 66 66 66 26 26 26 6 6 6 0 0 0
44197- 0 0 0 0 0 0 0 0 0 0 0 0
44198- 0 0 0 0 0 0 0 0 0 0 0 0
44199- 0 0 0 0 0 0 0 0 0 0 0 0
44200- 0 0 0 0 0 0 0 0 0 0 0 0
44201- 0 0 0 0 0 0 0 0 0 0 0 0
44202- 0 0 0 0 0 0 0 0 0 0 0 0
44203- 0 0 0 0 0 0 0 0 0 0 0 0
44204- 0 0 0 0 0 1 0 0 1 0 0 0
44205- 0 0 0 0 0 0 0 0 0 0 0 0
44206- 0 0 0 0 0 0 0 0 0 0 0 0
44207- 0 0 0 0 0 0 0 0 0 0 0 0
44208- 0 0 0 0 0 0 0 0 0 0 0 0
44209- 0 0 0 0 0 0 0 0 0 14 14 14
44210- 42 42 42 82 82 82 2 2 6 2 2 6
44211- 2 2 6 6 6 6 10 10 10 2 2 6
44212- 2 2 6 2 2 6 2 2 6 2 2 6
44213- 2 2 6 2 2 6 2 2 6 6 6 6
44214- 14 14 14 10 10 10 2 2 6 2 2 6
44215- 2 2 6 2 2 6 2 2 6 18 18 18
44216- 82 82 82 34 34 34 10 10 10 0 0 0
44217- 0 0 0 0 0 0 0 0 0 0 0 0
44218- 0 0 0 0 0 0 0 0 0 0 0 0
44219- 0 0 0 0 0 0 0 0 0 0 0 0
44220- 0 0 0 0 0 0 0 0 0 0 0 0
44221- 0 0 0 0 0 0 0 0 0 0 0 0
44222- 0 0 0 0 0 0 0 0 0 0 0 0
44223- 0 0 0 0 0 0 0 0 0 0 0 0
44224- 0 0 1 0 0 0 0 0 0 0 0 0
44225- 0 0 0 0 0 0 0 0 0 0 0 0
44226- 0 0 0 0 0 0 0 0 0 0 0 0
44227- 0 0 0 0 0 0 0 0 0 0 0 0
44228- 0 0 0 0 0 0 0 0 0 0 0 0
44229- 0 0 0 0 0 0 0 0 0 14 14 14
44230- 46 46 46 86 86 86 2 2 6 2 2 6
44231- 6 6 6 6 6 6 22 22 22 34 34 34
44232- 6 6 6 2 2 6 2 2 6 2 2 6
44233- 2 2 6 2 2 6 18 18 18 34 34 34
44234- 10 10 10 50 50 50 22 22 22 2 2 6
44235- 2 2 6 2 2 6 2 2 6 10 10 10
44236- 86 86 86 42 42 42 14 14 14 0 0 0
44237- 0 0 0 0 0 0 0 0 0 0 0 0
44238- 0 0 0 0 0 0 0 0 0 0 0 0
44239- 0 0 0 0 0 0 0 0 0 0 0 0
44240- 0 0 0 0 0 0 0 0 0 0 0 0
44241- 0 0 0 0 0 0 0 0 0 0 0 0
44242- 0 0 0 0 0 0 0 0 0 0 0 0
44243- 0 0 0 0 0 0 0 0 0 0 0 0
44244- 0 0 1 0 0 1 0 0 1 0 0 0
44245- 0 0 0 0 0 0 0 0 0 0 0 0
44246- 0 0 0 0 0 0 0 0 0 0 0 0
44247- 0 0 0 0 0 0 0 0 0 0 0 0
44248- 0 0 0 0 0 0 0 0 0 0 0 0
44249- 0 0 0 0 0 0 0 0 0 14 14 14
44250- 46 46 46 86 86 86 2 2 6 2 2 6
44251- 38 38 38 116 116 116 94 94 94 22 22 22
44252- 22 22 22 2 2 6 2 2 6 2 2 6
44253- 14 14 14 86 86 86 138 138 138 162 162 162
44254-154 154 154 38 38 38 26 26 26 6 6 6
44255- 2 2 6 2 2 6 2 2 6 2 2 6
44256- 86 86 86 46 46 46 14 14 14 0 0 0
44257- 0 0 0 0 0 0 0 0 0 0 0 0
44258- 0 0 0 0 0 0 0 0 0 0 0 0
44259- 0 0 0 0 0 0 0 0 0 0 0 0
44260- 0 0 0 0 0 0 0 0 0 0 0 0
44261- 0 0 0 0 0 0 0 0 0 0 0 0
44262- 0 0 0 0 0 0 0 0 0 0 0 0
44263- 0 0 0 0 0 0 0 0 0 0 0 0
44264- 0 0 0 0 0 0 0 0 0 0 0 0
44265- 0 0 0 0 0 0 0 0 0 0 0 0
44266- 0 0 0 0 0 0 0 0 0 0 0 0
44267- 0 0 0 0 0 0 0 0 0 0 0 0
44268- 0 0 0 0 0 0 0 0 0 0 0 0
44269- 0 0 0 0 0 0 0 0 0 14 14 14
44270- 46 46 46 86 86 86 2 2 6 14 14 14
44271-134 134 134 198 198 198 195 195 195 116 116 116
44272- 10 10 10 2 2 6 2 2 6 6 6 6
44273-101 98 89 187 187 187 210 210 210 218 218 218
44274-214 214 214 134 134 134 14 14 14 6 6 6
44275- 2 2 6 2 2 6 2 2 6 2 2 6
44276- 86 86 86 50 50 50 18 18 18 6 6 6
44277- 0 0 0 0 0 0 0 0 0 0 0 0
44278- 0 0 0 0 0 0 0 0 0 0 0 0
44279- 0 0 0 0 0 0 0 0 0 0 0 0
44280- 0 0 0 0 0 0 0 0 0 0 0 0
44281- 0 0 0 0 0 0 0 0 0 0 0 0
44282- 0 0 0 0 0 0 0 0 0 0 0 0
44283- 0 0 0 0 0 0 0 0 1 0 0 0
44284- 0 0 1 0 0 1 0 0 1 0 0 0
44285- 0 0 0 0 0 0 0 0 0 0 0 0
44286- 0 0 0 0 0 0 0 0 0 0 0 0
44287- 0 0 0 0 0 0 0 0 0 0 0 0
44288- 0 0 0 0 0 0 0 0 0 0 0 0
44289- 0 0 0 0 0 0 0 0 0 14 14 14
44290- 46 46 46 86 86 86 2 2 6 54 54 54
44291-218 218 218 195 195 195 226 226 226 246 246 246
44292- 58 58 58 2 2 6 2 2 6 30 30 30
44293-210 210 210 253 253 253 174 174 174 123 123 123
44294-221 221 221 234 234 234 74 74 74 2 2 6
44295- 2 2 6 2 2 6 2 2 6 2 2 6
44296- 70 70 70 58 58 58 22 22 22 6 6 6
44297- 0 0 0 0 0 0 0 0 0 0 0 0
44298- 0 0 0 0 0 0 0 0 0 0 0 0
44299- 0 0 0 0 0 0 0 0 0 0 0 0
44300- 0 0 0 0 0 0 0 0 0 0 0 0
44301- 0 0 0 0 0 0 0 0 0 0 0 0
44302- 0 0 0 0 0 0 0 0 0 0 0 0
44303- 0 0 0 0 0 0 0 0 0 0 0 0
44304- 0 0 0 0 0 0 0 0 0 0 0 0
44305- 0 0 0 0 0 0 0 0 0 0 0 0
44306- 0 0 0 0 0 0 0 0 0 0 0 0
44307- 0 0 0 0 0 0 0 0 0 0 0 0
44308- 0 0 0 0 0 0 0 0 0 0 0 0
44309- 0 0 0 0 0 0 0 0 0 14 14 14
44310- 46 46 46 82 82 82 2 2 6 106 106 106
44311-170 170 170 26 26 26 86 86 86 226 226 226
44312-123 123 123 10 10 10 14 14 14 46 46 46
44313-231 231 231 190 190 190 6 6 6 70 70 70
44314- 90 90 90 238 238 238 158 158 158 2 2 6
44315- 2 2 6 2 2 6 2 2 6 2 2 6
44316- 70 70 70 58 58 58 22 22 22 6 6 6
44317- 0 0 0 0 0 0 0 0 0 0 0 0
44318- 0 0 0 0 0 0 0 0 0 0 0 0
44319- 0 0 0 0 0 0 0 0 0 0 0 0
44320- 0 0 0 0 0 0 0 0 0 0 0 0
44321- 0 0 0 0 0 0 0 0 0 0 0 0
44322- 0 0 0 0 0 0 0 0 0 0 0 0
44323- 0 0 0 0 0 0 0 0 1 0 0 0
44324- 0 0 1 0 0 1 0 0 1 0 0 0
44325- 0 0 0 0 0 0 0 0 0 0 0 0
44326- 0 0 0 0 0 0 0 0 0 0 0 0
44327- 0 0 0 0 0 0 0 0 0 0 0 0
44328- 0 0 0 0 0 0 0 0 0 0 0 0
44329- 0 0 0 0 0 0 0 0 0 14 14 14
44330- 42 42 42 86 86 86 6 6 6 116 116 116
44331-106 106 106 6 6 6 70 70 70 149 149 149
44332-128 128 128 18 18 18 38 38 38 54 54 54
44333-221 221 221 106 106 106 2 2 6 14 14 14
44334- 46 46 46 190 190 190 198 198 198 2 2 6
44335- 2 2 6 2 2 6 2 2 6 2 2 6
44336- 74 74 74 62 62 62 22 22 22 6 6 6
44337- 0 0 0 0 0 0 0 0 0 0 0 0
44338- 0 0 0 0 0 0 0 0 0 0 0 0
44339- 0 0 0 0 0 0 0 0 0 0 0 0
44340- 0 0 0 0 0 0 0 0 0 0 0 0
44341- 0 0 0 0 0 0 0 0 0 0 0 0
44342- 0 0 0 0 0 0 0 0 0 0 0 0
44343- 0 0 0 0 0 0 0 0 1 0 0 0
44344- 0 0 1 0 0 0 0 0 1 0 0 0
44345- 0 0 0 0 0 0 0 0 0 0 0 0
44346- 0 0 0 0 0 0 0 0 0 0 0 0
44347- 0 0 0 0 0 0 0 0 0 0 0 0
44348- 0 0 0 0 0 0 0 0 0 0 0 0
44349- 0 0 0 0 0 0 0 0 0 14 14 14
44350- 42 42 42 94 94 94 14 14 14 101 101 101
44351-128 128 128 2 2 6 18 18 18 116 116 116
44352-118 98 46 121 92 8 121 92 8 98 78 10
44353-162 162 162 106 106 106 2 2 6 2 2 6
44354- 2 2 6 195 195 195 195 195 195 6 6 6
44355- 2 2 6 2 2 6 2 2 6 2 2 6
44356- 74 74 74 62 62 62 22 22 22 6 6 6
44357- 0 0 0 0 0 0 0 0 0 0 0 0
44358- 0 0 0 0 0 0 0 0 0 0 0 0
44359- 0 0 0 0 0 0 0 0 0 0 0 0
44360- 0 0 0 0 0 0 0 0 0 0 0 0
44361- 0 0 0 0 0 0 0 0 0 0 0 0
44362- 0 0 0 0 0 0 0 0 0 0 0 0
44363- 0 0 0 0 0 0 0 0 1 0 0 1
44364- 0 0 1 0 0 0 0 0 1 0 0 0
44365- 0 0 0 0 0 0 0 0 0 0 0 0
44366- 0 0 0 0 0 0 0 0 0 0 0 0
44367- 0 0 0 0 0 0 0 0 0 0 0 0
44368- 0 0 0 0 0 0 0 0 0 0 0 0
44369- 0 0 0 0 0 0 0 0 0 10 10 10
44370- 38 38 38 90 90 90 14 14 14 58 58 58
44371-210 210 210 26 26 26 54 38 6 154 114 10
44372-226 170 11 236 186 11 225 175 15 184 144 12
44373-215 174 15 175 146 61 37 26 9 2 2 6
44374- 70 70 70 246 246 246 138 138 138 2 2 6
44375- 2 2 6 2 2 6 2 2 6 2 2 6
44376- 70 70 70 66 66 66 26 26 26 6 6 6
44377- 0 0 0 0 0 0 0 0 0 0 0 0
44378- 0 0 0 0 0 0 0 0 0 0 0 0
44379- 0 0 0 0 0 0 0 0 0 0 0 0
44380- 0 0 0 0 0 0 0 0 0 0 0 0
44381- 0 0 0 0 0 0 0 0 0 0 0 0
44382- 0 0 0 0 0 0 0 0 0 0 0 0
44383- 0 0 0 0 0 0 0 0 0 0 0 0
44384- 0 0 0 0 0 0 0 0 0 0 0 0
44385- 0 0 0 0 0 0 0 0 0 0 0 0
44386- 0 0 0 0 0 0 0 0 0 0 0 0
44387- 0 0 0 0 0 0 0 0 0 0 0 0
44388- 0 0 0 0 0 0 0 0 0 0 0 0
44389- 0 0 0 0 0 0 0 0 0 10 10 10
44390- 38 38 38 86 86 86 14 14 14 10 10 10
44391-195 195 195 188 164 115 192 133 9 225 175 15
44392-239 182 13 234 190 10 232 195 16 232 200 30
44393-245 207 45 241 208 19 232 195 16 184 144 12
44394-218 194 134 211 206 186 42 42 42 2 2 6
44395- 2 2 6 2 2 6 2 2 6 2 2 6
44396- 50 50 50 74 74 74 30 30 30 6 6 6
44397- 0 0 0 0 0 0 0 0 0 0 0 0
44398- 0 0 0 0 0 0 0 0 0 0 0 0
44399- 0 0 0 0 0 0 0 0 0 0 0 0
44400- 0 0 0 0 0 0 0 0 0 0 0 0
44401- 0 0 0 0 0 0 0 0 0 0 0 0
44402- 0 0 0 0 0 0 0 0 0 0 0 0
44403- 0 0 0 0 0 0 0 0 0 0 0 0
44404- 0 0 0 0 0 0 0 0 0 0 0 0
44405- 0 0 0 0 0 0 0 0 0 0 0 0
44406- 0 0 0 0 0 0 0 0 0 0 0 0
44407- 0 0 0 0 0 0 0 0 0 0 0 0
44408- 0 0 0 0 0 0 0 0 0 0 0 0
44409- 0 0 0 0 0 0 0 0 0 10 10 10
44410- 34 34 34 86 86 86 14 14 14 2 2 6
44411-121 87 25 192 133 9 219 162 10 239 182 13
44412-236 186 11 232 195 16 241 208 19 244 214 54
44413-246 218 60 246 218 38 246 215 20 241 208 19
44414-241 208 19 226 184 13 121 87 25 2 2 6
44415- 2 2 6 2 2 6 2 2 6 2 2 6
44416- 50 50 50 82 82 82 34 34 34 10 10 10
44417- 0 0 0 0 0 0 0 0 0 0 0 0
44418- 0 0 0 0 0 0 0 0 0 0 0 0
44419- 0 0 0 0 0 0 0 0 0 0 0 0
44420- 0 0 0 0 0 0 0 0 0 0 0 0
44421- 0 0 0 0 0 0 0 0 0 0 0 0
44422- 0 0 0 0 0 0 0 0 0 0 0 0
44423- 0 0 0 0 0 0 0 0 0 0 0 0
44424- 0 0 0 0 0 0 0 0 0 0 0 0
44425- 0 0 0 0 0 0 0 0 0 0 0 0
44426- 0 0 0 0 0 0 0 0 0 0 0 0
44427- 0 0 0 0 0 0 0 0 0 0 0 0
44428- 0 0 0 0 0 0 0 0 0 0 0 0
44429- 0 0 0 0 0 0 0 0 0 10 10 10
44430- 34 34 34 82 82 82 30 30 30 61 42 6
44431-180 123 7 206 145 10 230 174 11 239 182 13
44432-234 190 10 238 202 15 241 208 19 246 218 74
44433-246 218 38 246 215 20 246 215 20 246 215 20
44434-226 184 13 215 174 15 184 144 12 6 6 6
44435- 2 2 6 2 2 6 2 2 6 2 2 6
44436- 26 26 26 94 94 94 42 42 42 14 14 14
44437- 0 0 0 0 0 0 0 0 0 0 0 0
44438- 0 0 0 0 0 0 0 0 0 0 0 0
44439- 0 0 0 0 0 0 0 0 0 0 0 0
44440- 0 0 0 0 0 0 0 0 0 0 0 0
44441- 0 0 0 0 0 0 0 0 0 0 0 0
44442- 0 0 0 0 0 0 0 0 0 0 0 0
44443- 0 0 0 0 0 0 0 0 0 0 0 0
44444- 0 0 0 0 0 0 0 0 0 0 0 0
44445- 0 0 0 0 0 0 0 0 0 0 0 0
44446- 0 0 0 0 0 0 0 0 0 0 0 0
44447- 0 0 0 0 0 0 0 0 0 0 0 0
44448- 0 0 0 0 0 0 0 0 0 0 0 0
44449- 0 0 0 0 0 0 0 0 0 10 10 10
44450- 30 30 30 78 78 78 50 50 50 104 69 6
44451-192 133 9 216 158 10 236 178 12 236 186 11
44452-232 195 16 241 208 19 244 214 54 245 215 43
44453-246 215 20 246 215 20 241 208 19 198 155 10
44454-200 144 11 216 158 10 156 118 10 2 2 6
44455- 2 2 6 2 2 6 2 2 6 2 2 6
44456- 6 6 6 90 90 90 54 54 54 18 18 18
44457- 6 6 6 0 0 0 0 0 0 0 0 0
44458- 0 0 0 0 0 0 0 0 0 0 0 0
44459- 0 0 0 0 0 0 0 0 0 0 0 0
44460- 0 0 0 0 0 0 0 0 0 0 0 0
44461- 0 0 0 0 0 0 0 0 0 0 0 0
44462- 0 0 0 0 0 0 0 0 0 0 0 0
44463- 0 0 0 0 0 0 0 0 0 0 0 0
44464- 0 0 0 0 0 0 0 0 0 0 0 0
44465- 0 0 0 0 0 0 0 0 0 0 0 0
44466- 0 0 0 0 0 0 0 0 0 0 0 0
44467- 0 0 0 0 0 0 0 0 0 0 0 0
44468- 0 0 0 0 0 0 0 0 0 0 0 0
44469- 0 0 0 0 0 0 0 0 0 10 10 10
44470- 30 30 30 78 78 78 46 46 46 22 22 22
44471-137 92 6 210 162 10 239 182 13 238 190 10
44472-238 202 15 241 208 19 246 215 20 246 215 20
44473-241 208 19 203 166 17 185 133 11 210 150 10
44474-216 158 10 210 150 10 102 78 10 2 2 6
44475- 6 6 6 54 54 54 14 14 14 2 2 6
44476- 2 2 6 62 62 62 74 74 74 30 30 30
44477- 10 10 10 0 0 0 0 0 0 0 0 0
44478- 0 0 0 0 0 0 0 0 0 0 0 0
44479- 0 0 0 0 0 0 0 0 0 0 0 0
44480- 0 0 0 0 0 0 0 0 0 0 0 0
44481- 0 0 0 0 0 0 0 0 0 0 0 0
44482- 0 0 0 0 0 0 0 0 0 0 0 0
44483- 0 0 0 0 0 0 0 0 0 0 0 0
44484- 0 0 0 0 0 0 0 0 0 0 0 0
44485- 0 0 0 0 0 0 0 0 0 0 0 0
44486- 0 0 0 0 0 0 0 0 0 0 0 0
44487- 0 0 0 0 0 0 0 0 0 0 0 0
44488- 0 0 0 0 0 0 0 0 0 0 0 0
44489- 0 0 0 0 0 0 0 0 0 10 10 10
44490- 34 34 34 78 78 78 50 50 50 6 6 6
44491- 94 70 30 139 102 15 190 146 13 226 184 13
44492-232 200 30 232 195 16 215 174 15 190 146 13
44493-168 122 10 192 133 9 210 150 10 213 154 11
44494-202 150 34 182 157 106 101 98 89 2 2 6
44495- 2 2 6 78 78 78 116 116 116 58 58 58
44496- 2 2 6 22 22 22 90 90 90 46 46 46
44497- 18 18 18 6 6 6 0 0 0 0 0 0
44498- 0 0 0 0 0 0 0 0 0 0 0 0
44499- 0 0 0 0 0 0 0 0 0 0 0 0
44500- 0 0 0 0 0 0 0 0 0 0 0 0
44501- 0 0 0 0 0 0 0 0 0 0 0 0
44502- 0 0 0 0 0 0 0 0 0 0 0 0
44503- 0 0 0 0 0 0 0 0 0 0 0 0
44504- 0 0 0 0 0 0 0 0 0 0 0 0
44505- 0 0 0 0 0 0 0 0 0 0 0 0
44506- 0 0 0 0 0 0 0 0 0 0 0 0
44507- 0 0 0 0 0 0 0 0 0 0 0 0
44508- 0 0 0 0 0 0 0 0 0 0 0 0
44509- 0 0 0 0 0 0 0 0 0 10 10 10
44510- 38 38 38 86 86 86 50 50 50 6 6 6
44511-128 128 128 174 154 114 156 107 11 168 122 10
44512-198 155 10 184 144 12 197 138 11 200 144 11
44513-206 145 10 206 145 10 197 138 11 188 164 115
44514-195 195 195 198 198 198 174 174 174 14 14 14
44515- 2 2 6 22 22 22 116 116 116 116 116 116
44516- 22 22 22 2 2 6 74 74 74 70 70 70
44517- 30 30 30 10 10 10 0 0 0 0 0 0
44518- 0 0 0 0 0 0 0 0 0 0 0 0
44519- 0 0 0 0 0 0 0 0 0 0 0 0
44520- 0 0 0 0 0 0 0 0 0 0 0 0
44521- 0 0 0 0 0 0 0 0 0 0 0 0
44522- 0 0 0 0 0 0 0 0 0 0 0 0
44523- 0 0 0 0 0 0 0 0 0 0 0 0
44524- 0 0 0 0 0 0 0 0 0 0 0 0
44525- 0 0 0 0 0 0 0 0 0 0 0 0
44526- 0 0 0 0 0 0 0 0 0 0 0 0
44527- 0 0 0 0 0 0 0 0 0 0 0 0
44528- 0 0 0 0 0 0 0 0 0 0 0 0
44529- 0 0 0 0 0 0 6 6 6 18 18 18
44530- 50 50 50 101 101 101 26 26 26 10 10 10
44531-138 138 138 190 190 190 174 154 114 156 107 11
44532-197 138 11 200 144 11 197 138 11 192 133 9
44533-180 123 7 190 142 34 190 178 144 187 187 187
44534-202 202 202 221 221 221 214 214 214 66 66 66
44535- 2 2 6 2 2 6 50 50 50 62 62 62
44536- 6 6 6 2 2 6 10 10 10 90 90 90
44537- 50 50 50 18 18 18 6 6 6 0 0 0
44538- 0 0 0 0 0 0 0 0 0 0 0 0
44539- 0 0 0 0 0 0 0 0 0 0 0 0
44540- 0 0 0 0 0 0 0 0 0 0 0 0
44541- 0 0 0 0 0 0 0 0 0 0 0 0
44542- 0 0 0 0 0 0 0 0 0 0 0 0
44543- 0 0 0 0 0 0 0 0 0 0 0 0
44544- 0 0 0 0 0 0 0 0 0 0 0 0
44545- 0 0 0 0 0 0 0 0 0 0 0 0
44546- 0 0 0 0 0 0 0 0 0 0 0 0
44547- 0 0 0 0 0 0 0 0 0 0 0 0
44548- 0 0 0 0 0 0 0 0 0 0 0 0
44549- 0 0 0 0 0 0 10 10 10 34 34 34
44550- 74 74 74 74 74 74 2 2 6 6 6 6
44551-144 144 144 198 198 198 190 190 190 178 166 146
44552-154 121 60 156 107 11 156 107 11 168 124 44
44553-174 154 114 187 187 187 190 190 190 210 210 210
44554-246 246 246 253 253 253 253 253 253 182 182 182
44555- 6 6 6 2 2 6 2 2 6 2 2 6
44556- 2 2 6 2 2 6 2 2 6 62 62 62
44557- 74 74 74 34 34 34 14 14 14 0 0 0
44558- 0 0 0 0 0 0 0 0 0 0 0 0
44559- 0 0 0 0 0 0 0 0 0 0 0 0
44560- 0 0 0 0 0 0 0 0 0 0 0 0
44561- 0 0 0 0 0 0 0 0 0 0 0 0
44562- 0 0 0 0 0 0 0 0 0 0 0 0
44563- 0 0 0 0 0 0 0 0 0 0 0 0
44564- 0 0 0 0 0 0 0 0 0 0 0 0
44565- 0 0 0 0 0 0 0 0 0 0 0 0
44566- 0 0 0 0 0 0 0 0 0 0 0 0
44567- 0 0 0 0 0 0 0 0 0 0 0 0
44568- 0 0 0 0 0 0 0 0 0 0 0 0
44569- 0 0 0 10 10 10 22 22 22 54 54 54
44570- 94 94 94 18 18 18 2 2 6 46 46 46
44571-234 234 234 221 221 221 190 190 190 190 190 190
44572-190 190 190 187 187 187 187 187 187 190 190 190
44573-190 190 190 195 195 195 214 214 214 242 242 242
44574-253 253 253 253 253 253 253 253 253 253 253 253
44575- 82 82 82 2 2 6 2 2 6 2 2 6
44576- 2 2 6 2 2 6 2 2 6 14 14 14
44577- 86 86 86 54 54 54 22 22 22 6 6 6
44578- 0 0 0 0 0 0 0 0 0 0 0 0
44579- 0 0 0 0 0 0 0 0 0 0 0 0
44580- 0 0 0 0 0 0 0 0 0 0 0 0
44581- 0 0 0 0 0 0 0 0 0 0 0 0
44582- 0 0 0 0 0 0 0 0 0 0 0 0
44583- 0 0 0 0 0 0 0 0 0 0 0 0
44584- 0 0 0 0 0 0 0 0 0 0 0 0
44585- 0 0 0 0 0 0 0 0 0 0 0 0
44586- 0 0 0 0 0 0 0 0 0 0 0 0
44587- 0 0 0 0 0 0 0 0 0 0 0 0
44588- 0 0 0 0 0 0 0 0 0 0 0 0
44589- 6 6 6 18 18 18 46 46 46 90 90 90
44590- 46 46 46 18 18 18 6 6 6 182 182 182
44591-253 253 253 246 246 246 206 206 206 190 190 190
44592-190 190 190 190 190 190 190 190 190 190 190 190
44593-206 206 206 231 231 231 250 250 250 253 253 253
44594-253 253 253 253 253 253 253 253 253 253 253 253
44595-202 202 202 14 14 14 2 2 6 2 2 6
44596- 2 2 6 2 2 6 2 2 6 2 2 6
44597- 42 42 42 86 86 86 42 42 42 18 18 18
44598- 6 6 6 0 0 0 0 0 0 0 0 0
44599- 0 0 0 0 0 0 0 0 0 0 0 0
44600- 0 0 0 0 0 0 0 0 0 0 0 0
44601- 0 0 0 0 0 0 0 0 0 0 0 0
44602- 0 0 0 0 0 0 0 0 0 0 0 0
44603- 0 0 0 0 0 0 0 0 0 0 0 0
44604- 0 0 0 0 0 0 0 0 0 0 0 0
44605- 0 0 0 0 0 0 0 0 0 0 0 0
44606- 0 0 0 0 0 0 0 0 0 0 0 0
44607- 0 0 0 0 0 0 0 0 0 0 0 0
44608- 0 0 0 0 0 0 0 0 0 6 6 6
44609- 14 14 14 38 38 38 74 74 74 66 66 66
44610- 2 2 6 6 6 6 90 90 90 250 250 250
44611-253 253 253 253 253 253 238 238 238 198 198 198
44612-190 190 190 190 190 190 195 195 195 221 221 221
44613-246 246 246 253 253 253 253 253 253 253 253 253
44614-253 253 253 253 253 253 253 253 253 253 253 253
44615-253 253 253 82 82 82 2 2 6 2 2 6
44616- 2 2 6 2 2 6 2 2 6 2 2 6
44617- 2 2 6 78 78 78 70 70 70 34 34 34
44618- 14 14 14 6 6 6 0 0 0 0 0 0
44619- 0 0 0 0 0 0 0 0 0 0 0 0
44620- 0 0 0 0 0 0 0 0 0 0 0 0
44621- 0 0 0 0 0 0 0 0 0 0 0 0
44622- 0 0 0 0 0 0 0 0 0 0 0 0
44623- 0 0 0 0 0 0 0 0 0 0 0 0
44624- 0 0 0 0 0 0 0 0 0 0 0 0
44625- 0 0 0 0 0 0 0 0 0 0 0 0
44626- 0 0 0 0 0 0 0 0 0 0 0 0
44627- 0 0 0 0 0 0 0 0 0 0 0 0
44628- 0 0 0 0 0 0 0 0 0 14 14 14
44629- 34 34 34 66 66 66 78 78 78 6 6 6
44630- 2 2 6 18 18 18 218 218 218 253 253 253
44631-253 253 253 253 253 253 253 253 253 246 246 246
44632-226 226 226 231 231 231 246 246 246 253 253 253
44633-253 253 253 253 253 253 253 253 253 253 253 253
44634-253 253 253 253 253 253 253 253 253 253 253 253
44635-253 253 253 178 178 178 2 2 6 2 2 6
44636- 2 2 6 2 2 6 2 2 6 2 2 6
44637- 2 2 6 18 18 18 90 90 90 62 62 62
44638- 30 30 30 10 10 10 0 0 0 0 0 0
44639- 0 0 0 0 0 0 0 0 0 0 0 0
44640- 0 0 0 0 0 0 0 0 0 0 0 0
44641- 0 0 0 0 0 0 0 0 0 0 0 0
44642- 0 0 0 0 0 0 0 0 0 0 0 0
44643- 0 0 0 0 0 0 0 0 0 0 0 0
44644- 0 0 0 0 0 0 0 0 0 0 0 0
44645- 0 0 0 0 0 0 0 0 0 0 0 0
44646- 0 0 0 0 0 0 0 0 0 0 0 0
44647- 0 0 0 0 0 0 0 0 0 0 0 0
44648- 0 0 0 0 0 0 10 10 10 26 26 26
44649- 58 58 58 90 90 90 18 18 18 2 2 6
44650- 2 2 6 110 110 110 253 253 253 253 253 253
44651-253 253 253 253 253 253 253 253 253 253 253 253
44652-250 250 250 253 253 253 253 253 253 253 253 253
44653-253 253 253 253 253 253 253 253 253 253 253 253
44654-253 253 253 253 253 253 253 253 253 253 253 253
44655-253 253 253 231 231 231 18 18 18 2 2 6
44656- 2 2 6 2 2 6 2 2 6 2 2 6
44657- 2 2 6 2 2 6 18 18 18 94 94 94
44658- 54 54 54 26 26 26 10 10 10 0 0 0
44659- 0 0 0 0 0 0 0 0 0 0 0 0
44660- 0 0 0 0 0 0 0 0 0 0 0 0
44661- 0 0 0 0 0 0 0 0 0 0 0 0
44662- 0 0 0 0 0 0 0 0 0 0 0 0
44663- 0 0 0 0 0 0 0 0 0 0 0 0
44664- 0 0 0 0 0 0 0 0 0 0 0 0
44665- 0 0 0 0 0 0 0 0 0 0 0 0
44666- 0 0 0 0 0 0 0 0 0 0 0 0
44667- 0 0 0 0 0 0 0 0 0 0 0 0
44668- 0 0 0 6 6 6 22 22 22 50 50 50
44669- 90 90 90 26 26 26 2 2 6 2 2 6
44670- 14 14 14 195 195 195 250 250 250 253 253 253
44671-253 253 253 253 253 253 253 253 253 253 253 253
44672-253 253 253 253 253 253 253 253 253 253 253 253
44673-253 253 253 253 253 253 253 253 253 253 253 253
44674-253 253 253 253 253 253 253 253 253 253 253 253
44675-250 250 250 242 242 242 54 54 54 2 2 6
44676- 2 2 6 2 2 6 2 2 6 2 2 6
44677- 2 2 6 2 2 6 2 2 6 38 38 38
44678- 86 86 86 50 50 50 22 22 22 6 6 6
44679- 0 0 0 0 0 0 0 0 0 0 0 0
44680- 0 0 0 0 0 0 0 0 0 0 0 0
44681- 0 0 0 0 0 0 0 0 0 0 0 0
44682- 0 0 0 0 0 0 0 0 0 0 0 0
44683- 0 0 0 0 0 0 0 0 0 0 0 0
44684- 0 0 0 0 0 0 0 0 0 0 0 0
44685- 0 0 0 0 0 0 0 0 0 0 0 0
44686- 0 0 0 0 0 0 0 0 0 0 0 0
44687- 0 0 0 0 0 0 0 0 0 0 0 0
44688- 6 6 6 14 14 14 38 38 38 82 82 82
44689- 34 34 34 2 2 6 2 2 6 2 2 6
44690- 42 42 42 195 195 195 246 246 246 253 253 253
44691-253 253 253 253 253 253 253 253 253 250 250 250
44692-242 242 242 242 242 242 250 250 250 253 253 253
44693-253 253 253 253 253 253 253 253 253 253 253 253
44694-253 253 253 250 250 250 246 246 246 238 238 238
44695-226 226 226 231 231 231 101 101 101 6 6 6
44696- 2 2 6 2 2 6 2 2 6 2 2 6
44697- 2 2 6 2 2 6 2 2 6 2 2 6
44698- 38 38 38 82 82 82 42 42 42 14 14 14
44699- 6 6 6 0 0 0 0 0 0 0 0 0
44700- 0 0 0 0 0 0 0 0 0 0 0 0
44701- 0 0 0 0 0 0 0 0 0 0 0 0
44702- 0 0 0 0 0 0 0 0 0 0 0 0
44703- 0 0 0 0 0 0 0 0 0 0 0 0
44704- 0 0 0 0 0 0 0 0 0 0 0 0
44705- 0 0 0 0 0 0 0 0 0 0 0 0
44706- 0 0 0 0 0 0 0 0 0 0 0 0
44707- 0 0 0 0 0 0 0 0 0 0 0 0
44708- 10 10 10 26 26 26 62 62 62 66 66 66
44709- 2 2 6 2 2 6 2 2 6 6 6 6
44710- 70 70 70 170 170 170 206 206 206 234 234 234
44711-246 246 246 250 250 250 250 250 250 238 238 238
44712-226 226 226 231 231 231 238 238 238 250 250 250
44713-250 250 250 250 250 250 246 246 246 231 231 231
44714-214 214 214 206 206 206 202 202 202 202 202 202
44715-198 198 198 202 202 202 182 182 182 18 18 18
44716- 2 2 6 2 2 6 2 2 6 2 2 6
44717- 2 2 6 2 2 6 2 2 6 2 2 6
44718- 2 2 6 62 62 62 66 66 66 30 30 30
44719- 10 10 10 0 0 0 0 0 0 0 0 0
44720- 0 0 0 0 0 0 0 0 0 0 0 0
44721- 0 0 0 0 0 0 0 0 0 0 0 0
44722- 0 0 0 0 0 0 0 0 0 0 0 0
44723- 0 0 0 0 0 0 0 0 0 0 0 0
44724- 0 0 0 0 0 0 0 0 0 0 0 0
44725- 0 0 0 0 0 0 0 0 0 0 0 0
44726- 0 0 0 0 0 0 0 0 0 0 0 0
44727- 0 0 0 0 0 0 0 0 0 0 0 0
44728- 14 14 14 42 42 42 82 82 82 18 18 18
44729- 2 2 6 2 2 6 2 2 6 10 10 10
44730- 94 94 94 182 182 182 218 218 218 242 242 242
44731-250 250 250 253 253 253 253 253 253 250 250 250
44732-234 234 234 253 253 253 253 253 253 253 253 253
44733-253 253 253 253 253 253 253 253 253 246 246 246
44734-238 238 238 226 226 226 210 210 210 202 202 202
44735-195 195 195 195 195 195 210 210 210 158 158 158
44736- 6 6 6 14 14 14 50 50 50 14 14 14
44737- 2 2 6 2 2 6 2 2 6 2 2 6
44738- 2 2 6 6 6 6 86 86 86 46 46 46
44739- 18 18 18 6 6 6 0 0 0 0 0 0
44740- 0 0 0 0 0 0 0 0 0 0 0 0
44741- 0 0 0 0 0 0 0 0 0 0 0 0
44742- 0 0 0 0 0 0 0 0 0 0 0 0
44743- 0 0 0 0 0 0 0 0 0 0 0 0
44744- 0 0 0 0 0 0 0 0 0 0 0 0
44745- 0 0 0 0 0 0 0 0 0 0 0 0
44746- 0 0 0 0 0 0 0 0 0 0 0 0
44747- 0 0 0 0 0 0 0 0 0 6 6 6
44748- 22 22 22 54 54 54 70 70 70 2 2 6
44749- 2 2 6 10 10 10 2 2 6 22 22 22
44750-166 166 166 231 231 231 250 250 250 253 253 253
44751-253 253 253 253 253 253 253 253 253 250 250 250
44752-242 242 242 253 253 253 253 253 253 253 253 253
44753-253 253 253 253 253 253 253 253 253 253 253 253
44754-253 253 253 253 253 253 253 253 253 246 246 246
44755-231 231 231 206 206 206 198 198 198 226 226 226
44756- 94 94 94 2 2 6 6 6 6 38 38 38
44757- 30 30 30 2 2 6 2 2 6 2 2 6
44758- 2 2 6 2 2 6 62 62 62 66 66 66
44759- 26 26 26 10 10 10 0 0 0 0 0 0
44760- 0 0 0 0 0 0 0 0 0 0 0 0
44761- 0 0 0 0 0 0 0 0 0 0 0 0
44762- 0 0 0 0 0 0 0 0 0 0 0 0
44763- 0 0 0 0 0 0 0 0 0 0 0 0
44764- 0 0 0 0 0 0 0 0 0 0 0 0
44765- 0 0 0 0 0 0 0 0 0 0 0 0
44766- 0 0 0 0 0 0 0 0 0 0 0 0
44767- 0 0 0 0 0 0 0 0 0 10 10 10
44768- 30 30 30 74 74 74 50 50 50 2 2 6
44769- 26 26 26 26 26 26 2 2 6 106 106 106
44770-238 238 238 253 253 253 253 253 253 253 253 253
44771-253 253 253 253 253 253 253 253 253 253 253 253
44772-253 253 253 253 253 253 253 253 253 253 253 253
44773-253 253 253 253 253 253 253 253 253 253 253 253
44774-253 253 253 253 253 253 253 253 253 253 253 253
44775-253 253 253 246 246 246 218 218 218 202 202 202
44776-210 210 210 14 14 14 2 2 6 2 2 6
44777- 30 30 30 22 22 22 2 2 6 2 2 6
44778- 2 2 6 2 2 6 18 18 18 86 86 86
44779- 42 42 42 14 14 14 0 0 0 0 0 0
44780- 0 0 0 0 0 0 0 0 0 0 0 0
44781- 0 0 0 0 0 0 0 0 0 0 0 0
44782- 0 0 0 0 0 0 0 0 0 0 0 0
44783- 0 0 0 0 0 0 0 0 0 0 0 0
44784- 0 0 0 0 0 0 0 0 0 0 0 0
44785- 0 0 0 0 0 0 0 0 0 0 0 0
44786- 0 0 0 0 0 0 0 0 0 0 0 0
44787- 0 0 0 0 0 0 0 0 0 14 14 14
44788- 42 42 42 90 90 90 22 22 22 2 2 6
44789- 42 42 42 2 2 6 18 18 18 218 218 218
44790-253 253 253 253 253 253 253 253 253 253 253 253
44791-253 253 253 253 253 253 253 253 253 253 253 253
44792-253 253 253 253 253 253 253 253 253 253 253 253
44793-253 253 253 253 253 253 253 253 253 253 253 253
44794-253 253 253 253 253 253 253 253 253 253 253 253
44795-253 253 253 253 253 253 250 250 250 221 221 221
44796-218 218 218 101 101 101 2 2 6 14 14 14
44797- 18 18 18 38 38 38 10 10 10 2 2 6
44798- 2 2 6 2 2 6 2 2 6 78 78 78
44799- 58 58 58 22 22 22 6 6 6 0 0 0
44800- 0 0 0 0 0 0 0 0 0 0 0 0
44801- 0 0 0 0 0 0 0 0 0 0 0 0
44802- 0 0 0 0 0 0 0 0 0 0 0 0
44803- 0 0 0 0 0 0 0 0 0 0 0 0
44804- 0 0 0 0 0 0 0 0 0 0 0 0
44805- 0 0 0 0 0 0 0 0 0 0 0 0
44806- 0 0 0 0 0 0 0 0 0 0 0 0
44807- 0 0 0 0 0 0 6 6 6 18 18 18
44808- 54 54 54 82 82 82 2 2 6 26 26 26
44809- 22 22 22 2 2 6 123 123 123 253 253 253
44810-253 253 253 253 253 253 253 253 253 253 253 253
44811-253 253 253 253 253 253 253 253 253 253 253 253
44812-253 253 253 253 253 253 253 253 253 253 253 253
44813-253 253 253 253 253 253 253 253 253 253 253 253
44814-253 253 253 253 253 253 253 253 253 253 253 253
44815-253 253 253 253 253 253 253 253 253 250 250 250
44816-238 238 238 198 198 198 6 6 6 38 38 38
44817- 58 58 58 26 26 26 38 38 38 2 2 6
44818- 2 2 6 2 2 6 2 2 6 46 46 46
44819- 78 78 78 30 30 30 10 10 10 0 0 0
44820- 0 0 0 0 0 0 0 0 0 0 0 0
44821- 0 0 0 0 0 0 0 0 0 0 0 0
44822- 0 0 0 0 0 0 0 0 0 0 0 0
44823- 0 0 0 0 0 0 0 0 0 0 0 0
44824- 0 0 0 0 0 0 0 0 0 0 0 0
44825- 0 0 0 0 0 0 0 0 0 0 0 0
44826- 0 0 0 0 0 0 0 0 0 0 0 0
44827- 0 0 0 0 0 0 10 10 10 30 30 30
44828- 74 74 74 58 58 58 2 2 6 42 42 42
44829- 2 2 6 22 22 22 231 231 231 253 253 253
44830-253 253 253 253 253 253 253 253 253 253 253 253
44831-253 253 253 253 253 253 253 253 253 250 250 250
44832-253 253 253 253 253 253 253 253 253 253 253 253
44833-253 253 253 253 253 253 253 253 253 253 253 253
44834-253 253 253 253 253 253 253 253 253 253 253 253
44835-253 253 253 253 253 253 253 253 253 253 253 253
44836-253 253 253 246 246 246 46 46 46 38 38 38
44837- 42 42 42 14 14 14 38 38 38 14 14 14
44838- 2 2 6 2 2 6 2 2 6 6 6 6
44839- 86 86 86 46 46 46 14 14 14 0 0 0
44840- 0 0 0 0 0 0 0 0 0 0 0 0
44841- 0 0 0 0 0 0 0 0 0 0 0 0
44842- 0 0 0 0 0 0 0 0 0 0 0 0
44843- 0 0 0 0 0 0 0 0 0 0 0 0
44844- 0 0 0 0 0 0 0 0 0 0 0 0
44845- 0 0 0 0 0 0 0 0 0 0 0 0
44846- 0 0 0 0 0 0 0 0 0 0 0 0
44847- 0 0 0 6 6 6 14 14 14 42 42 42
44848- 90 90 90 18 18 18 18 18 18 26 26 26
44849- 2 2 6 116 116 116 253 253 253 253 253 253
44850-253 253 253 253 253 253 253 253 253 253 253 253
44851-253 253 253 253 253 253 250 250 250 238 238 238
44852-253 253 253 253 253 253 253 253 253 253 253 253
44853-253 253 253 253 253 253 253 253 253 253 253 253
44854-253 253 253 253 253 253 253 253 253 253 253 253
44855-253 253 253 253 253 253 253 253 253 253 253 253
44856-253 253 253 253 253 253 94 94 94 6 6 6
44857- 2 2 6 2 2 6 10 10 10 34 34 34
44858- 2 2 6 2 2 6 2 2 6 2 2 6
44859- 74 74 74 58 58 58 22 22 22 6 6 6
44860- 0 0 0 0 0 0 0 0 0 0 0 0
44861- 0 0 0 0 0 0 0 0 0 0 0 0
44862- 0 0 0 0 0 0 0 0 0 0 0 0
44863- 0 0 0 0 0 0 0 0 0 0 0 0
44864- 0 0 0 0 0 0 0 0 0 0 0 0
44865- 0 0 0 0 0 0 0 0 0 0 0 0
44866- 0 0 0 0 0 0 0 0 0 0 0 0
44867- 0 0 0 10 10 10 26 26 26 66 66 66
44868- 82 82 82 2 2 6 38 38 38 6 6 6
44869- 14 14 14 210 210 210 253 253 253 253 253 253
44870-253 253 253 253 253 253 253 253 253 253 253 253
44871-253 253 253 253 253 253 246 246 246 242 242 242
44872-253 253 253 253 253 253 253 253 253 253 253 253
44873-253 253 253 253 253 253 253 253 253 253 253 253
44874-253 253 253 253 253 253 253 253 253 253 253 253
44875-253 253 253 253 253 253 253 253 253 253 253 253
44876-253 253 253 253 253 253 144 144 144 2 2 6
44877- 2 2 6 2 2 6 2 2 6 46 46 46
44878- 2 2 6 2 2 6 2 2 6 2 2 6
44879- 42 42 42 74 74 74 30 30 30 10 10 10
44880- 0 0 0 0 0 0 0 0 0 0 0 0
44881- 0 0 0 0 0 0 0 0 0 0 0 0
44882- 0 0 0 0 0 0 0 0 0 0 0 0
44883- 0 0 0 0 0 0 0 0 0 0 0 0
44884- 0 0 0 0 0 0 0 0 0 0 0 0
44885- 0 0 0 0 0 0 0 0 0 0 0 0
44886- 0 0 0 0 0 0 0 0 0 0 0 0
44887- 6 6 6 14 14 14 42 42 42 90 90 90
44888- 26 26 26 6 6 6 42 42 42 2 2 6
44889- 74 74 74 250 250 250 253 253 253 253 253 253
44890-253 253 253 253 253 253 253 253 253 253 253 253
44891-253 253 253 253 253 253 242 242 242 242 242 242
44892-253 253 253 253 253 253 253 253 253 253 253 253
44893-253 253 253 253 253 253 253 253 253 253 253 253
44894-253 253 253 253 253 253 253 253 253 253 253 253
44895-253 253 253 253 253 253 253 253 253 253 253 253
44896-253 253 253 253 253 253 182 182 182 2 2 6
44897- 2 2 6 2 2 6 2 2 6 46 46 46
44898- 2 2 6 2 2 6 2 2 6 2 2 6
44899- 10 10 10 86 86 86 38 38 38 10 10 10
44900- 0 0 0 0 0 0 0 0 0 0 0 0
44901- 0 0 0 0 0 0 0 0 0 0 0 0
44902- 0 0 0 0 0 0 0 0 0 0 0 0
44903- 0 0 0 0 0 0 0 0 0 0 0 0
44904- 0 0 0 0 0 0 0 0 0 0 0 0
44905- 0 0 0 0 0 0 0 0 0 0 0 0
44906- 0 0 0 0 0 0 0 0 0 0 0 0
44907- 10 10 10 26 26 26 66 66 66 82 82 82
44908- 2 2 6 22 22 22 18 18 18 2 2 6
44909-149 149 149 253 253 253 253 253 253 253 253 253
44910-253 253 253 253 253 253 253 253 253 253 253 253
44911-253 253 253 253 253 253 234 234 234 242 242 242
44912-253 253 253 253 253 253 253 253 253 253 253 253
44913-253 253 253 253 253 253 253 253 253 253 253 253
44914-253 253 253 253 253 253 253 253 253 253 253 253
44915-253 253 253 253 253 253 253 253 253 253 253 253
44916-253 253 253 253 253 253 206 206 206 2 2 6
44917- 2 2 6 2 2 6 2 2 6 38 38 38
44918- 2 2 6 2 2 6 2 2 6 2 2 6
44919- 6 6 6 86 86 86 46 46 46 14 14 14
44920- 0 0 0 0 0 0 0 0 0 0 0 0
44921- 0 0 0 0 0 0 0 0 0 0 0 0
44922- 0 0 0 0 0 0 0 0 0 0 0 0
44923- 0 0 0 0 0 0 0 0 0 0 0 0
44924- 0 0 0 0 0 0 0 0 0 0 0 0
44925- 0 0 0 0 0 0 0 0 0 0 0 0
44926- 0 0 0 0 0 0 0 0 0 6 6 6
44927- 18 18 18 46 46 46 86 86 86 18 18 18
44928- 2 2 6 34 34 34 10 10 10 6 6 6
44929-210 210 210 253 253 253 253 253 253 253 253 253
44930-253 253 253 253 253 253 253 253 253 253 253 253
44931-253 253 253 253 253 253 234 234 234 242 242 242
44932-253 253 253 253 253 253 253 253 253 253 253 253
44933-253 253 253 253 253 253 253 253 253 253 253 253
44934-253 253 253 253 253 253 253 253 253 253 253 253
44935-253 253 253 253 253 253 253 253 253 253 253 253
44936-253 253 253 253 253 253 221 221 221 6 6 6
44937- 2 2 6 2 2 6 6 6 6 30 30 30
44938- 2 2 6 2 2 6 2 2 6 2 2 6
44939- 2 2 6 82 82 82 54 54 54 18 18 18
44940- 6 6 6 0 0 0 0 0 0 0 0 0
44941- 0 0 0 0 0 0 0 0 0 0 0 0
44942- 0 0 0 0 0 0 0 0 0 0 0 0
44943- 0 0 0 0 0 0 0 0 0 0 0 0
44944- 0 0 0 0 0 0 0 0 0 0 0 0
44945- 0 0 0 0 0 0 0 0 0 0 0 0
44946- 0 0 0 0 0 0 0 0 0 10 10 10
44947- 26 26 26 66 66 66 62 62 62 2 2 6
44948- 2 2 6 38 38 38 10 10 10 26 26 26
44949-238 238 238 253 253 253 253 253 253 253 253 253
44950-253 253 253 253 253 253 253 253 253 253 253 253
44951-253 253 253 253 253 253 231 231 231 238 238 238
44952-253 253 253 253 253 253 253 253 253 253 253 253
44953-253 253 253 253 253 253 253 253 253 253 253 253
44954-253 253 253 253 253 253 253 253 253 253 253 253
44955-253 253 253 253 253 253 253 253 253 253 253 253
44956-253 253 253 253 253 253 231 231 231 6 6 6
44957- 2 2 6 2 2 6 10 10 10 30 30 30
44958- 2 2 6 2 2 6 2 2 6 2 2 6
44959- 2 2 6 66 66 66 58 58 58 22 22 22
44960- 6 6 6 0 0 0 0 0 0 0 0 0
44961- 0 0 0 0 0 0 0 0 0 0 0 0
44962- 0 0 0 0 0 0 0 0 0 0 0 0
44963- 0 0 0 0 0 0 0 0 0 0 0 0
44964- 0 0 0 0 0 0 0 0 0 0 0 0
44965- 0 0 0 0 0 0 0 0 0 0 0 0
44966- 0 0 0 0 0 0 0 0 0 10 10 10
44967- 38 38 38 78 78 78 6 6 6 2 2 6
44968- 2 2 6 46 46 46 14 14 14 42 42 42
44969-246 246 246 253 253 253 253 253 253 253 253 253
44970-253 253 253 253 253 253 253 253 253 253 253 253
44971-253 253 253 253 253 253 231 231 231 242 242 242
44972-253 253 253 253 253 253 253 253 253 253 253 253
44973-253 253 253 253 253 253 253 253 253 253 253 253
44974-253 253 253 253 253 253 253 253 253 253 253 253
44975-253 253 253 253 253 253 253 253 253 253 253 253
44976-253 253 253 253 253 253 234 234 234 10 10 10
44977- 2 2 6 2 2 6 22 22 22 14 14 14
44978- 2 2 6 2 2 6 2 2 6 2 2 6
44979- 2 2 6 66 66 66 62 62 62 22 22 22
44980- 6 6 6 0 0 0 0 0 0 0 0 0
44981- 0 0 0 0 0 0 0 0 0 0 0 0
44982- 0 0 0 0 0 0 0 0 0 0 0 0
44983- 0 0 0 0 0 0 0 0 0 0 0 0
44984- 0 0 0 0 0 0 0 0 0 0 0 0
44985- 0 0 0 0 0 0 0 0 0 0 0 0
44986- 0 0 0 0 0 0 6 6 6 18 18 18
44987- 50 50 50 74 74 74 2 2 6 2 2 6
44988- 14 14 14 70 70 70 34 34 34 62 62 62
44989-250 250 250 253 253 253 253 253 253 253 253 253
44990-253 253 253 253 253 253 253 253 253 253 253 253
44991-253 253 253 253 253 253 231 231 231 246 246 246
44992-253 253 253 253 253 253 253 253 253 253 253 253
44993-253 253 253 253 253 253 253 253 253 253 253 253
44994-253 253 253 253 253 253 253 253 253 253 253 253
44995-253 253 253 253 253 253 253 253 253 253 253 253
44996-253 253 253 253 253 253 234 234 234 14 14 14
44997- 2 2 6 2 2 6 30 30 30 2 2 6
44998- 2 2 6 2 2 6 2 2 6 2 2 6
44999- 2 2 6 66 66 66 62 62 62 22 22 22
45000- 6 6 6 0 0 0 0 0 0 0 0 0
45001- 0 0 0 0 0 0 0 0 0 0 0 0
45002- 0 0 0 0 0 0 0 0 0 0 0 0
45003- 0 0 0 0 0 0 0 0 0 0 0 0
45004- 0 0 0 0 0 0 0 0 0 0 0 0
45005- 0 0 0 0 0 0 0 0 0 0 0 0
45006- 0 0 0 0 0 0 6 6 6 18 18 18
45007- 54 54 54 62 62 62 2 2 6 2 2 6
45008- 2 2 6 30 30 30 46 46 46 70 70 70
45009-250 250 250 253 253 253 253 253 253 253 253 253
45010-253 253 253 253 253 253 253 253 253 253 253 253
45011-253 253 253 253 253 253 231 231 231 246 246 246
45012-253 253 253 253 253 253 253 253 253 253 253 253
45013-253 253 253 253 253 253 253 253 253 253 253 253
45014-253 253 253 253 253 253 253 253 253 253 253 253
45015-253 253 253 253 253 253 253 253 253 253 253 253
45016-253 253 253 253 253 253 226 226 226 10 10 10
45017- 2 2 6 6 6 6 30 30 30 2 2 6
45018- 2 2 6 2 2 6 2 2 6 2 2 6
45019- 2 2 6 66 66 66 58 58 58 22 22 22
45020- 6 6 6 0 0 0 0 0 0 0 0 0
45021- 0 0 0 0 0 0 0 0 0 0 0 0
45022- 0 0 0 0 0 0 0 0 0 0 0 0
45023- 0 0 0 0 0 0 0 0 0 0 0 0
45024- 0 0 0 0 0 0 0 0 0 0 0 0
45025- 0 0 0 0 0 0 0 0 0 0 0 0
45026- 0 0 0 0 0 0 6 6 6 22 22 22
45027- 58 58 58 62 62 62 2 2 6 2 2 6
45028- 2 2 6 2 2 6 30 30 30 78 78 78
45029-250 250 250 253 253 253 253 253 253 253 253 253
45030-253 253 253 253 253 253 253 253 253 253 253 253
45031-253 253 253 253 253 253 231 231 231 246 246 246
45032-253 253 253 253 253 253 253 253 253 253 253 253
45033-253 253 253 253 253 253 253 253 253 253 253 253
45034-253 253 253 253 253 253 253 253 253 253 253 253
45035-253 253 253 253 253 253 253 253 253 253 253 253
45036-253 253 253 253 253 253 206 206 206 2 2 6
45037- 22 22 22 34 34 34 18 14 6 22 22 22
45038- 26 26 26 18 18 18 6 6 6 2 2 6
45039- 2 2 6 82 82 82 54 54 54 18 18 18
45040- 6 6 6 0 0 0 0 0 0 0 0 0
45041- 0 0 0 0 0 0 0 0 0 0 0 0
45042- 0 0 0 0 0 0 0 0 0 0 0 0
45043- 0 0 0 0 0 0 0 0 0 0 0 0
45044- 0 0 0 0 0 0 0 0 0 0 0 0
45045- 0 0 0 0 0 0 0 0 0 0 0 0
45046- 0 0 0 0 0 0 6 6 6 26 26 26
45047- 62 62 62 106 106 106 74 54 14 185 133 11
45048-210 162 10 121 92 8 6 6 6 62 62 62
45049-238 238 238 253 253 253 253 253 253 253 253 253
45050-253 253 253 253 253 253 253 253 253 253 253 253
45051-253 253 253 253 253 253 231 231 231 246 246 246
45052-253 253 253 253 253 253 253 253 253 253 253 253
45053-253 253 253 253 253 253 253 253 253 253 253 253
45054-253 253 253 253 253 253 253 253 253 253 253 253
45055-253 253 253 253 253 253 253 253 253 253 253 253
45056-253 253 253 253 253 253 158 158 158 18 18 18
45057- 14 14 14 2 2 6 2 2 6 2 2 6
45058- 6 6 6 18 18 18 66 66 66 38 38 38
45059- 6 6 6 94 94 94 50 50 50 18 18 18
45060- 6 6 6 0 0 0 0 0 0 0 0 0
45061- 0 0 0 0 0 0 0 0 0 0 0 0
45062- 0 0 0 0 0 0 0 0 0 0 0 0
45063- 0 0 0 0 0 0 0 0 0 0 0 0
45064- 0 0 0 0 0 0 0 0 0 0 0 0
45065- 0 0 0 0 0 0 0 0 0 6 6 6
45066- 10 10 10 10 10 10 18 18 18 38 38 38
45067- 78 78 78 142 134 106 216 158 10 242 186 14
45068-246 190 14 246 190 14 156 118 10 10 10 10
45069- 90 90 90 238 238 238 253 253 253 253 253 253
45070-253 253 253 253 253 253 253 253 253 253 253 253
45071-253 253 253 253 253 253 231 231 231 250 250 250
45072-253 253 253 253 253 253 253 253 253 253 253 253
45073-253 253 253 253 253 253 253 253 253 253 253 253
45074-253 253 253 253 253 253 253 253 253 253 253 253
45075-253 253 253 253 253 253 253 253 253 246 230 190
45076-238 204 91 238 204 91 181 142 44 37 26 9
45077- 2 2 6 2 2 6 2 2 6 2 2 6
45078- 2 2 6 2 2 6 38 38 38 46 46 46
45079- 26 26 26 106 106 106 54 54 54 18 18 18
45080- 6 6 6 0 0 0 0 0 0 0 0 0
45081- 0 0 0 0 0 0 0 0 0 0 0 0
45082- 0 0 0 0 0 0 0 0 0 0 0 0
45083- 0 0 0 0 0 0 0 0 0 0 0 0
45084- 0 0 0 0 0 0 0 0 0 0 0 0
45085- 0 0 0 6 6 6 14 14 14 22 22 22
45086- 30 30 30 38 38 38 50 50 50 70 70 70
45087-106 106 106 190 142 34 226 170 11 242 186 14
45088-246 190 14 246 190 14 246 190 14 154 114 10
45089- 6 6 6 74 74 74 226 226 226 253 253 253
45090-253 253 253 253 253 253 253 253 253 253 253 253
45091-253 253 253 253 253 253 231 231 231 250 250 250
45092-253 253 253 253 253 253 253 253 253 253 253 253
45093-253 253 253 253 253 253 253 253 253 253 253 253
45094-253 253 253 253 253 253 253 253 253 253 253 253
45095-253 253 253 253 253 253 253 253 253 228 184 62
45096-241 196 14 241 208 19 232 195 16 38 30 10
45097- 2 2 6 2 2 6 2 2 6 2 2 6
45098- 2 2 6 6 6 6 30 30 30 26 26 26
45099-203 166 17 154 142 90 66 66 66 26 26 26
45100- 6 6 6 0 0 0 0 0 0 0 0 0
45101- 0 0 0 0 0 0 0 0 0 0 0 0
45102- 0 0 0 0 0 0 0 0 0 0 0 0
45103- 0 0 0 0 0 0 0 0 0 0 0 0
45104- 0 0 0 0 0 0 0 0 0 0 0 0
45105- 6 6 6 18 18 18 38 38 38 58 58 58
45106- 78 78 78 86 86 86 101 101 101 123 123 123
45107-175 146 61 210 150 10 234 174 13 246 186 14
45108-246 190 14 246 190 14 246 190 14 238 190 10
45109-102 78 10 2 2 6 46 46 46 198 198 198
45110-253 253 253 253 253 253 253 253 253 253 253 253
45111-253 253 253 253 253 253 234 234 234 242 242 242
45112-253 253 253 253 253 253 253 253 253 253 253 253
45113-253 253 253 253 253 253 253 253 253 253 253 253
45114-253 253 253 253 253 253 253 253 253 253 253 253
45115-253 253 253 253 253 253 253 253 253 224 178 62
45116-242 186 14 241 196 14 210 166 10 22 18 6
45117- 2 2 6 2 2 6 2 2 6 2 2 6
45118- 2 2 6 2 2 6 6 6 6 121 92 8
45119-238 202 15 232 195 16 82 82 82 34 34 34
45120- 10 10 10 0 0 0 0 0 0 0 0 0
45121- 0 0 0 0 0 0 0 0 0 0 0 0
45122- 0 0 0 0 0 0 0 0 0 0 0 0
45123- 0 0 0 0 0 0 0 0 0 0 0 0
45124- 0 0 0 0 0 0 0 0 0 0 0 0
45125- 14 14 14 38 38 38 70 70 70 154 122 46
45126-190 142 34 200 144 11 197 138 11 197 138 11
45127-213 154 11 226 170 11 242 186 14 246 190 14
45128-246 190 14 246 190 14 246 190 14 246 190 14
45129-225 175 15 46 32 6 2 2 6 22 22 22
45130-158 158 158 250 250 250 253 253 253 253 253 253
45131-253 253 253 253 253 253 253 253 253 253 253 253
45132-253 253 253 253 253 253 253 253 253 253 253 253
45133-253 253 253 253 253 253 253 253 253 253 253 253
45134-253 253 253 253 253 253 253 253 253 253 253 253
45135-253 253 253 250 250 250 242 242 242 224 178 62
45136-239 182 13 236 186 11 213 154 11 46 32 6
45137- 2 2 6 2 2 6 2 2 6 2 2 6
45138- 2 2 6 2 2 6 61 42 6 225 175 15
45139-238 190 10 236 186 11 112 100 78 42 42 42
45140- 14 14 14 0 0 0 0 0 0 0 0 0
45141- 0 0 0 0 0 0 0 0 0 0 0 0
45142- 0 0 0 0 0 0 0 0 0 0 0 0
45143- 0 0 0 0 0 0 0 0 0 0 0 0
45144- 0 0 0 0 0 0 0 0 0 6 6 6
45145- 22 22 22 54 54 54 154 122 46 213 154 11
45146-226 170 11 230 174 11 226 170 11 226 170 11
45147-236 178 12 242 186 14 246 190 14 246 190 14
45148-246 190 14 246 190 14 246 190 14 246 190 14
45149-241 196 14 184 144 12 10 10 10 2 2 6
45150- 6 6 6 116 116 116 242 242 242 253 253 253
45151-253 253 253 253 253 253 253 253 253 253 253 253
45152-253 253 253 253 253 253 253 253 253 253 253 253
45153-253 253 253 253 253 253 253 253 253 253 253 253
45154-253 253 253 253 253 253 253 253 253 253 253 253
45155-253 253 253 231 231 231 198 198 198 214 170 54
45156-236 178 12 236 178 12 210 150 10 137 92 6
45157- 18 14 6 2 2 6 2 2 6 2 2 6
45158- 6 6 6 70 47 6 200 144 11 236 178 12
45159-239 182 13 239 182 13 124 112 88 58 58 58
45160- 22 22 22 6 6 6 0 0 0 0 0 0
45161- 0 0 0 0 0 0 0 0 0 0 0 0
45162- 0 0 0 0 0 0 0 0 0 0 0 0
45163- 0 0 0 0 0 0 0 0 0 0 0 0
45164- 0 0 0 0 0 0 0 0 0 10 10 10
45165- 30 30 30 70 70 70 180 133 36 226 170 11
45166-239 182 13 242 186 14 242 186 14 246 186 14
45167-246 190 14 246 190 14 246 190 14 246 190 14
45168-246 190 14 246 190 14 246 190 14 246 190 14
45169-246 190 14 232 195 16 98 70 6 2 2 6
45170- 2 2 6 2 2 6 66 66 66 221 221 221
45171-253 253 253 253 253 253 253 253 253 253 253 253
45172-253 253 253 253 253 253 253 253 253 253 253 253
45173-253 253 253 253 253 253 253 253 253 253 253 253
45174-253 253 253 253 253 253 253 253 253 253 253 253
45175-253 253 253 206 206 206 198 198 198 214 166 58
45176-230 174 11 230 174 11 216 158 10 192 133 9
45177-163 110 8 116 81 8 102 78 10 116 81 8
45178-167 114 7 197 138 11 226 170 11 239 182 13
45179-242 186 14 242 186 14 162 146 94 78 78 78
45180- 34 34 34 14 14 14 6 6 6 0 0 0
45181- 0 0 0 0 0 0 0 0 0 0 0 0
45182- 0 0 0 0 0 0 0 0 0 0 0 0
45183- 0 0 0 0 0 0 0 0 0 0 0 0
45184- 0 0 0 0 0 0 0 0 0 6 6 6
45185- 30 30 30 78 78 78 190 142 34 226 170 11
45186-239 182 13 246 190 14 246 190 14 246 190 14
45187-246 190 14 246 190 14 246 190 14 246 190 14
45188-246 190 14 246 190 14 246 190 14 246 190 14
45189-246 190 14 241 196 14 203 166 17 22 18 6
45190- 2 2 6 2 2 6 2 2 6 38 38 38
45191-218 218 218 253 253 253 253 253 253 253 253 253
45192-253 253 253 253 253 253 253 253 253 253 253 253
45193-253 253 253 253 253 253 253 253 253 253 253 253
45194-253 253 253 253 253 253 253 253 253 253 253 253
45195-250 250 250 206 206 206 198 198 198 202 162 69
45196-226 170 11 236 178 12 224 166 10 210 150 10
45197-200 144 11 197 138 11 192 133 9 197 138 11
45198-210 150 10 226 170 11 242 186 14 246 190 14
45199-246 190 14 246 186 14 225 175 15 124 112 88
45200- 62 62 62 30 30 30 14 14 14 6 6 6
45201- 0 0 0 0 0 0 0 0 0 0 0 0
45202- 0 0 0 0 0 0 0 0 0 0 0 0
45203- 0 0 0 0 0 0 0 0 0 0 0 0
45204- 0 0 0 0 0 0 0 0 0 10 10 10
45205- 30 30 30 78 78 78 174 135 50 224 166 10
45206-239 182 13 246 190 14 246 190 14 246 190 14
45207-246 190 14 246 190 14 246 190 14 246 190 14
45208-246 190 14 246 190 14 246 190 14 246 190 14
45209-246 190 14 246 190 14 241 196 14 139 102 15
45210- 2 2 6 2 2 6 2 2 6 2 2 6
45211- 78 78 78 250 250 250 253 253 253 253 253 253
45212-253 253 253 253 253 253 253 253 253 253 253 253
45213-253 253 253 253 253 253 253 253 253 253 253 253
45214-253 253 253 253 253 253 253 253 253 253 253 253
45215-250 250 250 214 214 214 198 198 198 190 150 46
45216-219 162 10 236 178 12 234 174 13 224 166 10
45217-216 158 10 213 154 11 213 154 11 216 158 10
45218-226 170 11 239 182 13 246 190 14 246 190 14
45219-246 190 14 246 190 14 242 186 14 206 162 42
45220-101 101 101 58 58 58 30 30 30 14 14 14
45221- 6 6 6 0 0 0 0 0 0 0 0 0
45222- 0 0 0 0 0 0 0 0 0 0 0 0
45223- 0 0 0 0 0 0 0 0 0 0 0 0
45224- 0 0 0 0 0 0 0 0 0 10 10 10
45225- 30 30 30 74 74 74 174 135 50 216 158 10
45226-236 178 12 246 190 14 246 190 14 246 190 14
45227-246 190 14 246 190 14 246 190 14 246 190 14
45228-246 190 14 246 190 14 246 190 14 246 190 14
45229-246 190 14 246 190 14 241 196 14 226 184 13
45230- 61 42 6 2 2 6 2 2 6 2 2 6
45231- 22 22 22 238 238 238 253 253 253 253 253 253
45232-253 253 253 253 253 253 253 253 253 253 253 253
45233-253 253 253 253 253 253 253 253 253 253 253 253
45234-253 253 253 253 253 253 253 253 253 253 253 253
45235-253 253 253 226 226 226 187 187 187 180 133 36
45236-216 158 10 236 178 12 239 182 13 236 178 12
45237-230 174 11 226 170 11 226 170 11 230 174 11
45238-236 178 12 242 186 14 246 190 14 246 190 14
45239-246 190 14 246 190 14 246 186 14 239 182 13
45240-206 162 42 106 106 106 66 66 66 34 34 34
45241- 14 14 14 6 6 6 0 0 0 0 0 0
45242- 0 0 0 0 0 0 0 0 0 0 0 0
45243- 0 0 0 0 0 0 0 0 0 0 0 0
45244- 0 0 0 0 0 0 0 0 0 6 6 6
45245- 26 26 26 70 70 70 163 133 67 213 154 11
45246-236 178 12 246 190 14 246 190 14 246 190 14
45247-246 190 14 246 190 14 246 190 14 246 190 14
45248-246 190 14 246 190 14 246 190 14 246 190 14
45249-246 190 14 246 190 14 246 190 14 241 196 14
45250-190 146 13 18 14 6 2 2 6 2 2 6
45251- 46 46 46 246 246 246 253 253 253 253 253 253
45252-253 253 253 253 253 253 253 253 253 253 253 253
45253-253 253 253 253 253 253 253 253 253 253 253 253
45254-253 253 253 253 253 253 253 253 253 253 253 253
45255-253 253 253 221 221 221 86 86 86 156 107 11
45256-216 158 10 236 178 12 242 186 14 246 186 14
45257-242 186 14 239 182 13 239 182 13 242 186 14
45258-242 186 14 246 186 14 246 190 14 246 190 14
45259-246 190 14 246 190 14 246 190 14 246 190 14
45260-242 186 14 225 175 15 142 122 72 66 66 66
45261- 30 30 30 10 10 10 0 0 0 0 0 0
45262- 0 0 0 0 0 0 0 0 0 0 0 0
45263- 0 0 0 0 0 0 0 0 0 0 0 0
45264- 0 0 0 0 0 0 0 0 0 6 6 6
45265- 26 26 26 70 70 70 163 133 67 210 150 10
45266-236 178 12 246 190 14 246 190 14 246 190 14
45267-246 190 14 246 190 14 246 190 14 246 190 14
45268-246 190 14 246 190 14 246 190 14 246 190 14
45269-246 190 14 246 190 14 246 190 14 246 190 14
45270-232 195 16 121 92 8 34 34 34 106 106 106
45271-221 221 221 253 253 253 253 253 253 253 253 253
45272-253 253 253 253 253 253 253 253 253 253 253 253
45273-253 253 253 253 253 253 253 253 253 253 253 253
45274-253 253 253 253 253 253 253 253 253 253 253 253
45275-242 242 242 82 82 82 18 14 6 163 110 8
45276-216 158 10 236 178 12 242 186 14 246 190 14
45277-246 190 14 246 190 14 246 190 14 246 190 14
45278-246 190 14 246 190 14 246 190 14 246 190 14
45279-246 190 14 246 190 14 246 190 14 246 190 14
45280-246 190 14 246 190 14 242 186 14 163 133 67
45281- 46 46 46 18 18 18 6 6 6 0 0 0
45282- 0 0 0 0 0 0 0 0 0 0 0 0
45283- 0 0 0 0 0 0 0 0 0 0 0 0
45284- 0 0 0 0 0 0 0 0 0 10 10 10
45285- 30 30 30 78 78 78 163 133 67 210 150 10
45286-236 178 12 246 186 14 246 190 14 246 190 14
45287-246 190 14 246 190 14 246 190 14 246 190 14
45288-246 190 14 246 190 14 246 190 14 246 190 14
45289-246 190 14 246 190 14 246 190 14 246 190 14
45290-241 196 14 215 174 15 190 178 144 253 253 253
45291-253 253 253 253 253 253 253 253 253 253 253 253
45292-253 253 253 253 253 253 253 253 253 253 253 253
45293-253 253 253 253 253 253 253 253 253 253 253 253
45294-253 253 253 253 253 253 253 253 253 218 218 218
45295- 58 58 58 2 2 6 22 18 6 167 114 7
45296-216 158 10 236 178 12 246 186 14 246 190 14
45297-246 190 14 246 190 14 246 190 14 246 190 14
45298-246 190 14 246 190 14 246 190 14 246 190 14
45299-246 190 14 246 190 14 246 190 14 246 190 14
45300-246 190 14 246 186 14 242 186 14 190 150 46
45301- 54 54 54 22 22 22 6 6 6 0 0 0
45302- 0 0 0 0 0 0 0 0 0 0 0 0
45303- 0 0 0 0 0 0 0 0 0 0 0 0
45304- 0 0 0 0 0 0 0 0 0 14 14 14
45305- 38 38 38 86 86 86 180 133 36 213 154 11
45306-236 178 12 246 186 14 246 190 14 246 190 14
45307-246 190 14 246 190 14 246 190 14 246 190 14
45308-246 190 14 246 190 14 246 190 14 246 190 14
45309-246 190 14 246 190 14 246 190 14 246 190 14
45310-246 190 14 232 195 16 190 146 13 214 214 214
45311-253 253 253 253 253 253 253 253 253 253 253 253
45312-253 253 253 253 253 253 253 253 253 253 253 253
45313-253 253 253 253 253 253 253 253 253 253 253 253
45314-253 253 253 250 250 250 170 170 170 26 26 26
45315- 2 2 6 2 2 6 37 26 9 163 110 8
45316-219 162 10 239 182 13 246 186 14 246 190 14
45317-246 190 14 246 190 14 246 190 14 246 190 14
45318-246 190 14 246 190 14 246 190 14 246 190 14
45319-246 190 14 246 190 14 246 190 14 246 190 14
45320-246 186 14 236 178 12 224 166 10 142 122 72
45321- 46 46 46 18 18 18 6 6 6 0 0 0
45322- 0 0 0 0 0 0 0 0 0 0 0 0
45323- 0 0 0 0 0 0 0 0 0 0 0 0
45324- 0 0 0 0 0 0 6 6 6 18 18 18
45325- 50 50 50 109 106 95 192 133 9 224 166 10
45326-242 186 14 246 190 14 246 190 14 246 190 14
45327-246 190 14 246 190 14 246 190 14 246 190 14
45328-246 190 14 246 190 14 246 190 14 246 190 14
45329-246 190 14 246 190 14 246 190 14 246 190 14
45330-242 186 14 226 184 13 210 162 10 142 110 46
45331-226 226 226 253 253 253 253 253 253 253 253 253
45332-253 253 253 253 253 253 253 253 253 253 253 253
45333-253 253 253 253 253 253 253 253 253 253 253 253
45334-198 198 198 66 66 66 2 2 6 2 2 6
45335- 2 2 6 2 2 6 50 34 6 156 107 11
45336-219 162 10 239 182 13 246 186 14 246 190 14
45337-246 190 14 246 190 14 246 190 14 246 190 14
45338-246 190 14 246 190 14 246 190 14 246 190 14
45339-246 190 14 246 190 14 246 190 14 242 186 14
45340-234 174 13 213 154 11 154 122 46 66 66 66
45341- 30 30 30 10 10 10 0 0 0 0 0 0
45342- 0 0 0 0 0 0 0 0 0 0 0 0
45343- 0 0 0 0 0 0 0 0 0 0 0 0
45344- 0 0 0 0 0 0 6 6 6 22 22 22
45345- 58 58 58 154 121 60 206 145 10 234 174 13
45346-242 186 14 246 186 14 246 190 14 246 190 14
45347-246 190 14 246 190 14 246 190 14 246 190 14
45348-246 190 14 246 190 14 246 190 14 246 190 14
45349-246 190 14 246 190 14 246 190 14 246 190 14
45350-246 186 14 236 178 12 210 162 10 163 110 8
45351- 61 42 6 138 138 138 218 218 218 250 250 250
45352-253 253 253 253 253 253 253 253 253 250 250 250
45353-242 242 242 210 210 210 144 144 144 66 66 66
45354- 6 6 6 2 2 6 2 2 6 2 2 6
45355- 2 2 6 2 2 6 61 42 6 163 110 8
45356-216 158 10 236 178 12 246 190 14 246 190 14
45357-246 190 14 246 190 14 246 190 14 246 190 14
45358-246 190 14 246 190 14 246 190 14 246 190 14
45359-246 190 14 239 182 13 230 174 11 216 158 10
45360-190 142 34 124 112 88 70 70 70 38 38 38
45361- 18 18 18 6 6 6 0 0 0 0 0 0
45362- 0 0 0 0 0 0 0 0 0 0 0 0
45363- 0 0 0 0 0 0 0 0 0 0 0 0
45364- 0 0 0 0 0 0 6 6 6 22 22 22
45365- 62 62 62 168 124 44 206 145 10 224 166 10
45366-236 178 12 239 182 13 242 186 14 242 186 14
45367-246 186 14 246 190 14 246 190 14 246 190 14
45368-246 190 14 246 190 14 246 190 14 246 190 14
45369-246 190 14 246 190 14 246 190 14 246 190 14
45370-246 190 14 236 178 12 216 158 10 175 118 6
45371- 80 54 7 2 2 6 6 6 6 30 30 30
45372- 54 54 54 62 62 62 50 50 50 38 38 38
45373- 14 14 14 2 2 6 2 2 6 2 2 6
45374- 2 2 6 2 2 6 2 2 6 2 2 6
45375- 2 2 6 6 6 6 80 54 7 167 114 7
45376-213 154 11 236 178 12 246 190 14 246 190 14
45377-246 190 14 246 190 14 246 190 14 246 190 14
45378-246 190 14 242 186 14 239 182 13 239 182 13
45379-230 174 11 210 150 10 174 135 50 124 112 88
45380- 82 82 82 54 54 54 34 34 34 18 18 18
45381- 6 6 6 0 0 0 0 0 0 0 0 0
45382- 0 0 0 0 0 0 0 0 0 0 0 0
45383- 0 0 0 0 0 0 0 0 0 0 0 0
45384- 0 0 0 0 0 0 6 6 6 18 18 18
45385- 50 50 50 158 118 36 192 133 9 200 144 11
45386-216 158 10 219 162 10 224 166 10 226 170 11
45387-230 174 11 236 178 12 239 182 13 239 182 13
45388-242 186 14 246 186 14 246 190 14 246 190 14
45389-246 190 14 246 190 14 246 190 14 246 190 14
45390-246 186 14 230 174 11 210 150 10 163 110 8
45391-104 69 6 10 10 10 2 2 6 2 2 6
45392- 2 2 6 2 2 6 2 2 6 2 2 6
45393- 2 2 6 2 2 6 2 2 6 2 2 6
45394- 2 2 6 2 2 6 2 2 6 2 2 6
45395- 2 2 6 6 6 6 91 60 6 167 114 7
45396-206 145 10 230 174 11 242 186 14 246 190 14
45397-246 190 14 246 190 14 246 186 14 242 186 14
45398-239 182 13 230 174 11 224 166 10 213 154 11
45399-180 133 36 124 112 88 86 86 86 58 58 58
45400- 38 38 38 22 22 22 10 10 10 6 6 6
45401- 0 0 0 0 0 0 0 0 0 0 0 0
45402- 0 0 0 0 0 0 0 0 0 0 0 0
45403- 0 0 0 0 0 0 0 0 0 0 0 0
45404- 0 0 0 0 0 0 0 0 0 14 14 14
45405- 34 34 34 70 70 70 138 110 50 158 118 36
45406-167 114 7 180 123 7 192 133 9 197 138 11
45407-200 144 11 206 145 10 213 154 11 219 162 10
45408-224 166 10 230 174 11 239 182 13 242 186 14
45409-246 186 14 246 186 14 246 186 14 246 186 14
45410-239 182 13 216 158 10 185 133 11 152 99 6
45411-104 69 6 18 14 6 2 2 6 2 2 6
45412- 2 2 6 2 2 6 2 2 6 2 2 6
45413- 2 2 6 2 2 6 2 2 6 2 2 6
45414- 2 2 6 2 2 6 2 2 6 2 2 6
45415- 2 2 6 6 6 6 80 54 7 152 99 6
45416-192 133 9 219 162 10 236 178 12 239 182 13
45417-246 186 14 242 186 14 239 182 13 236 178 12
45418-224 166 10 206 145 10 192 133 9 154 121 60
45419- 94 94 94 62 62 62 42 42 42 22 22 22
45420- 14 14 14 6 6 6 0 0 0 0 0 0
45421- 0 0 0 0 0 0 0 0 0 0 0 0
45422- 0 0 0 0 0 0 0 0 0 0 0 0
45423- 0 0 0 0 0 0 0 0 0 0 0 0
45424- 0 0 0 0 0 0 0 0 0 6 6 6
45425- 18 18 18 34 34 34 58 58 58 78 78 78
45426-101 98 89 124 112 88 142 110 46 156 107 11
45427-163 110 8 167 114 7 175 118 6 180 123 7
45428-185 133 11 197 138 11 210 150 10 219 162 10
45429-226 170 11 236 178 12 236 178 12 234 174 13
45430-219 162 10 197 138 11 163 110 8 130 83 6
45431- 91 60 6 10 10 10 2 2 6 2 2 6
45432- 18 18 18 38 38 38 38 38 38 38 38 38
45433- 38 38 38 38 38 38 38 38 38 38 38 38
45434- 38 38 38 38 38 38 26 26 26 2 2 6
45435- 2 2 6 6 6 6 70 47 6 137 92 6
45436-175 118 6 200 144 11 219 162 10 230 174 11
45437-234 174 13 230 174 11 219 162 10 210 150 10
45438-192 133 9 163 110 8 124 112 88 82 82 82
45439- 50 50 50 30 30 30 14 14 14 6 6 6
45440- 0 0 0 0 0 0 0 0 0 0 0 0
45441- 0 0 0 0 0 0 0 0 0 0 0 0
45442- 0 0 0 0 0 0 0 0 0 0 0 0
45443- 0 0 0 0 0 0 0 0 0 0 0 0
45444- 0 0 0 0 0 0 0 0 0 0 0 0
45445- 6 6 6 14 14 14 22 22 22 34 34 34
45446- 42 42 42 58 58 58 74 74 74 86 86 86
45447-101 98 89 122 102 70 130 98 46 121 87 25
45448-137 92 6 152 99 6 163 110 8 180 123 7
45449-185 133 11 197 138 11 206 145 10 200 144 11
45450-180 123 7 156 107 11 130 83 6 104 69 6
45451- 50 34 6 54 54 54 110 110 110 101 98 89
45452- 86 86 86 82 82 82 78 78 78 78 78 78
45453- 78 78 78 78 78 78 78 78 78 78 78 78
45454- 78 78 78 82 82 82 86 86 86 94 94 94
45455-106 106 106 101 101 101 86 66 34 124 80 6
45456-156 107 11 180 123 7 192 133 9 200 144 11
45457-206 145 10 200 144 11 192 133 9 175 118 6
45458-139 102 15 109 106 95 70 70 70 42 42 42
45459- 22 22 22 10 10 10 0 0 0 0 0 0
45460- 0 0 0 0 0 0 0 0 0 0 0 0
45461- 0 0 0 0 0 0 0 0 0 0 0 0
45462- 0 0 0 0 0 0 0 0 0 0 0 0
45463- 0 0 0 0 0 0 0 0 0 0 0 0
45464- 0 0 0 0 0 0 0 0 0 0 0 0
45465- 0 0 0 0 0 0 6 6 6 10 10 10
45466- 14 14 14 22 22 22 30 30 30 38 38 38
45467- 50 50 50 62 62 62 74 74 74 90 90 90
45468-101 98 89 112 100 78 121 87 25 124 80 6
45469-137 92 6 152 99 6 152 99 6 152 99 6
45470-138 86 6 124 80 6 98 70 6 86 66 30
45471-101 98 89 82 82 82 58 58 58 46 46 46
45472- 38 38 38 34 34 34 34 34 34 34 34 34
45473- 34 34 34 34 34 34 34 34 34 34 34 34
45474- 34 34 34 34 34 34 38 38 38 42 42 42
45475- 54 54 54 82 82 82 94 86 76 91 60 6
45476-134 86 6 156 107 11 167 114 7 175 118 6
45477-175 118 6 167 114 7 152 99 6 121 87 25
45478-101 98 89 62 62 62 34 34 34 18 18 18
45479- 6 6 6 0 0 0 0 0 0 0 0 0
45480- 0 0 0 0 0 0 0 0 0 0 0 0
45481- 0 0 0 0 0 0 0 0 0 0 0 0
45482- 0 0 0 0 0 0 0 0 0 0 0 0
45483- 0 0 0 0 0 0 0 0 0 0 0 0
45484- 0 0 0 0 0 0 0 0 0 0 0 0
45485- 0 0 0 0 0 0 0 0 0 0 0 0
45486- 0 0 0 6 6 6 6 6 6 10 10 10
45487- 18 18 18 22 22 22 30 30 30 42 42 42
45488- 50 50 50 66 66 66 86 86 86 101 98 89
45489-106 86 58 98 70 6 104 69 6 104 69 6
45490-104 69 6 91 60 6 82 62 34 90 90 90
45491- 62 62 62 38 38 38 22 22 22 14 14 14
45492- 10 10 10 10 10 10 10 10 10 10 10 10
45493- 10 10 10 10 10 10 6 6 6 10 10 10
45494- 10 10 10 10 10 10 10 10 10 14 14 14
45495- 22 22 22 42 42 42 70 70 70 89 81 66
45496- 80 54 7 104 69 6 124 80 6 137 92 6
45497-134 86 6 116 81 8 100 82 52 86 86 86
45498- 58 58 58 30 30 30 14 14 14 6 6 6
45499- 0 0 0 0 0 0 0 0 0 0 0 0
45500- 0 0 0 0 0 0 0 0 0 0 0 0
45501- 0 0 0 0 0 0 0 0 0 0 0 0
45502- 0 0 0 0 0 0 0 0 0 0 0 0
45503- 0 0 0 0 0 0 0 0 0 0 0 0
45504- 0 0 0 0 0 0 0 0 0 0 0 0
45505- 0 0 0 0 0 0 0 0 0 0 0 0
45506- 0 0 0 0 0 0 0 0 0 0 0 0
45507- 0 0 0 6 6 6 10 10 10 14 14 14
45508- 18 18 18 26 26 26 38 38 38 54 54 54
45509- 70 70 70 86 86 86 94 86 76 89 81 66
45510- 89 81 66 86 86 86 74 74 74 50 50 50
45511- 30 30 30 14 14 14 6 6 6 0 0 0
45512- 0 0 0 0 0 0 0 0 0 0 0 0
45513- 0 0 0 0 0 0 0 0 0 0 0 0
45514- 0 0 0 0 0 0 0 0 0 0 0 0
45515- 6 6 6 18 18 18 34 34 34 58 58 58
45516- 82 82 82 89 81 66 89 81 66 89 81 66
45517- 94 86 66 94 86 76 74 74 74 50 50 50
45518- 26 26 26 14 14 14 6 6 6 0 0 0
45519- 0 0 0 0 0 0 0 0 0 0 0 0
45520- 0 0 0 0 0 0 0 0 0 0 0 0
45521- 0 0 0 0 0 0 0 0 0 0 0 0
45522- 0 0 0 0 0 0 0 0 0 0 0 0
45523- 0 0 0 0 0 0 0 0 0 0 0 0
45524- 0 0 0 0 0 0 0 0 0 0 0 0
45525- 0 0 0 0 0 0 0 0 0 0 0 0
45526- 0 0 0 0 0 0 0 0 0 0 0 0
45527- 0 0 0 0 0 0 0 0 0 0 0 0
45528- 6 6 6 6 6 6 14 14 14 18 18 18
45529- 30 30 30 38 38 38 46 46 46 54 54 54
45530- 50 50 50 42 42 42 30 30 30 18 18 18
45531- 10 10 10 0 0 0 0 0 0 0 0 0
45532- 0 0 0 0 0 0 0 0 0 0 0 0
45533- 0 0 0 0 0 0 0 0 0 0 0 0
45534- 0 0 0 0 0 0 0 0 0 0 0 0
45535- 0 0 0 6 6 6 14 14 14 26 26 26
45536- 38 38 38 50 50 50 58 58 58 58 58 58
45537- 54 54 54 42 42 42 30 30 30 18 18 18
45538- 10 10 10 0 0 0 0 0 0 0 0 0
45539- 0 0 0 0 0 0 0 0 0 0 0 0
45540- 0 0 0 0 0 0 0 0 0 0 0 0
45541- 0 0 0 0 0 0 0 0 0 0 0 0
45542- 0 0 0 0 0 0 0 0 0 0 0 0
45543- 0 0 0 0 0 0 0 0 0 0 0 0
45544- 0 0 0 0 0 0 0 0 0 0 0 0
45545- 0 0 0 0 0 0 0 0 0 0 0 0
45546- 0 0 0 0 0 0 0 0 0 0 0 0
45547- 0 0 0 0 0 0 0 0 0 0 0 0
45548- 0 0 0 0 0 0 0 0 0 6 6 6
45549- 6 6 6 10 10 10 14 14 14 18 18 18
45550- 18 18 18 14 14 14 10 10 10 6 6 6
45551- 0 0 0 0 0 0 0 0 0 0 0 0
45552- 0 0 0 0 0 0 0 0 0 0 0 0
45553- 0 0 0 0 0 0 0 0 0 0 0 0
45554- 0 0 0 0 0 0 0 0 0 0 0 0
45555- 0 0 0 0 0 0 0 0 0 6 6 6
45556- 14 14 14 18 18 18 22 22 22 22 22 22
45557- 18 18 18 14 14 14 10 10 10 6 6 6
45558- 0 0 0 0 0 0 0 0 0 0 0 0
45559- 0 0 0 0 0 0 0 0 0 0 0 0
45560- 0 0 0 0 0 0 0 0 0 0 0 0
45561- 0 0 0 0 0 0 0 0 0 0 0 0
45562- 0 0 0 0 0 0 0 0 0 0 0 0
45563+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45576+4 4 4 4 4 4
45577+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45590+4 4 4 4 4 4
45591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45604+4 4 4 4 4 4
45605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45618+4 4 4 4 4 4
45619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45632+4 4 4 4 4 4
45633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45646+4 4 4 4 4 4
45647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45651+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
45652+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
45653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45656+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
45657+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45658+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
45659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45660+4 4 4 4 4 4
45661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45665+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
45666+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
45667+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45670+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
45671+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
45672+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
45673+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45674+4 4 4 4 4 4
45675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45679+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45680+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45681+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45684+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45685+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45686+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45687+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45688+4 4 4 4 4 4
45689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45692+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45693+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45694+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45695+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45697+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45698+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45699+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45700+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45701+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45702+4 4 4 4 4 4
45703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45706+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45707+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45708+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45709+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45710+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45711+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45712+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45713+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45714+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45715+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45716+4 4 4 4 4 4
45717+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45720+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45721+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45722+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45723+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45724+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45725+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45726+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45727+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45728+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45729+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45730+4 4 4 4 4 4
45731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45733+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45734+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45735+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45736+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45737+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45738+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45739+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45740+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45741+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45742+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45743+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45744+4 4 4 4 4 4
45745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45747+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45748+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45749+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45750+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45751+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45752+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45753+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45754+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45755+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45756+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45757+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45758+4 4 4 4 4 4
45759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45761+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45762+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45763+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45764+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45765+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45766+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45767+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45768+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45769+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45770+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45771+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45772+4 4 4 4 4 4
45773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45775+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45776+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45777+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45778+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45779+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45780+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45781+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45782+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45783+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45784+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45785+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45786+4 4 4 4 4 4
45787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45788+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45789+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45790+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45791+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45792+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45793+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45794+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45795+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45796+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45797+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45798+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45799+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45800+4 4 4 4 4 4
45801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45802+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45803+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45804+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45805+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45806+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45807+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45808+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45809+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45810+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45811+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45812+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45813+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45814+0 0 0 4 4 4
45815+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45816+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45817+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45818+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45819+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45820+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45821+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45822+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45823+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45824+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45825+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45826+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45827+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45828+2 0 0 0 0 0
45829+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45830+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45831+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45832+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45833+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45834+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45835+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45836+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45837+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45838+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45839+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45840+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45841+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45842+37 38 37 0 0 0
45843+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45844+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45845+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45846+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45847+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45848+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45849+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45850+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45851+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45852+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45853+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45854+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45855+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45856+85 115 134 4 0 0
45857+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45858+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45859+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45860+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45861+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45862+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45863+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45864+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45865+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45866+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45867+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45868+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45869+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45870+60 73 81 4 0 0
45871+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45872+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45873+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45874+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45875+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45876+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45877+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45878+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45879+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45880+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45881+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45882+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45883+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45884+16 19 21 4 0 0
45885+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45886+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45887+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45888+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45889+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45890+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45891+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45892+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45893+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45894+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45895+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45896+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45897+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45898+4 0 0 4 3 3
45899+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45900+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45901+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45903+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45904+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45905+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45906+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45907+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45908+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45909+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45910+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45911+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45912+3 2 2 4 4 4
45913+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45914+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45915+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45916+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45917+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45918+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45919+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45920+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45921+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45922+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45923+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45924+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45925+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45926+4 4 4 4 4 4
45927+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45928+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45929+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45930+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45931+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45932+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45933+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45934+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45935+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45936+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45937+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45938+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45939+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45940+4 4 4 4 4 4
45941+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45942+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45943+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45944+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45945+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45946+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45947+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45948+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45949+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45950+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45951+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45952+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45953+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45954+5 5 5 5 5 5
45955+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45956+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45957+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45958+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45959+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45960+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45961+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45962+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45963+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45964+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45965+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45966+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45967+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45968+5 5 5 4 4 4
45969+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45970+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45971+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45972+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45973+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45974+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45975+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45976+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45977+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45978+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45979+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45980+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45981+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45982+4 4 4 4 4 4
45983+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45984+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45985+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45986+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45987+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45988+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45989+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45990+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45991+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45992+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45993+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45994+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45995+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45996+4 4 4 4 4 4
45997+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45998+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45999+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
46000+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
46001+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46002+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
46003+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
46004+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
46005+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
46006+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
46007+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
46008+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46009+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46010+4 4 4 4 4 4
46011+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
46012+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
46013+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
46014+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
46015+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46016+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46017+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46018+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
46019+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
46020+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
46021+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
46022+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46023+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46024+4 4 4 4 4 4
46025+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
46026+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
46027+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
46028+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
46029+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46030+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
46031+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
46032+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
46033+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
46034+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
46035+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46036+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46037+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46038+4 4 4 4 4 4
46039+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
46040+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
46041+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
46042+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
46043+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46044+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
46045+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
46046+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
46047+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
46048+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
46049+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
46050+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46051+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46052+4 4 4 4 4 4
46053+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
46054+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
46055+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
46056+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
46057+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46058+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
46059+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
46060+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
46061+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
46062+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
46063+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
46064+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46065+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46066+4 4 4 4 4 4
46067+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
46068+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
46069+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
46070+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
46071+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
46072+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
46073+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
46074+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
46075+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
46076+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
46077+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46078+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46079+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46080+4 4 4 4 4 4
46081+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
46082+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
46083+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
46084+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
46085+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46086+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
46087+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
46088+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
46089+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
46090+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
46091+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46092+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46093+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46094+4 4 4 4 4 4
46095+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
46096+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
46097+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
46098+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
46099+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46100+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
46101+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
46102+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
46103+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
46104+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
46105+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46106+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46107+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46108+4 4 4 4 4 4
46109+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
46110+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
46111+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
46112+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
46113+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46114+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
46115+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
46116+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
46117+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
46118+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46119+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46120+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46121+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46122+4 4 4 4 4 4
46123+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
46124+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
46125+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
46126+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
46127+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
46128+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
46129+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
46130+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
46131+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46132+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46133+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46134+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46135+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46136+4 4 4 4 4 4
46137+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
46138+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
46139+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
46140+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
46141+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46142+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
46143+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
46144+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
46145+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46146+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46147+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46148+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46149+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46150+4 4 4 4 4 4
46151+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
46152+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
46153+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
46154+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
46155+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
46156+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
46157+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
46158+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
46159+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46160+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46161+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46162+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46164+4 4 4 4 4 4
46165+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
46166+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
46167+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46168+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
46169+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
46170+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
46171+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
46172+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
46173+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
46174+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46175+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46176+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46178+4 4 4 4 4 4
46179+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
46180+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
46181+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
46182+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
46183+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
46184+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
46185+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
46186+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
46187+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46188+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46189+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46192+4 4 4 4 4 4
46193+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
46194+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
46195+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46196+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
46197+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
46198+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
46199+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
46200+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
46201+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
46202+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46203+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46206+4 4 4 4 4 4
46207+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
46208+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
46209+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
46210+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
46211+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
46212+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
46213+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
46214+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
46215+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46216+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46217+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46220+4 4 4 4 4 4
46221+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46222+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
46223+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
46224+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
46225+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
46226+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
46227+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
46228+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
46229+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46230+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46231+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46234+4 4 4 4 4 4
46235+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
46236+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
46237+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
46238+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
46239+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
46240+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
46241+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46242+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
46243+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46244+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46245+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46248+4 4 4 4 4 4
46249+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46250+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
46251+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
46252+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
46253+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
46254+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
46255+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46256+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
46257+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46258+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46259+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46262+4 4 4 4 4 4
46263+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
46264+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
46265+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
46266+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
46267+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
46268+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
46269+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
46270+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
46271+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
46272+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46273+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46276+4 4 4 4 4 4
46277+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46278+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
46279+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
46280+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
46281+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
46282+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
46283+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
46284+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
46285+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
46286+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46287+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46290+4 4 4 4 4 4
46291+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
46292+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
46293+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
46294+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
46295+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
46296+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
46297+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
46298+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
46299+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
46300+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46301+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46304+4 4 4 4 4 4
46305+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
46306+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
46307+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
46308+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
46309+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
46310+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
46311+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
46312+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
46313+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
46314+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46315+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46318+4 4 4 4 4 4
46319+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
46320+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
46321+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
46322+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
46323+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
46324+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
46325+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
46326+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
46327+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
46328+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
46329+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46332+4 4 4 4 4 4
46333+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
46334+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
46335+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
46336+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
46337+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
46338+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
46339+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
46340+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
46341+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
46342+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
46343+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46346+4 4 4 4 4 4
46347+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
46348+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
46349+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
46350+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
46351+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
46352+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
46353+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46354+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
46355+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
46356+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
46357+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46360+4 4 4 4 4 4
46361+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
46362+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
46363+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
46364+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
46365+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
46366+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
46367+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
46368+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
46369+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
46370+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
46371+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46374+4 4 4 4 4 4
46375+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
46376+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
46377+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
46378+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
46379+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
46380+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
46381+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
46382+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
46383+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
46384+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
46385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46388+4 4 4 4 4 4
46389+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46390+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
46391+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
46392+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
46393+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
46394+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
46395+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
46396+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
46397+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
46398+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
46399+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46402+4 4 4 4 4 4
46403+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
46404+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
46405+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
46406+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
46407+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
46408+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
46409+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
46410+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
46411+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
46412+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
46413+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46416+4 4 4 4 4 4
46417+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
46418+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
46419+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
46420+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
46421+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
46422+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
46423+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
46424+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
46425+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
46426+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46427+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46430+4 4 4 4 4 4
46431+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
46432+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46433+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
46434+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
46435+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
46436+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
46437+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
46438+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
46439+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
46440+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46441+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46444+4 4 4 4 4 4
46445+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
46446+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
46447+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
46448+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
46449+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
46450+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
46451+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
46452+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
46453+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
46454+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46455+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46458+4 4 4 4 4 4
46459+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46460+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
46461+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
46462+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
46463+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
46464+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
46465+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
46466+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
46467+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46468+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46469+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46472+4 4 4 4 4 4
46473+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
46474+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
46475+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46476+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
46477+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
46478+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
46479+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
46480+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
46481+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46482+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46483+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46486+4 4 4 4 4 4
46487+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46488+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
46489+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
46490+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
46491+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
46492+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
46493+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
46494+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
46495+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46496+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46497+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46500+4 4 4 4 4 4
46501+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46502+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
46503+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46504+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
46505+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
46506+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
46507+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
46508+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
46509+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46510+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46511+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46514+4 4 4 4 4 4
46515+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46516+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
46517+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
46518+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
46519+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
46520+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
46521+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
46522+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
46523+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46524+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46525+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46528+4 4 4 4 4 4
46529+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46530+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
46531+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
46532+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46533+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
46534+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
46535+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
46536+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46537+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46538+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46539+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46542+4 4 4 4 4 4
46543+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46544+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46545+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46546+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
46547+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
46548+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
46549+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
46550+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46551+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46552+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46553+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46556+4 4 4 4 4 4
46557+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46558+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46559+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46560+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46561+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
46562+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
46563+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
46564+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46565+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46566+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46570+4 4 4 4 4 4
46571+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46572+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46573+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46574+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
46575+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
46576+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
46577+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
46578+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46579+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46580+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46584+4 4 4 4 4 4
46585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46588+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
46589+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
46590+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
46591+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
46592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46598+4 4 4 4 4 4
46599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46602+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
46603+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
46604+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
46605+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
46606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46612+4 4 4 4 4 4
46613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46617+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
46618+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
46619+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46626+4 4 4 4 4 4
46627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46631+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
46632+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
46633+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46640+4 4 4 4 4 4
46641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46645+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
46646+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
46647+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46654+4 4 4 4 4 4
46655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46659+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
46660+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
46661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46668+4 4 4 4 4 4
46669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46673+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
46674+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
46675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46682+4 4 4 4 4 4
46683diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
46684index fe92eed..106e085 100644
46685--- a/drivers/video/mb862xx/mb862xxfb_accel.c
46686+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
46687@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
46688 struct mb862xxfb_par *par = info->par;
46689
46690 if (info->var.bits_per_pixel == 32) {
46691- info->fbops->fb_fillrect = cfb_fillrect;
46692- info->fbops->fb_copyarea = cfb_copyarea;
46693- info->fbops->fb_imageblit = cfb_imageblit;
46694+ pax_open_kernel();
46695+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
46696+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
46697+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
46698+ pax_close_kernel();
46699 } else {
46700 outreg(disp, GC_L0EM, 3);
46701- info->fbops->fb_fillrect = mb86290fb_fillrect;
46702- info->fbops->fb_copyarea = mb86290fb_copyarea;
46703- info->fbops->fb_imageblit = mb86290fb_imageblit;
46704+ pax_open_kernel();
46705+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
46706+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
46707+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
46708+ pax_close_kernel();
46709 }
46710 outreg(draw, GDC_REG_DRAW_BASE, 0);
46711 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
46712diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
46713index ff22871..b129bed 100644
46714--- a/drivers/video/nvidia/nvidia.c
46715+++ b/drivers/video/nvidia/nvidia.c
46716@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
46717 info->fix.line_length = (info->var.xres_virtual *
46718 info->var.bits_per_pixel) >> 3;
46719 if (info->var.accel_flags) {
46720- info->fbops->fb_imageblit = nvidiafb_imageblit;
46721- info->fbops->fb_fillrect = nvidiafb_fillrect;
46722- info->fbops->fb_copyarea = nvidiafb_copyarea;
46723- info->fbops->fb_sync = nvidiafb_sync;
46724+ pax_open_kernel();
46725+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
46726+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
46727+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
46728+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
46729+ pax_close_kernel();
46730 info->pixmap.scan_align = 4;
46731 info->flags &= ~FBINFO_HWACCEL_DISABLED;
46732 info->flags |= FBINFO_READS_FAST;
46733 NVResetGraphics(info);
46734 } else {
46735- info->fbops->fb_imageblit = cfb_imageblit;
46736- info->fbops->fb_fillrect = cfb_fillrect;
46737- info->fbops->fb_copyarea = cfb_copyarea;
46738- info->fbops->fb_sync = NULL;
46739+ pax_open_kernel();
46740+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
46741+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
46742+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
46743+ *(void **)&info->fbops->fb_sync = NULL;
46744+ pax_close_kernel();
46745 info->pixmap.scan_align = 1;
46746 info->flags |= FBINFO_HWACCEL_DISABLED;
46747 info->flags &= ~FBINFO_READS_FAST;
46748@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
46749 info->pixmap.size = 8 * 1024;
46750 info->pixmap.flags = FB_PIXMAP_SYSTEM;
46751
46752- if (!hwcur)
46753- info->fbops->fb_cursor = NULL;
46754+ if (!hwcur) {
46755+ pax_open_kernel();
46756+ *(void **)&info->fbops->fb_cursor = NULL;
46757+ pax_close_kernel();
46758+ }
46759
46760 info->var.accel_flags = (!noaccel);
46761
46762diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
46763index 76d9053..dec2bfd 100644
46764--- a/drivers/video/s1d13xxxfb.c
46765+++ b/drivers/video/s1d13xxxfb.c
46766@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
46767
46768 switch(prod_id) {
46769 case S1D13506_PROD_ID: /* activate acceleration */
46770- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
46771- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
46772+ pax_open_kernel();
46773+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
46774+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
46775+ pax_close_kernel();
46776 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
46777 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
46778 break;
46779diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
46780index 97bd662..39fab85 100644
46781--- a/drivers/video/smscufx.c
46782+++ b/drivers/video/smscufx.c
46783@@ -1171,7 +1171,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
46784 fb_deferred_io_cleanup(info);
46785 kfree(info->fbdefio);
46786 info->fbdefio = NULL;
46787- info->fbops->fb_mmap = ufx_ops_mmap;
46788+ pax_open_kernel();
46789+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
46790+ pax_close_kernel();
46791 }
46792
46793 pr_debug("released /dev/fb%d user=%d count=%d",
46794diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
46795index 86d449e..8e04dc5 100644
46796--- a/drivers/video/udlfb.c
46797+++ b/drivers/video/udlfb.c
46798@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
46799 dlfb_urb_completion(urb);
46800
46801 error:
46802- atomic_add(bytes_sent, &dev->bytes_sent);
46803- atomic_add(bytes_identical, &dev->bytes_identical);
46804- atomic_add(width*height*2, &dev->bytes_rendered);
46805+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
46806+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
46807+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
46808 end_cycles = get_cycles();
46809- atomic_add(((unsigned int) ((end_cycles - start_cycles)
46810+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
46811 >> 10)), /* Kcycles */
46812 &dev->cpu_kcycles_used);
46813
46814@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
46815 dlfb_urb_completion(urb);
46816
46817 error:
46818- atomic_add(bytes_sent, &dev->bytes_sent);
46819- atomic_add(bytes_identical, &dev->bytes_identical);
46820- atomic_add(bytes_rendered, &dev->bytes_rendered);
46821+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
46822+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
46823+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
46824 end_cycles = get_cycles();
46825- atomic_add(((unsigned int) ((end_cycles - start_cycles)
46826+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
46827 >> 10)), /* Kcycles */
46828 &dev->cpu_kcycles_used);
46829 }
46830@@ -989,7 +989,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
46831 fb_deferred_io_cleanup(info);
46832 kfree(info->fbdefio);
46833 info->fbdefio = NULL;
46834- info->fbops->fb_mmap = dlfb_ops_mmap;
46835+ pax_open_kernel();
46836+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
46837+ pax_close_kernel();
46838 }
46839
46840 pr_warn("released /dev/fb%d user=%d count=%d\n",
46841@@ -1372,7 +1374,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
46842 struct fb_info *fb_info = dev_get_drvdata(fbdev);
46843 struct dlfb_data *dev = fb_info->par;
46844 return snprintf(buf, PAGE_SIZE, "%u\n",
46845- atomic_read(&dev->bytes_rendered));
46846+ atomic_read_unchecked(&dev->bytes_rendered));
46847 }
46848
46849 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
46850@@ -1380,7 +1382,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
46851 struct fb_info *fb_info = dev_get_drvdata(fbdev);
46852 struct dlfb_data *dev = fb_info->par;
46853 return snprintf(buf, PAGE_SIZE, "%u\n",
46854- atomic_read(&dev->bytes_identical));
46855+ atomic_read_unchecked(&dev->bytes_identical));
46856 }
46857
46858 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
46859@@ -1388,7 +1390,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
46860 struct fb_info *fb_info = dev_get_drvdata(fbdev);
46861 struct dlfb_data *dev = fb_info->par;
46862 return snprintf(buf, PAGE_SIZE, "%u\n",
46863- atomic_read(&dev->bytes_sent));
46864+ atomic_read_unchecked(&dev->bytes_sent));
46865 }
46866
46867 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
46868@@ -1396,7 +1398,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
46869 struct fb_info *fb_info = dev_get_drvdata(fbdev);
46870 struct dlfb_data *dev = fb_info->par;
46871 return snprintf(buf, PAGE_SIZE, "%u\n",
46872- atomic_read(&dev->cpu_kcycles_used));
46873+ atomic_read_unchecked(&dev->cpu_kcycles_used));
46874 }
46875
46876 static ssize_t edid_show(
46877@@ -1456,10 +1458,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
46878 struct fb_info *fb_info = dev_get_drvdata(fbdev);
46879 struct dlfb_data *dev = fb_info->par;
46880
46881- atomic_set(&dev->bytes_rendered, 0);
46882- atomic_set(&dev->bytes_identical, 0);
46883- atomic_set(&dev->bytes_sent, 0);
46884- atomic_set(&dev->cpu_kcycles_used, 0);
46885+ atomic_set_unchecked(&dev->bytes_rendered, 0);
46886+ atomic_set_unchecked(&dev->bytes_identical, 0);
46887+ atomic_set_unchecked(&dev->bytes_sent, 0);
46888+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
46889
46890 return count;
46891 }
46892diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46893index b75db01..ad2f34a 100644
46894--- a/drivers/video/uvesafb.c
46895+++ b/drivers/video/uvesafb.c
46896@@ -19,6 +19,7 @@
46897 #include <linux/io.h>
46898 #include <linux/mutex.h>
46899 #include <linux/slab.h>
46900+#include <linux/moduleloader.h>
46901 #include <video/edid.h>
46902 #include <video/uvesafb.h>
46903 #ifdef CONFIG_X86
46904@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46905 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46906 par->pmi_setpal = par->ypan = 0;
46907 } else {
46908+
46909+#ifdef CONFIG_PAX_KERNEXEC
46910+#ifdef CONFIG_MODULES
46911+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46912+#endif
46913+ if (!par->pmi_code) {
46914+ par->pmi_setpal = par->ypan = 0;
46915+ return 0;
46916+ }
46917+#endif
46918+
46919 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46920 + task->t.regs.edi);
46921+
46922+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46923+ pax_open_kernel();
46924+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46925+ pax_close_kernel();
46926+
46927+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46928+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46929+#else
46930 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46931 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46932+#endif
46933+
46934 printk(KERN_INFO "uvesafb: protected mode interface info at "
46935 "%04x:%04x\n",
46936 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46937@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
46938 par->ypan = ypan;
46939
46940 if (par->pmi_setpal || par->ypan) {
46941+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
46942 if (__supported_pte_mask & _PAGE_NX) {
46943 par->pmi_setpal = par->ypan = 0;
46944 printk(KERN_WARNING "uvesafb: NX protection is actively."
46945 "We have better not to use the PMI.\n");
46946- } else {
46947+ } else
46948+#endif
46949 uvesafb_vbe_getpmi(task, par);
46950- }
46951 }
46952 #else
46953 /* The protected mode interface is not available on non-x86. */
46954@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
46955 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
46956
46957 /* Disable blanking if the user requested so. */
46958- if (!blank)
46959- info->fbops->fb_blank = NULL;
46960+ if (!blank) {
46961+ pax_open_kernel();
46962+ *(void **)&info->fbops->fb_blank = NULL;
46963+ pax_close_kernel();
46964+ }
46965
46966 /*
46967 * Find out how much IO memory is required for the mode with
46968@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
46969 info->flags = FBINFO_FLAG_DEFAULT |
46970 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
46971
46972- if (!par->ypan)
46973- info->fbops->fb_pan_display = NULL;
46974+ if (!par->ypan) {
46975+ pax_open_kernel();
46976+ *(void **)&info->fbops->fb_pan_display = NULL;
46977+ pax_close_kernel();
46978+ }
46979 }
46980
46981 static void uvesafb_init_mtrr(struct fb_info *info)
46982@@ -1836,6 +1866,11 @@ out:
46983 if (par->vbe_modes)
46984 kfree(par->vbe_modes);
46985
46986+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46987+ if (par->pmi_code)
46988+ module_free_exec(NULL, par->pmi_code);
46989+#endif
46990+
46991 framebuffer_release(info);
46992 return err;
46993 }
46994@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
46995 kfree(par->vbe_state_orig);
46996 if (par->vbe_state_saved)
46997 kfree(par->vbe_state_saved);
46998+
46999+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47000+ if (par->pmi_code)
47001+ module_free_exec(NULL, par->pmi_code);
47002+#endif
47003+
47004 }
47005
47006 framebuffer_release(info);
47007diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
47008index 501b340..d80aa17 100644
47009--- a/drivers/video/vesafb.c
47010+++ b/drivers/video/vesafb.c
47011@@ -9,6 +9,7 @@
47012 */
47013
47014 #include <linux/module.h>
47015+#include <linux/moduleloader.h>
47016 #include <linux/kernel.h>
47017 #include <linux/errno.h>
47018 #include <linux/string.h>
47019@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
47020 static int vram_total __initdata; /* Set total amount of memory */
47021 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
47022 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
47023-static void (*pmi_start)(void) __read_mostly;
47024-static void (*pmi_pal) (void) __read_mostly;
47025+static void (*pmi_start)(void) __read_only;
47026+static void (*pmi_pal) (void) __read_only;
47027 static int depth __read_mostly;
47028 static int vga_compat __read_mostly;
47029 /* --------------------------------------------------------------------- */
47030@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
47031 unsigned int size_vmode;
47032 unsigned int size_remap;
47033 unsigned int size_total;
47034+ void *pmi_code = NULL;
47035
47036 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
47037 return -ENODEV;
47038@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
47039 size_remap = size_total;
47040 vesafb_fix.smem_len = size_remap;
47041
47042-#ifndef __i386__
47043- screen_info.vesapm_seg = 0;
47044-#endif
47045-
47046 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
47047 printk(KERN_WARNING
47048 "vesafb: cannot reserve video memory at 0x%lx\n",
47049@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
47050 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
47051 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
47052
47053+#ifdef __i386__
47054+
47055+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47056+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
47057+ if (!pmi_code)
47058+#elif !defined(CONFIG_PAX_KERNEXEC)
47059+ if (0)
47060+#endif
47061+
47062+#endif
47063+ screen_info.vesapm_seg = 0;
47064+
47065 if (screen_info.vesapm_seg) {
47066- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
47067- screen_info.vesapm_seg,screen_info.vesapm_off);
47068+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
47069+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
47070 }
47071
47072 if (screen_info.vesapm_seg < 0xc000)
47073@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
47074
47075 if (ypan || pmi_setpal) {
47076 unsigned short *pmi_base;
47077+
47078 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
47079- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
47080- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
47081+
47082+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47083+ pax_open_kernel();
47084+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
47085+#else
47086+ pmi_code = pmi_base;
47087+#endif
47088+
47089+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
47090+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
47091+
47092+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47093+ pmi_start = ktva_ktla(pmi_start);
47094+ pmi_pal = ktva_ktla(pmi_pal);
47095+ pax_close_kernel();
47096+#endif
47097+
47098 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
47099 if (pmi_base[3]) {
47100 printk(KERN_INFO "vesafb: pmi: ports = ");
47101@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
47102 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
47103 (ypan ? FBINFO_HWACCEL_YPAN : 0);
47104
47105- if (!ypan)
47106- info->fbops->fb_pan_display = NULL;
47107+ if (!ypan) {
47108+ pax_open_kernel();
47109+ *(void **)&info->fbops->fb_pan_display = NULL;
47110+ pax_close_kernel();
47111+ }
47112
47113 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
47114 err = -ENOMEM;
47115@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
47116 info->node, info->fix.id);
47117 return 0;
47118 err:
47119+
47120+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47121+ module_free_exec(NULL, pmi_code);
47122+#endif
47123+
47124 if (info->screen_base)
47125 iounmap(info->screen_base);
47126 framebuffer_release(info);
47127diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
47128index 88714ae..16c2e11 100644
47129--- a/drivers/video/via/via_clock.h
47130+++ b/drivers/video/via/via_clock.h
47131@@ -56,7 +56,7 @@ struct via_clock {
47132
47133 void (*set_engine_pll_state)(u8 state);
47134 void (*set_engine_pll)(struct via_pll_config config);
47135-};
47136+} __no_const;
47137
47138
47139 static inline u32 get_pll_internal_frequency(u32 ref_freq,
47140diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
47141index fef20db..d28b1ab 100644
47142--- a/drivers/xen/xenfs/xenstored.c
47143+++ b/drivers/xen/xenfs/xenstored.c
47144@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
47145 static int xsd_kva_open(struct inode *inode, struct file *file)
47146 {
47147 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
47148+#ifdef CONFIG_GRKERNSEC_HIDESYM
47149+ NULL);
47150+#else
47151 xen_store_interface);
47152+#endif
47153+
47154 if (!file->private_data)
47155 return -ENOMEM;
47156 return 0;
47157diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
47158index 890bed5..17ae73e 100644
47159--- a/fs/9p/vfs_inode.c
47160+++ b/fs/9p/vfs_inode.c
47161@@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47162 void
47163 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47164 {
47165- char *s = nd_get_link(nd);
47166+ const char *s = nd_get_link(nd);
47167
47168 p9_debug(P9_DEBUG_VFS, " %s %s\n",
47169 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
47170diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
47171index 0efd152..b5802ad 100644
47172--- a/fs/Kconfig.binfmt
47173+++ b/fs/Kconfig.binfmt
47174@@ -89,7 +89,7 @@ config HAVE_AOUT
47175
47176 config BINFMT_AOUT
47177 tristate "Kernel support for a.out and ECOFF binaries"
47178- depends on HAVE_AOUT
47179+ depends on HAVE_AOUT && BROKEN
47180 ---help---
47181 A.out (Assembler.OUTput) is a set of formats for libraries and
47182 executables used in the earliest versions of UNIX. Linux used
47183diff --git a/fs/aio.c b/fs/aio.c
47184index 71f613c..9d01f1f 100644
47185--- a/fs/aio.c
47186+++ b/fs/aio.c
47187@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
47188 size += sizeof(struct io_event) * nr_events;
47189 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
47190
47191- if (nr_pages < 0)
47192+ if (nr_pages <= 0)
47193 return -EINVAL;
47194
47195 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
47196@@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
47197 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
47198 {
47199 ssize_t ret;
47200+ struct iovec iovstack;
47201
47202 #ifdef CONFIG_COMPAT
47203 if (compat)
47204 ret = compat_rw_copy_check_uvector(type,
47205 (struct compat_iovec __user *)kiocb->ki_buf,
47206- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
47207+ kiocb->ki_nbytes, 1, &iovstack,
47208 &kiocb->ki_iovec);
47209 else
47210 #endif
47211 ret = rw_copy_check_uvector(type,
47212 (struct iovec __user *)kiocb->ki_buf,
47213- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
47214+ kiocb->ki_nbytes, 1, &iovstack,
47215 &kiocb->ki_iovec);
47216 if (ret < 0)
47217 goto out;
47218@@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
47219 if (ret < 0)
47220 goto out;
47221
47222+ if (kiocb->ki_iovec == &iovstack) {
47223+ kiocb->ki_inline_vec = iovstack;
47224+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
47225+ }
47226 kiocb->ki_nr_segs = kiocb->ki_nbytes;
47227 kiocb->ki_cur_seg = 0;
47228 /* ki_nbytes/left now reflect bytes instead of segs */
47229diff --git a/fs/attr.c b/fs/attr.c
47230index 1449adb..a2038c2 100644
47231--- a/fs/attr.c
47232+++ b/fs/attr.c
47233@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
47234 unsigned long limit;
47235
47236 limit = rlimit(RLIMIT_FSIZE);
47237+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
47238 if (limit != RLIM_INFINITY && offset > limit)
47239 goto out_sig;
47240 if (offset > inode->i_sb->s_maxbytes)
47241diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
47242index 03bc1d3..6205356 100644
47243--- a/fs/autofs4/waitq.c
47244+++ b/fs/autofs4/waitq.c
47245@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
47246 {
47247 unsigned long sigpipe, flags;
47248 mm_segment_t fs;
47249- const char *data = (const char *)addr;
47250+ const char __user *data = (const char __force_user *)addr;
47251 ssize_t wr = 0;
47252
47253 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
47254@@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
47255 return 1;
47256 }
47257
47258+#ifdef CONFIG_GRKERNSEC_HIDESYM
47259+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
47260+#endif
47261+
47262 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
47263 enum autofs_notify notify)
47264 {
47265@@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
47266
47267 /* If this is a direct mount request create a dummy name */
47268 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
47269+#ifdef CONFIG_GRKERNSEC_HIDESYM
47270+ /* this name does get written to userland via autofs4_write() */
47271+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
47272+#else
47273 qstr.len = sprintf(name, "%p", dentry);
47274+#endif
47275 else {
47276 qstr.len = autofs4_getpath(sbi, dentry, &name);
47277 if (!qstr.len) {
47278diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
47279index 2b3bda8..6a2d4be 100644
47280--- a/fs/befs/linuxvfs.c
47281+++ b/fs/befs/linuxvfs.c
47282@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47283 {
47284 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
47285 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
47286- char *link = nd_get_link(nd);
47287+ const char *link = nd_get_link(nd);
47288 if (!IS_ERR(link))
47289 kfree(link);
47290 }
47291diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
47292index 6043567..16a9239 100644
47293--- a/fs/binfmt_aout.c
47294+++ b/fs/binfmt_aout.c
47295@@ -16,6 +16,7 @@
47296 #include <linux/string.h>
47297 #include <linux/fs.h>
47298 #include <linux/file.h>
47299+#include <linux/security.h>
47300 #include <linux/stat.h>
47301 #include <linux/fcntl.h>
47302 #include <linux/ptrace.h>
47303@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
47304 #endif
47305 # define START_STACK(u) ((void __user *)u.start_stack)
47306
47307+ memset(&dump, 0, sizeof(dump));
47308+
47309 fs = get_fs();
47310 set_fs(KERNEL_DS);
47311 has_dumped = 1;
47312@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
47313
47314 /* If the size of the dump file exceeds the rlimit, then see what would happen
47315 if we wrote the stack, but not the data area. */
47316+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
47317 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
47318 dump.u_dsize = 0;
47319
47320 /* Make sure we have enough room to write the stack and data areas. */
47321+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
47322 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
47323 dump.u_ssize = 0;
47324
47325@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
47326 rlim = rlimit(RLIMIT_DATA);
47327 if (rlim >= RLIM_INFINITY)
47328 rlim = ~0;
47329+
47330+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
47331 if (ex.a_data + ex.a_bss > rlim)
47332 return -ENOMEM;
47333
47334@@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
47335
47336 install_exec_creds(bprm);
47337
47338+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47339+ current->mm->pax_flags = 0UL;
47340+#endif
47341+
47342+#ifdef CONFIG_PAX_PAGEEXEC
47343+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
47344+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
47345+
47346+#ifdef CONFIG_PAX_EMUTRAMP
47347+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
47348+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
47349+#endif
47350+
47351+#ifdef CONFIG_PAX_MPROTECT
47352+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
47353+ current->mm->pax_flags |= MF_PAX_MPROTECT;
47354+#endif
47355+
47356+ }
47357+#endif
47358+
47359 if (N_MAGIC(ex) == OMAGIC) {
47360 unsigned long text_addr, map_size;
47361 loff_t pos;
47362@@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
47363 }
47364
47365 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
47366- PROT_READ | PROT_WRITE | PROT_EXEC,
47367+ PROT_READ | PROT_WRITE,
47368 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
47369 fd_offset + ex.a_text);
47370 if (error != N_DATADDR(ex)) {
47371diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
47372index 0c42cdb..f4be023 100644
47373--- a/fs/binfmt_elf.c
47374+++ b/fs/binfmt_elf.c
47375@@ -33,6 +33,7 @@
47376 #include <linux/elf.h>
47377 #include <linux/utsname.h>
47378 #include <linux/coredump.h>
47379+#include <linux/xattr.h>
47380 #include <asm/uaccess.h>
47381 #include <asm/param.h>
47382 #include <asm/page.h>
47383@@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
47384 #define elf_core_dump NULL
47385 #endif
47386
47387+#ifdef CONFIG_PAX_MPROTECT
47388+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
47389+#endif
47390+
47391 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
47392 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
47393 #else
47394@@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
47395 .load_binary = load_elf_binary,
47396 .load_shlib = load_elf_library,
47397 .core_dump = elf_core_dump,
47398+
47399+#ifdef CONFIG_PAX_MPROTECT
47400+ .handle_mprotect= elf_handle_mprotect,
47401+#endif
47402+
47403 .min_coredump = ELF_EXEC_PAGESIZE,
47404 };
47405
47406@@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
47407
47408 static int set_brk(unsigned long start, unsigned long end)
47409 {
47410+ unsigned long e = end;
47411+
47412 start = ELF_PAGEALIGN(start);
47413 end = ELF_PAGEALIGN(end);
47414 if (end > start) {
47415@@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
47416 if (BAD_ADDR(addr))
47417 return addr;
47418 }
47419- current->mm->start_brk = current->mm->brk = end;
47420+ current->mm->start_brk = current->mm->brk = e;
47421 return 0;
47422 }
47423
47424@@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47425 elf_addr_t __user *u_rand_bytes;
47426 const char *k_platform = ELF_PLATFORM;
47427 const char *k_base_platform = ELF_BASE_PLATFORM;
47428- unsigned char k_rand_bytes[16];
47429+ u32 k_rand_bytes[4];
47430 int items;
47431 elf_addr_t *elf_info;
47432 int ei_index = 0;
47433 const struct cred *cred = current_cred();
47434 struct vm_area_struct *vma;
47435+ unsigned long saved_auxv[AT_VECTOR_SIZE];
47436
47437 /*
47438 * In some cases (e.g. Hyper-Threading), we want to avoid L1
47439@@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47440 * Generate 16 random bytes for userspace PRNG seeding.
47441 */
47442 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
47443- u_rand_bytes = (elf_addr_t __user *)
47444- STACK_ALLOC(p, sizeof(k_rand_bytes));
47445+ srandom32(k_rand_bytes[0] ^ random32());
47446+ srandom32(k_rand_bytes[1] ^ random32());
47447+ srandom32(k_rand_bytes[2] ^ random32());
47448+ srandom32(k_rand_bytes[3] ^ random32());
47449+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
47450+ u_rand_bytes = (elf_addr_t __user *) p;
47451 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
47452 return -EFAULT;
47453
47454@@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47455 return -EFAULT;
47456 current->mm->env_end = p;
47457
47458+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
47459+
47460 /* Put the elf_info on the stack in the right place. */
47461 sp = (elf_addr_t __user *)envp + 1;
47462- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
47463+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
47464 return -EFAULT;
47465 return 0;
47466 }
47467@@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
47468 an ELF header */
47469
47470 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47471- struct file *interpreter, unsigned long *interp_map_addr,
47472- unsigned long no_base)
47473+ struct file *interpreter, unsigned long no_base)
47474 {
47475 struct elf_phdr *elf_phdata;
47476 struct elf_phdr *eppnt;
47477- unsigned long load_addr = 0;
47478+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
47479 int load_addr_set = 0;
47480 unsigned long last_bss = 0, elf_bss = 0;
47481- unsigned long error = ~0UL;
47482+ unsigned long error = -EINVAL;
47483 unsigned long total_size;
47484 int retval, i, size;
47485
47486@@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47487 goto out_close;
47488 }
47489
47490+#ifdef CONFIG_PAX_SEGMEXEC
47491+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
47492+ pax_task_size = SEGMEXEC_TASK_SIZE;
47493+#endif
47494+
47495 eppnt = elf_phdata;
47496 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
47497 if (eppnt->p_type == PT_LOAD) {
47498@@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47499 map_addr = elf_map(interpreter, load_addr + vaddr,
47500 eppnt, elf_prot, elf_type, total_size);
47501 total_size = 0;
47502- if (!*interp_map_addr)
47503- *interp_map_addr = map_addr;
47504 error = map_addr;
47505 if (BAD_ADDR(map_addr))
47506 goto out_close;
47507@@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47508 k = load_addr + eppnt->p_vaddr;
47509 if (BAD_ADDR(k) ||
47510 eppnt->p_filesz > eppnt->p_memsz ||
47511- eppnt->p_memsz > TASK_SIZE ||
47512- TASK_SIZE - eppnt->p_memsz < k) {
47513+ eppnt->p_memsz > pax_task_size ||
47514+ pax_task_size - eppnt->p_memsz < k) {
47515 error = -ENOMEM;
47516 goto out_close;
47517 }
47518@@ -530,6 +551,315 @@ out:
47519 return error;
47520 }
47521
47522+#ifdef CONFIG_PAX_PT_PAX_FLAGS
47523+#ifdef CONFIG_PAX_SOFTMODE
47524+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
47525+{
47526+ unsigned long pax_flags = 0UL;
47527+
47528+#ifdef CONFIG_PAX_PAGEEXEC
47529+ if (elf_phdata->p_flags & PF_PAGEEXEC)
47530+ pax_flags |= MF_PAX_PAGEEXEC;
47531+#endif
47532+
47533+#ifdef CONFIG_PAX_SEGMEXEC
47534+ if (elf_phdata->p_flags & PF_SEGMEXEC)
47535+ pax_flags |= MF_PAX_SEGMEXEC;
47536+#endif
47537+
47538+#ifdef CONFIG_PAX_EMUTRAMP
47539+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
47540+ pax_flags |= MF_PAX_EMUTRAMP;
47541+#endif
47542+
47543+#ifdef CONFIG_PAX_MPROTECT
47544+ if (elf_phdata->p_flags & PF_MPROTECT)
47545+ pax_flags |= MF_PAX_MPROTECT;
47546+#endif
47547+
47548+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47549+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
47550+ pax_flags |= MF_PAX_RANDMMAP;
47551+#endif
47552+
47553+ return pax_flags;
47554+}
47555+#endif
47556+
47557+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
47558+{
47559+ unsigned long pax_flags = 0UL;
47560+
47561+#ifdef CONFIG_PAX_PAGEEXEC
47562+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
47563+ pax_flags |= MF_PAX_PAGEEXEC;
47564+#endif
47565+
47566+#ifdef CONFIG_PAX_SEGMEXEC
47567+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
47568+ pax_flags |= MF_PAX_SEGMEXEC;
47569+#endif
47570+
47571+#ifdef CONFIG_PAX_EMUTRAMP
47572+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
47573+ pax_flags |= MF_PAX_EMUTRAMP;
47574+#endif
47575+
47576+#ifdef CONFIG_PAX_MPROTECT
47577+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
47578+ pax_flags |= MF_PAX_MPROTECT;
47579+#endif
47580+
47581+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47582+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
47583+ pax_flags |= MF_PAX_RANDMMAP;
47584+#endif
47585+
47586+ return pax_flags;
47587+}
47588+#endif
47589+
47590+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
47591+#ifdef CONFIG_PAX_SOFTMODE
47592+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
47593+{
47594+ unsigned long pax_flags = 0UL;
47595+
47596+#ifdef CONFIG_PAX_PAGEEXEC
47597+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
47598+ pax_flags |= MF_PAX_PAGEEXEC;
47599+#endif
47600+
47601+#ifdef CONFIG_PAX_SEGMEXEC
47602+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
47603+ pax_flags |= MF_PAX_SEGMEXEC;
47604+#endif
47605+
47606+#ifdef CONFIG_PAX_EMUTRAMP
47607+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
47608+ pax_flags |= MF_PAX_EMUTRAMP;
47609+#endif
47610+
47611+#ifdef CONFIG_PAX_MPROTECT
47612+ if (pax_flags_softmode & MF_PAX_MPROTECT)
47613+ pax_flags |= MF_PAX_MPROTECT;
47614+#endif
47615+
47616+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47617+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
47618+ pax_flags |= MF_PAX_RANDMMAP;
47619+#endif
47620+
47621+ return pax_flags;
47622+}
47623+#endif
47624+
47625+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
47626+{
47627+ unsigned long pax_flags = 0UL;
47628+
47629+#ifdef CONFIG_PAX_PAGEEXEC
47630+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
47631+ pax_flags |= MF_PAX_PAGEEXEC;
47632+#endif
47633+
47634+#ifdef CONFIG_PAX_SEGMEXEC
47635+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
47636+ pax_flags |= MF_PAX_SEGMEXEC;
47637+#endif
47638+
47639+#ifdef CONFIG_PAX_EMUTRAMP
47640+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
47641+ pax_flags |= MF_PAX_EMUTRAMP;
47642+#endif
47643+
47644+#ifdef CONFIG_PAX_MPROTECT
47645+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
47646+ pax_flags |= MF_PAX_MPROTECT;
47647+#endif
47648+
47649+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47650+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
47651+ pax_flags |= MF_PAX_RANDMMAP;
47652+#endif
47653+
47654+ return pax_flags;
47655+}
47656+#endif
47657+
47658+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47659+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
47660+{
47661+ unsigned long pax_flags = 0UL;
47662+
47663+#ifdef CONFIG_PAX_EI_PAX
47664+
47665+#ifdef CONFIG_PAX_PAGEEXEC
47666+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
47667+ pax_flags |= MF_PAX_PAGEEXEC;
47668+#endif
47669+
47670+#ifdef CONFIG_PAX_SEGMEXEC
47671+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
47672+ pax_flags |= MF_PAX_SEGMEXEC;
47673+#endif
47674+
47675+#ifdef CONFIG_PAX_EMUTRAMP
47676+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
47677+ pax_flags |= MF_PAX_EMUTRAMP;
47678+#endif
47679+
47680+#ifdef CONFIG_PAX_MPROTECT
47681+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
47682+ pax_flags |= MF_PAX_MPROTECT;
47683+#endif
47684+
47685+#ifdef CONFIG_PAX_ASLR
47686+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
47687+ pax_flags |= MF_PAX_RANDMMAP;
47688+#endif
47689+
47690+#else
47691+
47692+#ifdef CONFIG_PAX_PAGEEXEC
47693+ pax_flags |= MF_PAX_PAGEEXEC;
47694+#endif
47695+
47696+#ifdef CONFIG_PAX_SEGMEXEC
47697+ pax_flags |= MF_PAX_SEGMEXEC;
47698+#endif
47699+
47700+#ifdef CONFIG_PAX_MPROTECT
47701+ pax_flags |= MF_PAX_MPROTECT;
47702+#endif
47703+
47704+#ifdef CONFIG_PAX_RANDMMAP
47705+ if (randomize_va_space)
47706+ pax_flags |= MF_PAX_RANDMMAP;
47707+#endif
47708+
47709+#endif
47710+
47711+ return pax_flags;
47712+}
47713+
47714+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
47715+{
47716+
47717+#ifdef CONFIG_PAX_PT_PAX_FLAGS
47718+ unsigned long i;
47719+
47720+ for (i = 0UL; i < elf_ex->e_phnum; i++)
47721+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
47722+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
47723+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
47724+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
47725+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
47726+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
47727+ return ~0UL;
47728+
47729+#ifdef CONFIG_PAX_SOFTMODE
47730+ if (pax_softmode)
47731+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
47732+ else
47733+#endif
47734+
47735+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
47736+ break;
47737+ }
47738+#endif
47739+
47740+ return ~0UL;
47741+}
47742+
47743+static unsigned long pax_parse_xattr_pax(struct file * const file)
47744+{
47745+
47746+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
47747+ ssize_t xattr_size, i;
47748+ unsigned char xattr_value[5];
47749+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
47750+
47751+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
47752+ if (xattr_size <= 0 || xattr_size > 5)
47753+ return ~0UL;
47754+
47755+ for (i = 0; i < xattr_size; i++)
47756+ switch (xattr_value[i]) {
47757+ default:
47758+ return ~0UL;
47759+
47760+#define parse_flag(option1, option2, flag) \
47761+ case option1: \
47762+ if (pax_flags_hardmode & MF_PAX_##flag) \
47763+ return ~0UL; \
47764+ pax_flags_hardmode |= MF_PAX_##flag; \
47765+ break; \
47766+ case option2: \
47767+ if (pax_flags_softmode & MF_PAX_##flag) \
47768+ return ~0UL; \
47769+ pax_flags_softmode |= MF_PAX_##flag; \
47770+ break;
47771+
47772+ parse_flag('p', 'P', PAGEEXEC);
47773+ parse_flag('e', 'E', EMUTRAMP);
47774+ parse_flag('m', 'M', MPROTECT);
47775+ parse_flag('r', 'R', RANDMMAP);
47776+ parse_flag('s', 'S', SEGMEXEC);
47777+
47778+#undef parse_flag
47779+ }
47780+
47781+ if (pax_flags_hardmode & pax_flags_softmode)
47782+ return ~0UL;
47783+
47784+#ifdef CONFIG_PAX_SOFTMODE
47785+ if (pax_softmode)
47786+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
47787+ else
47788+#endif
47789+
47790+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
47791+#else
47792+ return ~0UL;
47793+#endif
47794+
47795+}
47796+
47797+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
47798+{
47799+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
47800+
47801+ pax_flags = pax_parse_ei_pax(elf_ex);
47802+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
47803+ xattr_pax_flags = pax_parse_xattr_pax(file);
47804+
47805+ if (pt_pax_flags == ~0UL)
47806+ pt_pax_flags = xattr_pax_flags;
47807+ else if (xattr_pax_flags == ~0UL)
47808+ xattr_pax_flags = pt_pax_flags;
47809+ if (pt_pax_flags != xattr_pax_flags)
47810+ return -EINVAL;
47811+ if (pt_pax_flags != ~0UL)
47812+ pax_flags = pt_pax_flags;
47813+
47814+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
47815+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47816+ if ((__supported_pte_mask & _PAGE_NX))
47817+ pax_flags &= ~MF_PAX_SEGMEXEC;
47818+ else
47819+ pax_flags &= ~MF_PAX_PAGEEXEC;
47820+ }
47821+#endif
47822+
47823+ if (0 > pax_check_flags(&pax_flags))
47824+ return -EINVAL;
47825+
47826+ current->mm->pax_flags = pax_flags;
47827+ return 0;
47828+}
47829+#endif
47830+
47831 /*
47832 * These are the functions used to load ELF style executables and shared
47833 * libraries. There is no binary dependent code anywhere else.
47834@@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
47835 {
47836 unsigned int random_variable = 0;
47837
47838+#ifdef CONFIG_PAX_RANDUSTACK
47839+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47840+ return stack_top - current->mm->delta_stack;
47841+#endif
47842+
47843 if ((current->flags & PF_RANDOMIZE) &&
47844 !(current->personality & ADDR_NO_RANDOMIZE)) {
47845 random_variable = get_random_int() & STACK_RND_MASK;
47846@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
47847 unsigned long load_addr = 0, load_bias = 0;
47848 int load_addr_set = 0;
47849 char * elf_interpreter = NULL;
47850- unsigned long error;
47851+ unsigned long error = 0;
47852 struct elf_phdr *elf_ppnt, *elf_phdata;
47853 unsigned long elf_bss, elf_brk;
47854 int retval, i;
47855@@ -574,12 +909,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
47856 unsigned long start_code, end_code, start_data, end_data;
47857 unsigned long reloc_func_desc __maybe_unused = 0;
47858 int executable_stack = EXSTACK_DEFAULT;
47859- unsigned long def_flags = 0;
47860 struct pt_regs *regs = current_pt_regs();
47861 struct {
47862 struct elfhdr elf_ex;
47863 struct elfhdr interp_elf_ex;
47864 } *loc;
47865+ unsigned long pax_task_size = TASK_SIZE;
47866
47867 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
47868 if (!loc) {
47869@@ -715,11 +1050,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
47870 goto out_free_dentry;
47871
47872 /* OK, This is the point of no return */
47873- current->mm->def_flags = def_flags;
47874+
47875+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47876+ current->mm->pax_flags = 0UL;
47877+#endif
47878+
47879+#ifdef CONFIG_PAX_DLRESOLVE
47880+ current->mm->call_dl_resolve = 0UL;
47881+#endif
47882+
47883+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
47884+ current->mm->call_syscall = 0UL;
47885+#endif
47886+
47887+#ifdef CONFIG_PAX_ASLR
47888+ current->mm->delta_mmap = 0UL;
47889+ current->mm->delta_stack = 0UL;
47890+#endif
47891+
47892+ current->mm->def_flags = 0;
47893+
47894+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47895+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
47896+ send_sig(SIGKILL, current, 0);
47897+ goto out_free_dentry;
47898+ }
47899+#endif
47900+
47901+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47902+ pax_set_initial_flags(bprm);
47903+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47904+ if (pax_set_initial_flags_func)
47905+ (pax_set_initial_flags_func)(bprm);
47906+#endif
47907+
47908+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47909+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
47910+ current->mm->context.user_cs_limit = PAGE_SIZE;
47911+ current->mm->def_flags |= VM_PAGEEXEC;
47912+ }
47913+#endif
47914+
47915+#ifdef CONFIG_PAX_SEGMEXEC
47916+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47917+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47918+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47919+ pax_task_size = SEGMEXEC_TASK_SIZE;
47920+ current->mm->def_flags |= VM_NOHUGEPAGE;
47921+ }
47922+#endif
47923+
47924+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47925+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47926+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47927+ put_cpu();
47928+ }
47929+#endif
47930
47931 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47932 may depend on the personality. */
47933 SET_PERSONALITY(loc->elf_ex);
47934+
47935+#ifdef CONFIG_PAX_ASLR
47936+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47937+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47938+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47939+ }
47940+#endif
47941+
47942+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47943+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47944+ executable_stack = EXSTACK_DISABLE_X;
47945+ current->personality &= ~READ_IMPLIES_EXEC;
47946+ } else
47947+#endif
47948+
47949 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47950 current->personality |= READ_IMPLIES_EXEC;
47951
47952@@ -810,6 +1215,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
47953 #else
47954 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47955 #endif
47956+
47957+#ifdef CONFIG_PAX_RANDMMAP
47958+ /* PaX: randomize base address at the default exe base if requested */
47959+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47960+#ifdef CONFIG_SPARC64
47961+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47962+#else
47963+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47964+#endif
47965+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47966+ elf_flags |= MAP_FIXED;
47967+ }
47968+#endif
47969+
47970 }
47971
47972 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
47973@@ -842,9 +1261,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
47974 * allowed task size. Note that p_filesz must always be
47975 * <= p_memsz so it is only necessary to check p_memsz.
47976 */
47977- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47978- elf_ppnt->p_memsz > TASK_SIZE ||
47979- TASK_SIZE - elf_ppnt->p_memsz < k) {
47980+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47981+ elf_ppnt->p_memsz > pax_task_size ||
47982+ pax_task_size - elf_ppnt->p_memsz < k) {
47983 /* set_brk can never work. Avoid overflows. */
47984 send_sig(SIGKILL, current, 0);
47985 retval = -EINVAL;
47986@@ -883,17 +1302,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
47987 goto out_free_dentry;
47988 }
47989 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
47990- send_sig(SIGSEGV, current, 0);
47991- retval = -EFAULT; /* Nobody gets to see this, but.. */
47992- goto out_free_dentry;
47993+ /*
47994+ * This bss-zeroing can fail if the ELF
47995+ * file specifies odd protections. So
47996+ * we don't check the return value
47997+ */
47998 }
47999
48000+#ifdef CONFIG_PAX_RANDMMAP
48001+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
48002+ unsigned long start, size;
48003+
48004+ start = ELF_PAGEALIGN(elf_brk);
48005+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
48006+ down_read(&current->mm->mmap_sem);
48007+ retval = -ENOMEM;
48008+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
48009+ unsigned long prot = PROT_NONE;
48010+
48011+ up_read(&current->mm->mmap_sem);
48012+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
48013+// if (current->personality & ADDR_NO_RANDOMIZE)
48014+// prot = PROT_READ;
48015+ start = vm_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
48016+ retval = IS_ERR_VALUE(start) ? start : 0;
48017+ } else
48018+ up_read(&current->mm->mmap_sem);
48019+ if (retval == 0)
48020+ retval = set_brk(start + size, start + size + PAGE_SIZE);
48021+ if (retval < 0) {
48022+ send_sig(SIGKILL, current, 0);
48023+ goto out_free_dentry;
48024+ }
48025+ }
48026+#endif
48027+
48028 if (elf_interpreter) {
48029- unsigned long interp_map_addr = 0;
48030-
48031 elf_entry = load_elf_interp(&loc->interp_elf_ex,
48032 interpreter,
48033- &interp_map_addr,
48034 load_bias);
48035 if (!IS_ERR((void *)elf_entry)) {
48036 /*
48037@@ -1115,7 +1561,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
48038 * Decide what to dump of a segment, part, all or none.
48039 */
48040 static unsigned long vma_dump_size(struct vm_area_struct *vma,
48041- unsigned long mm_flags)
48042+ unsigned long mm_flags, long signr)
48043 {
48044 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
48045
48046@@ -1152,7 +1598,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
48047 if (vma->vm_file == NULL)
48048 return 0;
48049
48050- if (FILTER(MAPPED_PRIVATE))
48051+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
48052 goto whole;
48053
48054 /*
48055@@ -1374,9 +1820,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
48056 {
48057 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
48058 int i = 0;
48059- do
48060+ do {
48061 i += 2;
48062- while (auxv[i - 2] != AT_NULL);
48063+ } while (auxv[i - 2] != AT_NULL);
48064 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
48065 }
48066
48067@@ -2006,14 +2452,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
48068 }
48069
48070 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
48071- unsigned long mm_flags)
48072+ struct coredump_params *cprm)
48073 {
48074 struct vm_area_struct *vma;
48075 size_t size = 0;
48076
48077 for (vma = first_vma(current, gate_vma); vma != NULL;
48078 vma = next_vma(vma, gate_vma))
48079- size += vma_dump_size(vma, mm_flags);
48080+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48081 return size;
48082 }
48083
48084@@ -2107,7 +2553,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48085
48086 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
48087
48088- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
48089+ offset += elf_core_vma_data_size(gate_vma, cprm);
48090 offset += elf_core_extra_data_size();
48091 e_shoff = offset;
48092
48093@@ -2121,10 +2567,12 @@ static int elf_core_dump(struct coredump_params *cprm)
48094 offset = dataoff;
48095
48096 size += sizeof(*elf);
48097+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48098 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
48099 goto end_coredump;
48100
48101 size += sizeof(*phdr4note);
48102+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48103 if (size > cprm->limit
48104 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
48105 goto end_coredump;
48106@@ -2138,7 +2586,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48107 phdr.p_offset = offset;
48108 phdr.p_vaddr = vma->vm_start;
48109 phdr.p_paddr = 0;
48110- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
48111+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48112 phdr.p_memsz = vma->vm_end - vma->vm_start;
48113 offset += phdr.p_filesz;
48114 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
48115@@ -2149,6 +2597,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48116 phdr.p_align = ELF_EXEC_PAGESIZE;
48117
48118 size += sizeof(phdr);
48119+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48120 if (size > cprm->limit
48121 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
48122 goto end_coredump;
48123@@ -2173,7 +2622,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48124 unsigned long addr;
48125 unsigned long end;
48126
48127- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
48128+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48129
48130 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
48131 struct page *page;
48132@@ -2182,6 +2631,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48133 page = get_dump_page(addr);
48134 if (page) {
48135 void *kaddr = kmap(page);
48136+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
48137 stop = ((size += PAGE_SIZE) > cprm->limit) ||
48138 !dump_write(cprm->file, kaddr,
48139 PAGE_SIZE);
48140@@ -2199,6 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48141
48142 if (e_phnum == PN_XNUM) {
48143 size += sizeof(*shdr4extnum);
48144+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48145 if (size > cprm->limit
48146 || !dump_write(cprm->file, shdr4extnum,
48147 sizeof(*shdr4extnum)))
48148@@ -2219,6 +2670,97 @@ out:
48149
48150 #endif /* CONFIG_ELF_CORE */
48151
48152+#ifdef CONFIG_PAX_MPROTECT
48153+/* PaX: non-PIC ELF libraries need relocations on their executable segments
48154+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
48155+ * we'll remove VM_MAYWRITE for good on RELRO segments.
48156+ *
48157+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
48158+ * basis because we want to allow the common case and not the special ones.
48159+ */
48160+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
48161+{
48162+ struct elfhdr elf_h;
48163+ struct elf_phdr elf_p;
48164+ unsigned long i;
48165+ unsigned long oldflags;
48166+ bool is_textrel_rw, is_textrel_rx, is_relro;
48167+
48168+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
48169+ return;
48170+
48171+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
48172+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
48173+
48174+#ifdef CONFIG_PAX_ELFRELOCS
48175+ /* possible TEXTREL */
48176+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
48177+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
48178+#else
48179+ is_textrel_rw = false;
48180+ is_textrel_rx = false;
48181+#endif
48182+
48183+ /* possible RELRO */
48184+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
48185+
48186+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
48187+ return;
48188+
48189+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
48190+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
48191+
48192+#ifdef CONFIG_PAX_ETEXECRELOCS
48193+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
48194+#else
48195+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
48196+#endif
48197+
48198+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
48199+ !elf_check_arch(&elf_h) ||
48200+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
48201+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
48202+ return;
48203+
48204+ for (i = 0UL; i < elf_h.e_phnum; i++) {
48205+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
48206+ return;
48207+ switch (elf_p.p_type) {
48208+ case PT_DYNAMIC:
48209+ if (!is_textrel_rw && !is_textrel_rx)
48210+ continue;
48211+ i = 0UL;
48212+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
48213+ elf_dyn dyn;
48214+
48215+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
48216+ return;
48217+ if (dyn.d_tag == DT_NULL)
48218+ return;
48219+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
48220+ gr_log_textrel(vma);
48221+ if (is_textrel_rw)
48222+ vma->vm_flags |= VM_MAYWRITE;
48223+ else
48224+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
48225+ vma->vm_flags &= ~VM_MAYWRITE;
48226+ return;
48227+ }
48228+ i++;
48229+ }
48230+ return;
48231+
48232+ case PT_GNU_RELRO:
48233+ if (!is_relro)
48234+ continue;
48235+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
48236+ vma->vm_flags &= ~VM_MAYWRITE;
48237+ return;
48238+ }
48239+ }
48240+}
48241+#endif
48242+
48243 static int __init init_elf_binfmt(void)
48244 {
48245 register_binfmt(&elf_format);
48246diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
48247index b563719..3868998 100644
48248--- a/fs/binfmt_flat.c
48249+++ b/fs/binfmt_flat.c
48250@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
48251 realdatastart = (unsigned long) -ENOMEM;
48252 printk("Unable to allocate RAM for process data, errno %d\n",
48253 (int)-realdatastart);
48254+ down_write(&current->mm->mmap_sem);
48255 vm_munmap(textpos, text_len);
48256+ up_write(&current->mm->mmap_sem);
48257 ret = realdatastart;
48258 goto err;
48259 }
48260@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
48261 }
48262 if (IS_ERR_VALUE(result)) {
48263 printk("Unable to read data+bss, errno %d\n", (int)-result);
48264+ down_write(&current->mm->mmap_sem);
48265 vm_munmap(textpos, text_len);
48266 vm_munmap(realdatastart, len);
48267+ up_write(&current->mm->mmap_sem);
48268 ret = result;
48269 goto err;
48270 }
48271@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
48272 }
48273 if (IS_ERR_VALUE(result)) {
48274 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
48275+ down_write(&current->mm->mmap_sem);
48276 vm_munmap(textpos, text_len + data_len + extra +
48277 MAX_SHARED_LIBS * sizeof(unsigned long));
48278+ up_write(&current->mm->mmap_sem);
48279 ret = result;
48280 goto err;
48281 }
48282diff --git a/fs/bio.c b/fs/bio.c
48283index b96fc6c..431d628 100644
48284--- a/fs/bio.c
48285+++ b/fs/bio.c
48286@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
48287 /*
48288 * Overflow, abort
48289 */
48290- if (end < start)
48291+ if (end < start || end - start > INT_MAX - nr_pages)
48292 return ERR_PTR(-EINVAL);
48293
48294 nr_pages += end - start;
48295@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
48296 /*
48297 * Overflow, abort
48298 */
48299- if (end < start)
48300+ if (end < start || end - start > INT_MAX - nr_pages)
48301 return ERR_PTR(-EINVAL);
48302
48303 nr_pages += end - start;
48304@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
48305 const int read = bio_data_dir(bio) == READ;
48306 struct bio_map_data *bmd = bio->bi_private;
48307 int i;
48308- char *p = bmd->sgvecs[0].iov_base;
48309+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
48310
48311 __bio_for_each_segment(bvec, bio, i, 0) {
48312 char *addr = page_address(bvec->bv_page);
48313diff --git a/fs/block_dev.c b/fs/block_dev.c
48314index 78333a3..23dcb4d 100644
48315--- a/fs/block_dev.c
48316+++ b/fs/block_dev.c
48317@@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
48318 else if (bdev->bd_contains == bdev)
48319 return true; /* is a whole device which isn't held */
48320
48321- else if (whole->bd_holder == bd_may_claim)
48322+ else if (whole->bd_holder == (void *)bd_may_claim)
48323 return true; /* is a partition of a device that is being partitioned */
48324 else if (whole->bd_holder != NULL)
48325 return false; /* is a partition of a held device */
48326diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
48327index eea5da7..88fead70 100644
48328--- a/fs/btrfs/ctree.c
48329+++ b/fs/btrfs/ctree.c
48330@@ -1033,9 +1033,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
48331 free_extent_buffer(buf);
48332 add_root_to_dirty_list(root);
48333 } else {
48334- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
48335- parent_start = parent->start;
48336- else
48337+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
48338+ if (parent)
48339+ parent_start = parent->start;
48340+ else
48341+ parent_start = 0;
48342+ } else
48343 parent_start = 0;
48344
48345 WARN_ON(trans->transid != btrfs_header_generation(parent));
48346diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
48347index cc93b23..f3c42bf 100644
48348--- a/fs/btrfs/inode.c
48349+++ b/fs/btrfs/inode.c
48350@@ -7296,7 +7296,7 @@ fail:
48351 return -ENOMEM;
48352 }
48353
48354-static int btrfs_getattr(struct vfsmount *mnt,
48355+int btrfs_getattr(struct vfsmount *mnt,
48356 struct dentry *dentry, struct kstat *stat)
48357 {
48358 struct inode *inode = dentry->d_inode;
48359@@ -7310,6 +7310,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
48360 return 0;
48361 }
48362
48363+EXPORT_SYMBOL(btrfs_getattr);
48364+
48365+dev_t get_btrfs_dev_from_inode(struct inode *inode)
48366+{
48367+ return BTRFS_I(inode)->root->anon_dev;
48368+}
48369+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
48370+
48371 /*
48372 * If a file is moved, it will inherit the cow and compression flags of the new
48373 * directory.
48374diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
48375index 338f259..b657640 100644
48376--- a/fs/btrfs/ioctl.c
48377+++ b/fs/btrfs/ioctl.c
48378@@ -3033,9 +3033,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
48379 for (i = 0; i < num_types; i++) {
48380 struct btrfs_space_info *tmp;
48381
48382+ /* Don't copy in more than we allocated */
48383 if (!slot_count)
48384 break;
48385
48386+ slot_count--;
48387+
48388 info = NULL;
48389 rcu_read_lock();
48390 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
48391@@ -3057,10 +3060,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
48392 memcpy(dest, &space, sizeof(space));
48393 dest++;
48394 space_args.total_spaces++;
48395- slot_count--;
48396 }
48397- if (!slot_count)
48398- break;
48399 }
48400 up_read(&info->groups_sem);
48401 }
48402diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
48403index 300e09a..9fe4539 100644
48404--- a/fs/btrfs/relocation.c
48405+++ b/fs/btrfs/relocation.c
48406@@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
48407 }
48408 spin_unlock(&rc->reloc_root_tree.lock);
48409
48410- BUG_ON((struct btrfs_root *)node->data != root);
48411+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
48412
48413 if (!del) {
48414 spin_lock(&rc->reloc_root_tree.lock);
48415diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
48416index d8982e9..29a85fa 100644
48417--- a/fs/btrfs/super.c
48418+++ b/fs/btrfs/super.c
48419@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
48420 function, line, errstr);
48421 return;
48422 }
48423- ACCESS_ONCE(trans->transaction->aborted) = errno;
48424+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
48425 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
48426 }
48427 /*
48428diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
48429index 622f469..e8d2d55 100644
48430--- a/fs/cachefiles/bind.c
48431+++ b/fs/cachefiles/bind.c
48432@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
48433 args);
48434
48435 /* start by checking things over */
48436- ASSERT(cache->fstop_percent >= 0 &&
48437- cache->fstop_percent < cache->fcull_percent &&
48438+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
48439 cache->fcull_percent < cache->frun_percent &&
48440 cache->frun_percent < 100);
48441
48442- ASSERT(cache->bstop_percent >= 0 &&
48443- cache->bstop_percent < cache->bcull_percent &&
48444+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
48445 cache->bcull_percent < cache->brun_percent &&
48446 cache->brun_percent < 100);
48447
48448diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
48449index 0a1467b..6a53245 100644
48450--- a/fs/cachefiles/daemon.c
48451+++ b/fs/cachefiles/daemon.c
48452@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
48453 if (n > buflen)
48454 return -EMSGSIZE;
48455
48456- if (copy_to_user(_buffer, buffer, n) != 0)
48457+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
48458 return -EFAULT;
48459
48460 return n;
48461@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
48462 if (test_bit(CACHEFILES_DEAD, &cache->flags))
48463 return -EIO;
48464
48465- if (datalen < 0 || datalen > PAGE_SIZE - 1)
48466+ if (datalen > PAGE_SIZE - 1)
48467 return -EOPNOTSUPP;
48468
48469 /* drag the command string into the kernel so we can parse it */
48470@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
48471 if (args[0] != '%' || args[1] != '\0')
48472 return -EINVAL;
48473
48474- if (fstop < 0 || fstop >= cache->fcull_percent)
48475+ if (fstop >= cache->fcull_percent)
48476 return cachefiles_daemon_range_error(cache, args);
48477
48478 cache->fstop_percent = fstop;
48479@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
48480 if (args[0] != '%' || args[1] != '\0')
48481 return -EINVAL;
48482
48483- if (bstop < 0 || bstop >= cache->bcull_percent)
48484+ if (bstop >= cache->bcull_percent)
48485 return cachefiles_daemon_range_error(cache, args);
48486
48487 cache->bstop_percent = bstop;
48488diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
48489index 4938251..7e01445 100644
48490--- a/fs/cachefiles/internal.h
48491+++ b/fs/cachefiles/internal.h
48492@@ -59,7 +59,7 @@ struct cachefiles_cache {
48493 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
48494 struct rb_root active_nodes; /* active nodes (can't be culled) */
48495 rwlock_t active_lock; /* lock for active_nodes */
48496- atomic_t gravecounter; /* graveyard uniquifier */
48497+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
48498 unsigned frun_percent; /* when to stop culling (% files) */
48499 unsigned fcull_percent; /* when to start culling (% files) */
48500 unsigned fstop_percent; /* when to stop allocating (% files) */
48501@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
48502 * proc.c
48503 */
48504 #ifdef CONFIG_CACHEFILES_HISTOGRAM
48505-extern atomic_t cachefiles_lookup_histogram[HZ];
48506-extern atomic_t cachefiles_mkdir_histogram[HZ];
48507-extern atomic_t cachefiles_create_histogram[HZ];
48508+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
48509+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
48510+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
48511
48512 extern int __init cachefiles_proc_init(void);
48513 extern void cachefiles_proc_cleanup(void);
48514 static inline
48515-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
48516+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
48517 {
48518 unsigned long jif = jiffies - start_jif;
48519 if (jif >= HZ)
48520 jif = HZ - 1;
48521- atomic_inc(&histogram[jif]);
48522+ atomic_inc_unchecked(&histogram[jif]);
48523 }
48524
48525 #else
48526diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
48527index 8c01c5fc..15f982e 100644
48528--- a/fs/cachefiles/namei.c
48529+++ b/fs/cachefiles/namei.c
48530@@ -317,7 +317,7 @@ try_again:
48531 /* first step is to make up a grave dentry in the graveyard */
48532 sprintf(nbuffer, "%08x%08x",
48533 (uint32_t) get_seconds(),
48534- (uint32_t) atomic_inc_return(&cache->gravecounter));
48535+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
48536
48537 /* do the multiway lock magic */
48538 trap = lock_rename(cache->graveyard, dir);
48539diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
48540index eccd339..4c1d995 100644
48541--- a/fs/cachefiles/proc.c
48542+++ b/fs/cachefiles/proc.c
48543@@ -14,9 +14,9 @@
48544 #include <linux/seq_file.h>
48545 #include "internal.h"
48546
48547-atomic_t cachefiles_lookup_histogram[HZ];
48548-atomic_t cachefiles_mkdir_histogram[HZ];
48549-atomic_t cachefiles_create_histogram[HZ];
48550+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
48551+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
48552+atomic_unchecked_t cachefiles_create_histogram[HZ];
48553
48554 /*
48555 * display the latency histogram
48556@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
48557 return 0;
48558 default:
48559 index = (unsigned long) v - 3;
48560- x = atomic_read(&cachefiles_lookup_histogram[index]);
48561- y = atomic_read(&cachefiles_mkdir_histogram[index]);
48562- z = atomic_read(&cachefiles_create_histogram[index]);
48563+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
48564+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
48565+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
48566 if (x == 0 && y == 0 && z == 0)
48567 return 0;
48568
48569diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
48570index 4809922..aab2c39 100644
48571--- a/fs/cachefiles/rdwr.c
48572+++ b/fs/cachefiles/rdwr.c
48573@@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
48574 old_fs = get_fs();
48575 set_fs(KERNEL_DS);
48576 ret = file->f_op->write(
48577- file, (const void __user *) data, len, &pos);
48578+ file, (const void __force_user *) data, len, &pos);
48579 set_fs(old_fs);
48580 kunmap(page);
48581 if (ret != len)
48582diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
48583index 8c1aabe..bbf856a 100644
48584--- a/fs/ceph/dir.c
48585+++ b/fs/ceph/dir.c
48586@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
48587 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
48588 struct ceph_mds_client *mdsc = fsc->mdsc;
48589 unsigned frag = fpos_frag(filp->f_pos);
48590- int off = fpos_off(filp->f_pos);
48591+ unsigned int off = fpos_off(filp->f_pos);
48592 int err;
48593 u32 ftype;
48594 struct ceph_mds_reply_info_parsed *rinfo;
48595diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
48596index d9ea6ed..1e6c8ac 100644
48597--- a/fs/cifs/cifs_debug.c
48598+++ b/fs/cifs/cifs_debug.c
48599@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
48600
48601 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
48602 #ifdef CONFIG_CIFS_STATS2
48603- atomic_set(&totBufAllocCount, 0);
48604- atomic_set(&totSmBufAllocCount, 0);
48605+ atomic_set_unchecked(&totBufAllocCount, 0);
48606+ atomic_set_unchecked(&totSmBufAllocCount, 0);
48607 #endif /* CONFIG_CIFS_STATS2 */
48608 spin_lock(&cifs_tcp_ses_lock);
48609 list_for_each(tmp1, &cifs_tcp_ses_list) {
48610@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
48611 tcon = list_entry(tmp3,
48612 struct cifs_tcon,
48613 tcon_list);
48614- atomic_set(&tcon->num_smbs_sent, 0);
48615+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
48616 if (server->ops->clear_stats)
48617 server->ops->clear_stats(tcon);
48618 }
48619@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
48620 smBufAllocCount.counter, cifs_min_small);
48621 #ifdef CONFIG_CIFS_STATS2
48622 seq_printf(m, "Total Large %d Small %d Allocations\n",
48623- atomic_read(&totBufAllocCount),
48624- atomic_read(&totSmBufAllocCount));
48625+ atomic_read_unchecked(&totBufAllocCount),
48626+ atomic_read_unchecked(&totSmBufAllocCount));
48627 #endif /* CONFIG_CIFS_STATS2 */
48628
48629 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
48630@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
48631 if (tcon->need_reconnect)
48632 seq_puts(m, "\tDISCONNECTED ");
48633 seq_printf(m, "\nSMBs: %d",
48634- atomic_read(&tcon->num_smbs_sent));
48635+ atomic_read_unchecked(&tcon->num_smbs_sent));
48636 if (server->ops->print_stats)
48637 server->ops->print_stats(m, tcon);
48638 }
48639diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
48640index de7f916..6cb22a9 100644
48641--- a/fs/cifs/cifsfs.c
48642+++ b/fs/cifs/cifsfs.c
48643@@ -997,7 +997,7 @@ cifs_init_request_bufs(void)
48644 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
48645 cifs_req_cachep = kmem_cache_create("cifs_request",
48646 CIFSMaxBufSize + max_hdr_size, 0,
48647- SLAB_HWCACHE_ALIGN, NULL);
48648+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
48649 if (cifs_req_cachep == NULL)
48650 return -ENOMEM;
48651
48652@@ -1024,7 +1024,7 @@ cifs_init_request_bufs(void)
48653 efficient to alloc 1 per page off the slab compared to 17K (5page)
48654 alloc of large cifs buffers even when page debugging is on */
48655 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
48656- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
48657+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
48658 NULL);
48659 if (cifs_sm_req_cachep == NULL) {
48660 mempool_destroy(cifs_req_poolp);
48661@@ -1109,8 +1109,8 @@ init_cifs(void)
48662 atomic_set(&bufAllocCount, 0);
48663 atomic_set(&smBufAllocCount, 0);
48664 #ifdef CONFIG_CIFS_STATS2
48665- atomic_set(&totBufAllocCount, 0);
48666- atomic_set(&totSmBufAllocCount, 0);
48667+ atomic_set_unchecked(&totBufAllocCount, 0);
48668+ atomic_set_unchecked(&totSmBufAllocCount, 0);
48669 #endif /* CONFIG_CIFS_STATS2 */
48670
48671 atomic_set(&midCount, 0);
48672diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
48673index e6899ce..d6b2920 100644
48674--- a/fs/cifs/cifsglob.h
48675+++ b/fs/cifs/cifsglob.h
48676@@ -751,35 +751,35 @@ struct cifs_tcon {
48677 __u16 Flags; /* optional support bits */
48678 enum statusEnum tidStatus;
48679 #ifdef CONFIG_CIFS_STATS
48680- atomic_t num_smbs_sent;
48681+ atomic_unchecked_t num_smbs_sent;
48682 union {
48683 struct {
48684- atomic_t num_writes;
48685- atomic_t num_reads;
48686- atomic_t num_flushes;
48687- atomic_t num_oplock_brks;
48688- atomic_t num_opens;
48689- atomic_t num_closes;
48690- atomic_t num_deletes;
48691- atomic_t num_mkdirs;
48692- atomic_t num_posixopens;
48693- atomic_t num_posixmkdirs;
48694- atomic_t num_rmdirs;
48695- atomic_t num_renames;
48696- atomic_t num_t2renames;
48697- atomic_t num_ffirst;
48698- atomic_t num_fnext;
48699- atomic_t num_fclose;
48700- atomic_t num_hardlinks;
48701- atomic_t num_symlinks;
48702- atomic_t num_locks;
48703- atomic_t num_acl_get;
48704- atomic_t num_acl_set;
48705+ atomic_unchecked_t num_writes;
48706+ atomic_unchecked_t num_reads;
48707+ atomic_unchecked_t num_flushes;
48708+ atomic_unchecked_t num_oplock_brks;
48709+ atomic_unchecked_t num_opens;
48710+ atomic_unchecked_t num_closes;
48711+ atomic_unchecked_t num_deletes;
48712+ atomic_unchecked_t num_mkdirs;
48713+ atomic_unchecked_t num_posixopens;
48714+ atomic_unchecked_t num_posixmkdirs;
48715+ atomic_unchecked_t num_rmdirs;
48716+ atomic_unchecked_t num_renames;
48717+ atomic_unchecked_t num_t2renames;
48718+ atomic_unchecked_t num_ffirst;
48719+ atomic_unchecked_t num_fnext;
48720+ atomic_unchecked_t num_fclose;
48721+ atomic_unchecked_t num_hardlinks;
48722+ atomic_unchecked_t num_symlinks;
48723+ atomic_unchecked_t num_locks;
48724+ atomic_unchecked_t num_acl_get;
48725+ atomic_unchecked_t num_acl_set;
48726 } cifs_stats;
48727 #ifdef CONFIG_CIFS_SMB2
48728 struct {
48729- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
48730- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
48731+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
48732+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
48733 } smb2_stats;
48734 #endif /* CONFIG_CIFS_SMB2 */
48735 } stats;
48736@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
48737 }
48738
48739 #ifdef CONFIG_CIFS_STATS
48740-#define cifs_stats_inc atomic_inc
48741+#define cifs_stats_inc atomic_inc_unchecked
48742
48743 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
48744 unsigned int bytes)
48745@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
48746 /* Various Debug counters */
48747 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
48748 #ifdef CONFIG_CIFS_STATS2
48749-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
48750-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
48751+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
48752+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
48753 #endif
48754 GLOBAL_EXTERN atomic_t smBufAllocCount;
48755 GLOBAL_EXTERN atomic_t midCount;
48756diff --git a/fs/cifs/link.c b/fs/cifs/link.c
48757index 51dc2fb..1e12a33 100644
48758--- a/fs/cifs/link.c
48759+++ b/fs/cifs/link.c
48760@@ -616,7 +616,7 @@ symlink_exit:
48761
48762 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48763 {
48764- char *p = nd_get_link(nd);
48765+ const char *p = nd_get_link(nd);
48766 if (!IS_ERR(p))
48767 kfree(p);
48768 }
48769diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48770index 3a00c0d..42d901c 100644
48771--- a/fs/cifs/misc.c
48772+++ b/fs/cifs/misc.c
48773@@ -169,7 +169,7 @@ cifs_buf_get(void)
48774 memset(ret_buf, 0, buf_size + 3);
48775 atomic_inc(&bufAllocCount);
48776 #ifdef CONFIG_CIFS_STATS2
48777- atomic_inc(&totBufAllocCount);
48778+ atomic_inc_unchecked(&totBufAllocCount);
48779 #endif /* CONFIG_CIFS_STATS2 */
48780 }
48781
48782@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
48783 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48784 atomic_inc(&smBufAllocCount);
48785 #ifdef CONFIG_CIFS_STATS2
48786- atomic_inc(&totSmBufAllocCount);
48787+ atomic_inc_unchecked(&totSmBufAllocCount);
48788 #endif /* CONFIG_CIFS_STATS2 */
48789
48790 }
48791diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
48792index 47bc5a8..10decbe 100644
48793--- a/fs/cifs/smb1ops.c
48794+++ b/fs/cifs/smb1ops.c
48795@@ -586,27 +586,27 @@ static void
48796 cifs_clear_stats(struct cifs_tcon *tcon)
48797 {
48798 #ifdef CONFIG_CIFS_STATS
48799- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
48800- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
48801- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
48802- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
48803- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
48804- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
48805- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
48806- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
48807- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
48808- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
48809- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
48810- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
48811- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
48812- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
48813- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
48814- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
48815- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
48816- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
48817- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
48818- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
48819- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
48820+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
48821+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
48822+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
48823+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
48824+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
48825+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
48826+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
48827+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
48828+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
48829+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
48830+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
48831+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
48832+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
48833+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
48834+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
48835+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
48836+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
48837+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
48838+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
48839+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
48840+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
48841 #endif
48842 }
48843
48844@@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
48845 {
48846 #ifdef CONFIG_CIFS_STATS
48847 seq_printf(m, " Oplocks breaks: %d",
48848- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
48849+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
48850 seq_printf(m, "\nReads: %d Bytes: %llu",
48851- atomic_read(&tcon->stats.cifs_stats.num_reads),
48852+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
48853 (long long)(tcon->bytes_read));
48854 seq_printf(m, "\nWrites: %d Bytes: %llu",
48855- atomic_read(&tcon->stats.cifs_stats.num_writes),
48856+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
48857 (long long)(tcon->bytes_written));
48858 seq_printf(m, "\nFlushes: %d",
48859- atomic_read(&tcon->stats.cifs_stats.num_flushes));
48860+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
48861 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
48862- atomic_read(&tcon->stats.cifs_stats.num_locks),
48863- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
48864- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
48865+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
48866+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
48867+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
48868 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
48869- atomic_read(&tcon->stats.cifs_stats.num_opens),
48870- atomic_read(&tcon->stats.cifs_stats.num_closes),
48871- atomic_read(&tcon->stats.cifs_stats.num_deletes));
48872+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
48873+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
48874+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
48875 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
48876- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
48877- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
48878+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
48879+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
48880 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
48881- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
48882- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
48883+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
48884+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
48885 seq_printf(m, "\nRenames: %d T2 Renames %d",
48886- atomic_read(&tcon->stats.cifs_stats.num_renames),
48887- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
48888+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
48889+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
48890 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
48891- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
48892- atomic_read(&tcon->stats.cifs_stats.num_fnext),
48893- atomic_read(&tcon->stats.cifs_stats.num_fclose));
48894+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
48895+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
48896+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
48897 #endif
48898 }
48899
48900diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
48901index c9c7aa7..065056a 100644
48902--- a/fs/cifs/smb2ops.c
48903+++ b/fs/cifs/smb2ops.c
48904@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
48905 #ifdef CONFIG_CIFS_STATS
48906 int i;
48907 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
48908- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
48909- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
48910+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
48911+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
48912 }
48913 #endif
48914 }
48915@@ -284,66 +284,66 @@ static void
48916 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
48917 {
48918 #ifdef CONFIG_CIFS_STATS
48919- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
48920- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
48921+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
48922+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
48923 seq_printf(m, "\nNegotiates: %d sent %d failed",
48924- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
48925- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
48926+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
48927+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
48928 seq_printf(m, "\nSessionSetups: %d sent %d failed",
48929- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
48930- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
48931+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
48932+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
48933 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
48934 seq_printf(m, "\nLogoffs: %d sent %d failed",
48935- atomic_read(&sent[SMB2_LOGOFF_HE]),
48936- atomic_read(&failed[SMB2_LOGOFF_HE]));
48937+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
48938+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
48939 seq_printf(m, "\nTreeConnects: %d sent %d failed",
48940- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
48941- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
48942+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
48943+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
48944 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
48945- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
48946- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
48947+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
48948+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
48949 seq_printf(m, "\nCreates: %d sent %d failed",
48950- atomic_read(&sent[SMB2_CREATE_HE]),
48951- atomic_read(&failed[SMB2_CREATE_HE]));
48952+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
48953+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
48954 seq_printf(m, "\nCloses: %d sent %d failed",
48955- atomic_read(&sent[SMB2_CLOSE_HE]),
48956- atomic_read(&failed[SMB2_CLOSE_HE]));
48957+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
48958+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
48959 seq_printf(m, "\nFlushes: %d sent %d failed",
48960- atomic_read(&sent[SMB2_FLUSH_HE]),
48961- atomic_read(&failed[SMB2_FLUSH_HE]));
48962+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
48963+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
48964 seq_printf(m, "\nReads: %d sent %d failed",
48965- atomic_read(&sent[SMB2_READ_HE]),
48966- atomic_read(&failed[SMB2_READ_HE]));
48967+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
48968+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
48969 seq_printf(m, "\nWrites: %d sent %d failed",
48970- atomic_read(&sent[SMB2_WRITE_HE]),
48971- atomic_read(&failed[SMB2_WRITE_HE]));
48972+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
48973+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
48974 seq_printf(m, "\nLocks: %d sent %d failed",
48975- atomic_read(&sent[SMB2_LOCK_HE]),
48976- atomic_read(&failed[SMB2_LOCK_HE]));
48977+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
48978+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
48979 seq_printf(m, "\nIOCTLs: %d sent %d failed",
48980- atomic_read(&sent[SMB2_IOCTL_HE]),
48981- atomic_read(&failed[SMB2_IOCTL_HE]));
48982+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
48983+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
48984 seq_printf(m, "\nCancels: %d sent %d failed",
48985- atomic_read(&sent[SMB2_CANCEL_HE]),
48986- atomic_read(&failed[SMB2_CANCEL_HE]));
48987+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
48988+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
48989 seq_printf(m, "\nEchos: %d sent %d failed",
48990- atomic_read(&sent[SMB2_ECHO_HE]),
48991- atomic_read(&failed[SMB2_ECHO_HE]));
48992+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
48993+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
48994 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
48995- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
48996- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
48997+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
48998+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
48999 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
49000- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
49001- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
49002+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
49003+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
49004 seq_printf(m, "\nQueryInfos: %d sent %d failed",
49005- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
49006- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
49007+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
49008+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
49009 seq_printf(m, "\nSetInfos: %d sent %d failed",
49010- atomic_read(&sent[SMB2_SET_INFO_HE]),
49011- atomic_read(&failed[SMB2_SET_INFO_HE]));
49012+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
49013+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
49014 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
49015- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
49016- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
49017+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
49018+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
49019 #endif
49020 }
49021
49022diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
49023index 41d9d07..dbb4772 100644
49024--- a/fs/cifs/smb2pdu.c
49025+++ b/fs/cifs/smb2pdu.c
49026@@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
49027 default:
49028 cERROR(1, "info level %u isn't supported",
49029 srch_inf->info_level);
49030- rc = -EINVAL;
49031- goto qdir_exit;
49032+ return -EINVAL;
49033 }
49034
49035 req->FileIndex = cpu_to_le32(index);
49036diff --git a/fs/coda/cache.c b/fs/coda/cache.c
49037index 958ae0e..505c9d0 100644
49038--- a/fs/coda/cache.c
49039+++ b/fs/coda/cache.c
49040@@ -24,7 +24,7 @@
49041 #include "coda_linux.h"
49042 #include "coda_cache.h"
49043
49044-static atomic_t permission_epoch = ATOMIC_INIT(0);
49045+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
49046
49047 /* replace or extend an acl cache hit */
49048 void coda_cache_enter(struct inode *inode, int mask)
49049@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
49050 struct coda_inode_info *cii = ITOC(inode);
49051
49052 spin_lock(&cii->c_lock);
49053- cii->c_cached_epoch = atomic_read(&permission_epoch);
49054+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
49055 if (cii->c_uid != current_fsuid()) {
49056 cii->c_uid = current_fsuid();
49057 cii->c_cached_perm = mask;
49058@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
49059 {
49060 struct coda_inode_info *cii = ITOC(inode);
49061 spin_lock(&cii->c_lock);
49062- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
49063+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
49064 spin_unlock(&cii->c_lock);
49065 }
49066
49067 /* remove all acl caches */
49068 void coda_cache_clear_all(struct super_block *sb)
49069 {
49070- atomic_inc(&permission_epoch);
49071+ atomic_inc_unchecked(&permission_epoch);
49072 }
49073
49074
49075@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
49076 spin_lock(&cii->c_lock);
49077 hit = (mask & cii->c_cached_perm) == mask &&
49078 cii->c_uid == current_fsuid() &&
49079- cii->c_cached_epoch == atomic_read(&permission_epoch);
49080+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
49081 spin_unlock(&cii->c_lock);
49082
49083 return hit;
49084diff --git a/fs/compat.c b/fs/compat.c
49085index 015e1e1..b8966ac 100644
49086--- a/fs/compat.c
49087+++ b/fs/compat.c
49088@@ -54,7 +54,7 @@
49089 #include <asm/ioctls.h>
49090 #include "internal.h"
49091
49092-int compat_log = 1;
49093+int compat_log = 0;
49094
49095 int compat_printk(const char *fmt, ...)
49096 {
49097@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
49098
49099 set_fs(KERNEL_DS);
49100 /* The __user pointer cast is valid because of the set_fs() */
49101- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
49102+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
49103 set_fs(oldfs);
49104 /* truncating is ok because it's a user address */
49105 if (!ret)
49106@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
49107 goto out;
49108
49109 ret = -EINVAL;
49110- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
49111+ if (nr_segs > UIO_MAXIOV)
49112 goto out;
49113 if (nr_segs > fast_segs) {
49114 ret = -ENOMEM;
49115@@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
49116
49117 struct compat_readdir_callback {
49118 struct compat_old_linux_dirent __user *dirent;
49119+ struct file * file;
49120 int result;
49121 };
49122
49123@@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
49124 buf->result = -EOVERFLOW;
49125 return -EOVERFLOW;
49126 }
49127+
49128+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49129+ return 0;
49130+
49131 buf->result++;
49132 dirent = buf->dirent;
49133 if (!access_ok(VERIFY_WRITE, dirent,
49134@@ -878,6 +883,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
49135
49136 buf.result = 0;
49137 buf.dirent = dirent;
49138+ buf.file = f.file;
49139
49140 error = vfs_readdir(f.file, compat_fillonedir, &buf);
49141 if (buf.result)
49142@@ -897,6 +903,7 @@ struct compat_linux_dirent {
49143 struct compat_getdents_callback {
49144 struct compat_linux_dirent __user *current_dir;
49145 struct compat_linux_dirent __user *previous;
49146+ struct file * file;
49147 int count;
49148 int error;
49149 };
49150@@ -918,6 +925,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
49151 buf->error = -EOVERFLOW;
49152 return -EOVERFLOW;
49153 }
49154+
49155+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49156+ return 0;
49157+
49158 dirent = buf->previous;
49159 if (dirent) {
49160 if (__put_user(offset, &dirent->d_off))
49161@@ -963,6 +974,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
49162 buf.previous = NULL;
49163 buf.count = count;
49164 buf.error = 0;
49165+ buf.file = f.file;
49166
49167 error = vfs_readdir(f.file, compat_filldir, &buf);
49168 if (error >= 0)
49169@@ -983,6 +995,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
49170 struct compat_getdents_callback64 {
49171 struct linux_dirent64 __user *current_dir;
49172 struct linux_dirent64 __user *previous;
49173+ struct file * file;
49174 int count;
49175 int error;
49176 };
49177@@ -999,6 +1012,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
49178 buf->error = -EINVAL; /* only used if we fail.. */
49179 if (reclen > buf->count)
49180 return -EINVAL;
49181+
49182+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49183+ return 0;
49184+
49185 dirent = buf->previous;
49186
49187 if (dirent) {
49188@@ -1048,13 +1065,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
49189 buf.previous = NULL;
49190 buf.count = count;
49191 buf.error = 0;
49192+ buf.file = f.file;
49193
49194 error = vfs_readdir(f.file, compat_filldir64, &buf);
49195 if (error >= 0)
49196 error = buf.error;
49197 lastdirent = buf.previous;
49198 if (lastdirent) {
49199- typeof(lastdirent->d_off) d_off = f.file->f_pos;
49200+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
49201 if (__put_user_unaligned(d_off, &lastdirent->d_off))
49202 error = -EFAULT;
49203 else
49204diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
49205index a81147e..20bf2b5 100644
49206--- a/fs/compat_binfmt_elf.c
49207+++ b/fs/compat_binfmt_elf.c
49208@@ -30,11 +30,13 @@
49209 #undef elf_phdr
49210 #undef elf_shdr
49211 #undef elf_note
49212+#undef elf_dyn
49213 #undef elf_addr_t
49214 #define elfhdr elf32_hdr
49215 #define elf_phdr elf32_phdr
49216 #define elf_shdr elf32_shdr
49217 #define elf_note elf32_note
49218+#define elf_dyn Elf32_Dyn
49219 #define elf_addr_t Elf32_Addr
49220
49221 /*
49222diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
49223index e2f57a0..3c78771 100644
49224--- a/fs/compat_ioctl.c
49225+++ b/fs/compat_ioctl.c
49226@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
49227 return -EFAULT;
49228 if (__get_user(udata, &ss32->iomem_base))
49229 return -EFAULT;
49230- ss.iomem_base = compat_ptr(udata);
49231+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
49232 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
49233 __get_user(ss.port_high, &ss32->port_high))
49234 return -EFAULT;
49235@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
49236 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
49237 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
49238 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
49239- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
49240+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
49241 return -EFAULT;
49242
49243 return ioctl_preallocate(file, p);
49244@@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
49245 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
49246 {
49247 unsigned int a, b;
49248- a = *(unsigned int *)p;
49249- b = *(unsigned int *)q;
49250+ a = *(const unsigned int *)p;
49251+ b = *(const unsigned int *)q;
49252 if (a > b)
49253 return 1;
49254 if (a < b)
49255diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
49256index 712b10f..c33c4ca 100644
49257--- a/fs/configfs/dir.c
49258+++ b/fs/configfs/dir.c
49259@@ -1037,10 +1037,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
49260 static int configfs_depend_prep(struct dentry *origin,
49261 struct config_item *target)
49262 {
49263- struct configfs_dirent *child_sd, *sd = origin->d_fsdata;
49264+ struct configfs_dirent *child_sd, *sd;
49265 int ret = 0;
49266
49267- BUG_ON(!origin || !sd);
49268+ BUG_ON(!origin || !origin->d_fsdata);
49269+ sd = origin->d_fsdata;
49270
49271 if (sd->s_element == target) /* Boo-yah */
49272 goto out;
49273@@ -1564,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
49274 }
49275 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
49276 struct configfs_dirent *next;
49277- const char * name;
49278+ const unsigned char * name;
49279+ char d_name[sizeof(next->s_dentry->d_iname)];
49280 int len;
49281 struct inode *inode = NULL;
49282
49283@@ -1574,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
49284 continue;
49285
49286 name = configfs_get_name(next);
49287- len = strlen(name);
49288+ if (next->s_dentry && name == next->s_dentry->d_iname) {
49289+ len = next->s_dentry->d_name.len;
49290+ memcpy(d_name, name, len);
49291+ name = d_name;
49292+ } else
49293+ len = strlen(name);
49294
49295 /*
49296 * We'll have a dentry and an inode for
49297diff --git a/fs/coredump.c b/fs/coredump.c
49298index 1774932..5812106 100644
49299--- a/fs/coredump.c
49300+++ b/fs/coredump.c
49301@@ -52,7 +52,7 @@ struct core_name {
49302 char *corename;
49303 int used, size;
49304 };
49305-static atomic_t call_count = ATOMIC_INIT(1);
49306+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
49307
49308 /* The maximal length of core_pattern is also specified in sysctl.c */
49309
49310@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
49311 {
49312 char *old_corename = cn->corename;
49313
49314- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
49315+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
49316 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
49317
49318 if (!cn->corename) {
49319@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
49320 int pid_in_pattern = 0;
49321 int err = 0;
49322
49323- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
49324+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
49325 cn->corename = kmalloc(cn->size, GFP_KERNEL);
49326 cn->used = 0;
49327
49328@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
49329 pipe = file->f_path.dentry->d_inode->i_pipe;
49330
49331 pipe_lock(pipe);
49332- pipe->readers++;
49333- pipe->writers--;
49334+ atomic_inc(&pipe->readers);
49335+ atomic_dec(&pipe->writers);
49336
49337- while ((pipe->readers > 1) && (!signal_pending(current))) {
49338+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49339 wake_up_interruptible_sync(&pipe->wait);
49340 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49341 pipe_wait(pipe);
49342 }
49343
49344- pipe->readers--;
49345- pipe->writers++;
49346+ atomic_dec(&pipe->readers);
49347+ atomic_inc(&pipe->writers);
49348 pipe_unlock(pipe);
49349
49350 }
49351@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
49352 int ispipe;
49353 struct files_struct *displaced;
49354 bool need_nonrelative = false;
49355- static atomic_t core_dump_count = ATOMIC_INIT(0);
49356+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49357+ long signr = siginfo->si_signo;
49358 struct coredump_params cprm = {
49359 .siginfo = siginfo,
49360 .regs = signal_pt_regs(),
49361@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
49362 .mm_flags = mm->flags,
49363 };
49364
49365- audit_core_dumps(siginfo->si_signo);
49366+ audit_core_dumps(signr);
49367+
49368+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49369+ gr_handle_brute_attach(cprm.mm_flags);
49370
49371 binfmt = mm->binfmt;
49372 if (!binfmt || !binfmt->core_dump)
49373@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
49374 need_nonrelative = true;
49375 }
49376
49377- retval = coredump_wait(siginfo->si_signo, &core_state);
49378+ retval = coredump_wait(signr, &core_state);
49379 if (retval < 0)
49380 goto fail_creds;
49381
49382@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
49383 }
49384 cprm.limit = RLIM_INFINITY;
49385
49386- dump_count = atomic_inc_return(&core_dump_count);
49387+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
49388 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49389 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49390 task_tgid_vnr(current), current->comm);
49391@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
49392 } else {
49393 struct inode *inode;
49394
49395+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49396+
49397 if (cprm.limit < binfmt->min_coredump)
49398 goto fail_unlock;
49399
49400@@ -640,7 +646,7 @@ close_fail:
49401 filp_close(cprm.file, NULL);
49402 fail_dropcount:
49403 if (ispipe)
49404- atomic_dec(&core_dump_count);
49405+ atomic_dec_unchecked(&core_dump_count);
49406 fail_unlock:
49407 kfree(cn.corename);
49408 fail_corename:
49409@@ -659,7 +665,7 @@ fail:
49410 */
49411 int dump_write(struct file *file, const void *addr, int nr)
49412 {
49413- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
49414+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
49415 }
49416 EXPORT_SYMBOL(dump_write);
49417
49418diff --git a/fs/dcache.c b/fs/dcache.c
49419index 19153a0..428c2f5 100644
49420--- a/fs/dcache.c
49421+++ b/fs/dcache.c
49422@@ -3133,7 +3133,7 @@ void __init vfs_caches_init(unsigned long mempages)
49423 mempages -= reserve;
49424
49425 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
49426- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
49427+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
49428
49429 dcache_init();
49430 inode_init();
49431diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
49432index a5f12b7..4ee8a6f 100644
49433--- a/fs/debugfs/inode.c
49434+++ b/fs/debugfs/inode.c
49435@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
49436 */
49437 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
49438 {
49439+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49440+ return __create_file(name, S_IFDIR | S_IRWXU,
49441+#else
49442 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
49443+#endif
49444 parent, NULL, NULL);
49445 }
49446 EXPORT_SYMBOL_GPL(debugfs_create_dir);
49447diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
49448index cc7709e..7e7211f 100644
49449--- a/fs/ecryptfs/inode.c
49450+++ b/fs/ecryptfs/inode.c
49451@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
49452 old_fs = get_fs();
49453 set_fs(get_ds());
49454 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
49455- (char __user *)lower_buf,
49456+ (char __force_user *)lower_buf,
49457 PATH_MAX);
49458 set_fs(old_fs);
49459 if (rc < 0)
49460@@ -706,7 +706,7 @@ out:
49461 static void
49462 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
49463 {
49464- char *buf = nd_get_link(nd);
49465+ const char *buf = nd_get_link(nd);
49466 if (!IS_ERR(buf)) {
49467 /* Free the char* */
49468 kfree(buf);
49469diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
49470index 412e6ed..4292d22 100644
49471--- a/fs/ecryptfs/miscdev.c
49472+++ b/fs/ecryptfs/miscdev.c
49473@@ -315,7 +315,7 @@ check_list:
49474 goto out_unlock_msg_ctx;
49475 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
49476 if (msg_ctx->msg) {
49477- if (copy_to_user(&buf[i], packet_length, packet_length_size))
49478+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
49479 goto out_unlock_msg_ctx;
49480 i += packet_length_size;
49481 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
49482diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
49483index b2a34a1..162fa69 100644
49484--- a/fs/ecryptfs/read_write.c
49485+++ b/fs/ecryptfs/read_write.c
49486@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
49487 return -EIO;
49488 fs_save = get_fs();
49489 set_fs(get_ds());
49490- rc = vfs_write(lower_file, data, size, &offset);
49491+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
49492 set_fs(fs_save);
49493 mark_inode_dirty_sync(ecryptfs_inode);
49494 return rc;
49495@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
49496 return -EIO;
49497 fs_save = get_fs();
49498 set_fs(get_ds());
49499- rc = vfs_read(lower_file, data, size, &offset);
49500+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
49501 set_fs(fs_save);
49502 return rc;
49503 }
49504diff --git a/fs/exec.c b/fs/exec.c
49505index 20df02c..9b8f78d 100644
49506--- a/fs/exec.c
49507+++ b/fs/exec.c
49508@@ -55,6 +55,17 @@
49509 #include <linux/pipe_fs_i.h>
49510 #include <linux/oom.h>
49511 #include <linux/compat.h>
49512+#include <linux/random.h>
49513+#include <linux/seq_file.h>
49514+#include <linux/coredump.h>
49515+#include <linux/mman.h>
49516+
49517+#ifdef CONFIG_PAX_REFCOUNT
49518+#include <linux/kallsyms.h>
49519+#include <linux/kdebug.h>
49520+#endif
49521+
49522+#include <trace/events/fs.h>
49523
49524 #include <asm/uaccess.h>
49525 #include <asm/mmu_context.h>
49526@@ -66,6 +77,18 @@
49527
49528 #include <trace/events/sched.h>
49529
49530+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49531+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
49532+{
49533+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
49534+}
49535+#endif
49536+
49537+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
49538+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
49539+EXPORT_SYMBOL(pax_set_initial_flags_func);
49540+#endif
49541+
49542 int suid_dumpable = 0;
49543
49544 static LIST_HEAD(formats);
49545@@ -75,8 +98,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
49546 {
49547 BUG_ON(!fmt);
49548 write_lock(&binfmt_lock);
49549- insert ? list_add(&fmt->lh, &formats) :
49550- list_add_tail(&fmt->lh, &formats);
49551+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
49552+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
49553 write_unlock(&binfmt_lock);
49554 }
49555
49556@@ -85,7 +108,7 @@ EXPORT_SYMBOL(__register_binfmt);
49557 void unregister_binfmt(struct linux_binfmt * fmt)
49558 {
49559 write_lock(&binfmt_lock);
49560- list_del(&fmt->lh);
49561+ pax_list_del((struct list_head *)&fmt->lh);
49562 write_unlock(&binfmt_lock);
49563 }
49564
49565@@ -180,18 +203,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
49566 int write)
49567 {
49568 struct page *page;
49569- int ret;
49570
49571-#ifdef CONFIG_STACK_GROWSUP
49572- if (write) {
49573- ret = expand_downwards(bprm->vma, pos);
49574- if (ret < 0)
49575- return NULL;
49576- }
49577-#endif
49578- ret = get_user_pages(current, bprm->mm, pos,
49579- 1, write, 1, &page, NULL);
49580- if (ret <= 0)
49581+ if (0 > expand_downwards(bprm->vma, pos))
49582+ return NULL;
49583+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
49584 return NULL;
49585
49586 if (write) {
49587@@ -207,6 +222,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
49588 if (size <= ARG_MAX)
49589 return page;
49590
49591+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49592+ // only allow 512KB for argv+env on suid/sgid binaries
49593+ // to prevent easy ASLR exhaustion
49594+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
49595+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
49596+ (size > (512 * 1024))) {
49597+ put_page(page);
49598+ return NULL;
49599+ }
49600+#endif
49601+
49602 /*
49603 * Limit to 1/4-th the stack size for the argv+env strings.
49604 * This ensures that:
49605@@ -266,6 +292,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
49606 vma->vm_end = STACK_TOP_MAX;
49607 vma->vm_start = vma->vm_end - PAGE_SIZE;
49608 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
49609+
49610+#ifdef CONFIG_PAX_SEGMEXEC
49611+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
49612+#endif
49613+
49614 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
49615 INIT_LIST_HEAD(&vma->anon_vma_chain);
49616
49617@@ -276,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
49618 mm->stack_vm = mm->total_vm = 1;
49619 up_write(&mm->mmap_sem);
49620 bprm->p = vma->vm_end - sizeof(void *);
49621+
49622+#ifdef CONFIG_PAX_RANDUSTACK
49623+ if (randomize_va_space)
49624+ bprm->p ^= random32() & ~PAGE_MASK;
49625+#endif
49626+
49627 return 0;
49628 err:
49629 up_write(&mm->mmap_sem);
49630@@ -384,19 +421,7 @@ err:
49631 return err;
49632 }
49633
49634-struct user_arg_ptr {
49635-#ifdef CONFIG_COMPAT
49636- bool is_compat;
49637-#endif
49638- union {
49639- const char __user *const __user *native;
49640-#ifdef CONFIG_COMPAT
49641- const compat_uptr_t __user *compat;
49642-#endif
49643- } ptr;
49644-};
49645-
49646-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
49647+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
49648 {
49649 const char __user *native;
49650
49651@@ -405,14 +430,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
49652 compat_uptr_t compat;
49653
49654 if (get_user(compat, argv.ptr.compat + nr))
49655- return ERR_PTR(-EFAULT);
49656+ return (const char __force_user *)ERR_PTR(-EFAULT);
49657
49658 return compat_ptr(compat);
49659 }
49660 #endif
49661
49662 if (get_user(native, argv.ptr.native + nr))
49663- return ERR_PTR(-EFAULT);
49664+ return (const char __force_user *)ERR_PTR(-EFAULT);
49665
49666 return native;
49667 }
49668@@ -431,7 +456,7 @@ static int count(struct user_arg_ptr argv, int max)
49669 if (!p)
49670 break;
49671
49672- if (IS_ERR(p))
49673+ if (IS_ERR((const char __force_kernel *)p))
49674 return -EFAULT;
49675
49676 if (i >= max)
49677@@ -466,7 +491,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
49678
49679 ret = -EFAULT;
49680 str = get_user_arg_ptr(argv, argc);
49681- if (IS_ERR(str))
49682+ if (IS_ERR((const char __force_kernel *)str))
49683 goto out;
49684
49685 len = strnlen_user(str, MAX_ARG_STRLEN);
49686@@ -548,7 +573,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
49687 int r;
49688 mm_segment_t oldfs = get_fs();
49689 struct user_arg_ptr argv = {
49690- .ptr.native = (const char __user *const __user *)__argv,
49691+ .ptr.native = (const char __force_user *const __force_user *)__argv,
49692 };
49693
49694 set_fs(KERNEL_DS);
49695@@ -583,7 +608,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
49696 unsigned long new_end = old_end - shift;
49697 struct mmu_gather tlb;
49698
49699- BUG_ON(new_start > new_end);
49700+ if (new_start >= new_end || new_start < mmap_min_addr)
49701+ return -ENOMEM;
49702
49703 /*
49704 * ensure there are no vmas between where we want to go
49705@@ -592,6 +618,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
49706 if (vma != find_vma(mm, new_start))
49707 return -EFAULT;
49708
49709+#ifdef CONFIG_PAX_SEGMEXEC
49710+ BUG_ON(pax_find_mirror_vma(vma));
49711+#endif
49712+
49713 /*
49714 * cover the whole range: [new_start, old_end)
49715 */
49716@@ -672,10 +702,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
49717 stack_top = arch_align_stack(stack_top);
49718 stack_top = PAGE_ALIGN(stack_top);
49719
49720- if (unlikely(stack_top < mmap_min_addr) ||
49721- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
49722- return -ENOMEM;
49723-
49724 stack_shift = vma->vm_end - stack_top;
49725
49726 bprm->p -= stack_shift;
49727@@ -687,8 +713,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
49728 bprm->exec -= stack_shift;
49729
49730 down_write(&mm->mmap_sem);
49731+
49732+ /* Move stack pages down in memory. */
49733+ if (stack_shift) {
49734+ ret = shift_arg_pages(vma, stack_shift);
49735+ if (ret)
49736+ goto out_unlock;
49737+ }
49738+
49739 vm_flags = VM_STACK_FLAGS;
49740
49741+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49742+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49743+ vm_flags &= ~VM_EXEC;
49744+
49745+#ifdef CONFIG_PAX_MPROTECT
49746+ if (mm->pax_flags & MF_PAX_MPROTECT)
49747+ vm_flags &= ~VM_MAYEXEC;
49748+#endif
49749+
49750+ }
49751+#endif
49752+
49753 /*
49754 * Adjust stack execute permissions; explicitly enable for
49755 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
49756@@ -707,13 +753,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
49757 goto out_unlock;
49758 BUG_ON(prev != vma);
49759
49760- /* Move stack pages down in memory. */
49761- if (stack_shift) {
49762- ret = shift_arg_pages(vma, stack_shift);
49763- if (ret)
49764- goto out_unlock;
49765- }
49766-
49767 /* mprotect_fixup is overkill to remove the temporary stack flags */
49768 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
49769
49770@@ -737,6 +776,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
49771 #endif
49772 current->mm->start_stack = bprm->p;
49773 ret = expand_stack(vma, stack_base);
49774+
49775+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_ASLR)
49776+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
49777+ unsigned long size, flags, vm_flags;
49778+
49779+ size = STACK_TOP - vma->vm_end;
49780+ flags = MAP_FIXED | MAP_PRIVATE;
49781+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
49782+
49783+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
49784+
49785+#ifdef CONFIG_X86
49786+ if (!ret) {
49787+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
49788+ ret = 0 != mmap_region(NULL, 0, size, flags, vm_flags, 0);
49789+ }
49790+#endif
49791+
49792+ }
49793+#endif
49794+
49795 if (ret)
49796 ret = -EFAULT;
49797
49798@@ -772,6 +832,8 @@ struct file *open_exec(const char *name)
49799
49800 fsnotify_open(file);
49801
49802+ trace_open_exec(name);
49803+
49804 err = deny_write_access(file);
49805 if (err)
49806 goto exit;
49807@@ -795,7 +857,7 @@ int kernel_read(struct file *file, loff_t offset,
49808 old_fs = get_fs();
49809 set_fs(get_ds());
49810 /* The cast to a user pointer is valid due to the set_fs() */
49811- result = vfs_read(file, (void __user *)addr, count, &pos);
49812+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
49813 set_fs(old_fs);
49814 return result;
49815 }
49816@@ -1247,7 +1309,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
49817 }
49818 rcu_read_unlock();
49819
49820- if (p->fs->users > n_fs) {
49821+ if (atomic_read(&p->fs->users) > n_fs) {
49822 bprm->unsafe |= LSM_UNSAFE_SHARE;
49823 } else {
49824 res = -EAGAIN;
49825@@ -1447,6 +1509,28 @@ int search_binary_handler(struct linux_binprm *bprm)
49826
49827 EXPORT_SYMBOL(search_binary_handler);
49828
49829+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49830+static DEFINE_PER_CPU(u64, exec_counter);
49831+static int __init init_exec_counters(void)
49832+{
49833+ unsigned int cpu;
49834+
49835+ for_each_possible_cpu(cpu) {
49836+ per_cpu(exec_counter, cpu) = (u64)cpu;
49837+ }
49838+
49839+ return 0;
49840+}
49841+early_initcall(init_exec_counters);
49842+static inline void increment_exec_counter(void)
49843+{
49844+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
49845+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
49846+}
49847+#else
49848+static inline void increment_exec_counter(void) {}
49849+#endif
49850+
49851 /*
49852 * sys_execve() executes a new program.
49853 */
49854@@ -1454,6 +1538,11 @@ static int do_execve_common(const char *filename,
49855 struct user_arg_ptr argv,
49856 struct user_arg_ptr envp)
49857 {
49858+#ifdef CONFIG_GRKERNSEC
49859+ struct file *old_exec_file;
49860+ struct acl_subject_label *old_acl;
49861+ struct rlimit old_rlim[RLIM_NLIMITS];
49862+#endif
49863 struct linux_binprm *bprm;
49864 struct file *file;
49865 struct files_struct *displaced;
49866@@ -1461,6 +1550,8 @@ static int do_execve_common(const char *filename,
49867 int retval;
49868 const struct cred *cred = current_cred();
49869
49870+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
49871+
49872 /*
49873 * We move the actual failure in case of RLIMIT_NPROC excess from
49874 * set*uid() to execve() because too many poorly written programs
49875@@ -1501,12 +1592,27 @@ static int do_execve_common(const char *filename,
49876 if (IS_ERR(file))
49877 goto out_unmark;
49878
49879+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
49880+ retval = -EPERM;
49881+ goto out_file;
49882+ }
49883+
49884 sched_exec();
49885
49886 bprm->file = file;
49887 bprm->filename = filename;
49888 bprm->interp = filename;
49889
49890+ if (gr_process_user_ban()) {
49891+ retval = -EPERM;
49892+ goto out_file;
49893+ }
49894+
49895+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
49896+ retval = -EACCES;
49897+ goto out_file;
49898+ }
49899+
49900 retval = bprm_mm_init(bprm);
49901 if (retval)
49902 goto out_file;
49903@@ -1523,24 +1629,65 @@ static int do_execve_common(const char *filename,
49904 if (retval < 0)
49905 goto out;
49906
49907+#ifdef CONFIG_GRKERNSEC
49908+ old_acl = current->acl;
49909+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
49910+ old_exec_file = current->exec_file;
49911+ get_file(file);
49912+ current->exec_file = file;
49913+#endif
49914+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49915+ /* limit suid stack to 8MB
49916+ * we saved the old limits above and will restore them if this exec fails
49917+ */
49918+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
49919+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
49920+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
49921+#endif
49922+
49923+ if (!gr_tpe_allow(file)) {
49924+ retval = -EACCES;
49925+ goto out_fail;
49926+ }
49927+
49928+ if (gr_check_crash_exec(file)) {
49929+ retval = -EACCES;
49930+ goto out_fail;
49931+ }
49932+
49933+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
49934+ bprm->unsafe);
49935+ if (retval < 0)
49936+ goto out_fail;
49937+
49938 retval = copy_strings_kernel(1, &bprm->filename, bprm);
49939 if (retval < 0)
49940- goto out;
49941+ goto out_fail;
49942
49943 bprm->exec = bprm->p;
49944 retval = copy_strings(bprm->envc, envp, bprm);
49945 if (retval < 0)
49946- goto out;
49947+ goto out_fail;
49948
49949 retval = copy_strings(bprm->argc, argv, bprm);
49950 if (retval < 0)
49951- goto out;
49952+ goto out_fail;
49953+
49954+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
49955+
49956+ gr_handle_exec_args(bprm, argv);
49957
49958 retval = search_binary_handler(bprm);
49959 if (retval < 0)
49960- goto out;
49961+ goto out_fail;
49962+#ifdef CONFIG_GRKERNSEC
49963+ if (old_exec_file)
49964+ fput(old_exec_file);
49965+#endif
49966
49967 /* execve succeeded */
49968+
49969+ increment_exec_counter();
49970 current->fs->in_exec = 0;
49971 current->in_execve = 0;
49972 acct_update_integrals(current);
49973@@ -1549,6 +1696,14 @@ static int do_execve_common(const char *filename,
49974 put_files_struct(displaced);
49975 return retval;
49976
49977+out_fail:
49978+#ifdef CONFIG_GRKERNSEC
49979+ current->acl = old_acl;
49980+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
49981+ fput(current->exec_file);
49982+ current->exec_file = old_exec_file;
49983+#endif
49984+
49985 out:
49986 if (bprm->mm) {
49987 acct_arg_size(bprm, 0);
49988@@ -1697,3 +1852,253 @@ asmlinkage long compat_sys_execve(const char __user * filename,
49989 return error;
49990 }
49991 #endif
49992+
49993+int pax_check_flags(unsigned long *flags)
49994+{
49995+ int retval = 0;
49996+
49997+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
49998+ if (*flags & MF_PAX_SEGMEXEC)
49999+ {
50000+ *flags &= ~MF_PAX_SEGMEXEC;
50001+ retval = -EINVAL;
50002+ }
50003+#endif
50004+
50005+ if ((*flags & MF_PAX_PAGEEXEC)
50006+
50007+#ifdef CONFIG_PAX_PAGEEXEC
50008+ && (*flags & MF_PAX_SEGMEXEC)
50009+#endif
50010+
50011+ )
50012+ {
50013+ *flags &= ~MF_PAX_PAGEEXEC;
50014+ retval = -EINVAL;
50015+ }
50016+
50017+ if ((*flags & MF_PAX_MPROTECT)
50018+
50019+#ifdef CONFIG_PAX_MPROTECT
50020+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50021+#endif
50022+
50023+ )
50024+ {
50025+ *flags &= ~MF_PAX_MPROTECT;
50026+ retval = -EINVAL;
50027+ }
50028+
50029+ if ((*flags & MF_PAX_EMUTRAMP)
50030+
50031+#ifdef CONFIG_PAX_EMUTRAMP
50032+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50033+#endif
50034+
50035+ )
50036+ {
50037+ *flags &= ~MF_PAX_EMUTRAMP;
50038+ retval = -EINVAL;
50039+ }
50040+
50041+ return retval;
50042+}
50043+
50044+EXPORT_SYMBOL(pax_check_flags);
50045+
50046+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50047+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
50048+{
50049+ struct task_struct *tsk = current;
50050+ struct mm_struct *mm = current->mm;
50051+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
50052+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
50053+ char *path_exec = NULL;
50054+ char *path_fault = NULL;
50055+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
50056+ siginfo_t info = { };
50057+
50058+ if (buffer_exec && buffer_fault) {
50059+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
50060+
50061+ down_read(&mm->mmap_sem);
50062+ vma = mm->mmap;
50063+ while (vma && (!vma_exec || !vma_fault)) {
50064+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
50065+ vma_exec = vma;
50066+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
50067+ vma_fault = vma;
50068+ vma = vma->vm_next;
50069+ }
50070+ if (vma_exec) {
50071+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
50072+ if (IS_ERR(path_exec))
50073+ path_exec = "<path too long>";
50074+ else {
50075+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
50076+ if (path_exec) {
50077+ *path_exec = 0;
50078+ path_exec = buffer_exec;
50079+ } else
50080+ path_exec = "<path too long>";
50081+ }
50082+ }
50083+ if (vma_fault) {
50084+ start = vma_fault->vm_start;
50085+ end = vma_fault->vm_end;
50086+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
50087+ if (vma_fault->vm_file) {
50088+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
50089+ if (IS_ERR(path_fault))
50090+ path_fault = "<path too long>";
50091+ else {
50092+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
50093+ if (path_fault) {
50094+ *path_fault = 0;
50095+ path_fault = buffer_fault;
50096+ } else
50097+ path_fault = "<path too long>";
50098+ }
50099+ } else
50100+ path_fault = "<anonymous mapping>";
50101+ }
50102+ up_read(&mm->mmap_sem);
50103+ }
50104+ if (tsk->signal->curr_ip)
50105+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
50106+ else
50107+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
50108+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
50109+ from_kuid(&init_user_ns, task_uid(tsk)), from_kuid(&init_user_ns, task_euid(tsk)), pc, sp);
50110+ free_page((unsigned long)buffer_exec);
50111+ free_page((unsigned long)buffer_fault);
50112+ pax_report_insns(regs, pc, sp);
50113+ info.si_signo = SIGKILL;
50114+ info.si_errno = 0;
50115+ info.si_code = SI_KERNEL;
50116+ info.si_pid = 0;
50117+ info.si_uid = 0;
50118+ do_coredump(&info);
50119+}
50120+#endif
50121+
50122+#ifdef CONFIG_PAX_REFCOUNT
50123+void pax_report_refcount_overflow(struct pt_regs *regs)
50124+{
50125+ if (current->signal->curr_ip)
50126+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
50127+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
50128+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
50129+ else
50130+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
50131+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
50132+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
50133+ show_regs(regs);
50134+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
50135+}
50136+#endif
50137+
50138+#ifdef CONFIG_PAX_USERCOPY
50139+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
50140+static noinline int check_stack_object(const void *obj, unsigned long len)
50141+{
50142+ const void * const stack = task_stack_page(current);
50143+ const void * const stackend = stack + THREAD_SIZE;
50144+
50145+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
50146+ const void *frame = NULL;
50147+ const void *oldframe;
50148+#endif
50149+
50150+ if (obj + len < obj)
50151+ return -1;
50152+
50153+ if (obj + len <= stack || stackend <= obj)
50154+ return 0;
50155+
50156+ if (obj < stack || stackend < obj + len)
50157+ return -1;
50158+
50159+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
50160+ oldframe = __builtin_frame_address(1);
50161+ if (oldframe)
50162+ frame = __builtin_frame_address(2);
50163+ /*
50164+ low ----------------------------------------------> high
50165+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
50166+ ^----------------^
50167+ allow copies only within here
50168+ */
50169+ while (stack <= frame && frame < stackend) {
50170+ /* if obj + len extends past the last frame, this
50171+ check won't pass and the next frame will be 0,
50172+ causing us to bail out and correctly report
50173+ the copy as invalid
50174+ */
50175+ if (obj + len <= frame)
50176+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
50177+ oldframe = frame;
50178+ frame = *(const void * const *)frame;
50179+ }
50180+ return -1;
50181+#else
50182+ return 1;
50183+#endif
50184+}
50185+
50186+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
50187+{
50188+ if (current->signal->curr_ip)
50189+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
50190+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
50191+ else
50192+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
50193+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
50194+ dump_stack();
50195+ gr_handle_kernel_exploit();
50196+ do_group_exit(SIGKILL);
50197+}
50198+#endif
50199+
50200+void __check_object_size(const void *ptr, unsigned long n, bool to)
50201+{
50202+
50203+#ifdef CONFIG_PAX_USERCOPY
50204+ const char *type;
50205+
50206+ if (!n)
50207+ return;
50208+
50209+ type = check_heap_object(ptr, n);
50210+ if (!type) {
50211+ if (check_stack_object(ptr, n) != -1)
50212+ return;
50213+ type = "<process stack>";
50214+ }
50215+
50216+ pax_report_usercopy(ptr, n, to, type);
50217+#endif
50218+
50219+}
50220+EXPORT_SYMBOL(__check_object_size);
50221+
50222+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
50223+void pax_track_stack(void)
50224+{
50225+ unsigned long sp = (unsigned long)&sp;
50226+ if (sp < current_thread_info()->lowest_stack &&
50227+ sp > (unsigned long)task_stack_page(current))
50228+ current_thread_info()->lowest_stack = sp;
50229+}
50230+EXPORT_SYMBOL(pax_track_stack);
50231+#endif
50232+
50233+#ifdef CONFIG_PAX_SIZE_OVERFLOW
50234+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
50235+{
50236+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
50237+ dump_stack();
50238+ do_group_exit(SIGKILL);
50239+}
50240+EXPORT_SYMBOL(report_size_overflow);
50241+#endif
50242diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
50243index 2616d0e..2ffdec9 100644
50244--- a/fs/ext2/balloc.c
50245+++ b/fs/ext2/balloc.c
50246@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
50247
50248 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
50249 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
50250- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
50251+ if (free_blocks < root_blocks + 1 &&
50252 !uid_eq(sbi->s_resuid, current_fsuid()) &&
50253 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
50254- !in_group_p (sbi->s_resgid))) {
50255+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
50256 return 0;
50257 }
50258 return 1;
50259diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
50260index 22548f5..41521d8 100644
50261--- a/fs/ext3/balloc.c
50262+++ b/fs/ext3/balloc.c
50263@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
50264
50265 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
50266 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
50267- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
50268+ if (free_blocks < root_blocks + 1 &&
50269 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
50270 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
50271- !in_group_p (sbi->s_resgid))) {
50272+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
50273 return 0;
50274 }
50275 return 1;
50276diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
50277index 2f2e0da..89b113a 100644
50278--- a/fs/ext4/balloc.c
50279+++ b/fs/ext4/balloc.c
50280@@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
50281 /* Hm, nope. Are (enough) root reserved clusters available? */
50282 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
50283 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
50284- capable(CAP_SYS_RESOURCE) ||
50285- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
50286+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
50287+ capable_nolog(CAP_SYS_RESOURCE)) {
50288
50289 if (free_clusters >= (nclusters + dirty_clusters))
50290 return 1;
50291diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
50292index 8462eb3..4a71af6 100644
50293--- a/fs/ext4/ext4.h
50294+++ b/fs/ext4/ext4.h
50295@@ -1265,19 +1265,19 @@ struct ext4_sb_info {
50296 unsigned long s_mb_last_start;
50297
50298 /* stats for buddy allocator */
50299- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
50300- atomic_t s_bal_success; /* we found long enough chunks */
50301- atomic_t s_bal_allocated; /* in blocks */
50302- atomic_t s_bal_ex_scanned; /* total extents scanned */
50303- atomic_t s_bal_goals; /* goal hits */
50304- atomic_t s_bal_breaks; /* too long searches */
50305- atomic_t s_bal_2orders; /* 2^order hits */
50306+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
50307+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
50308+ atomic_unchecked_t s_bal_allocated; /* in blocks */
50309+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
50310+ atomic_unchecked_t s_bal_goals; /* goal hits */
50311+ atomic_unchecked_t s_bal_breaks; /* too long searches */
50312+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
50313 spinlock_t s_bal_lock;
50314 unsigned long s_mb_buddies_generated;
50315 unsigned long long s_mb_generation_time;
50316- atomic_t s_mb_lost_chunks;
50317- atomic_t s_mb_preallocated;
50318- atomic_t s_mb_discarded;
50319+ atomic_unchecked_t s_mb_lost_chunks;
50320+ atomic_unchecked_t s_mb_preallocated;
50321+ atomic_unchecked_t s_mb_discarded;
50322 atomic_t s_lock_busy;
50323
50324 /* locality groups */
50325diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
50326index 061727a..7622abf 100644
50327--- a/fs/ext4/mballoc.c
50328+++ b/fs/ext4/mballoc.c
50329@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
50330 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
50331
50332 if (EXT4_SB(sb)->s_mb_stats)
50333- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
50334+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
50335
50336 break;
50337 }
50338@@ -2044,7 +2044,7 @@ repeat:
50339 ac->ac_status = AC_STATUS_CONTINUE;
50340 ac->ac_flags |= EXT4_MB_HINT_FIRST;
50341 cr = 3;
50342- atomic_inc(&sbi->s_mb_lost_chunks);
50343+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
50344 goto repeat;
50345 }
50346 }
50347@@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
50348 if (sbi->s_mb_stats) {
50349 ext4_msg(sb, KERN_INFO,
50350 "mballoc: %u blocks %u reqs (%u success)",
50351- atomic_read(&sbi->s_bal_allocated),
50352- atomic_read(&sbi->s_bal_reqs),
50353- atomic_read(&sbi->s_bal_success));
50354+ atomic_read_unchecked(&sbi->s_bal_allocated),
50355+ atomic_read_unchecked(&sbi->s_bal_reqs),
50356+ atomic_read_unchecked(&sbi->s_bal_success));
50357 ext4_msg(sb, KERN_INFO,
50358 "mballoc: %u extents scanned, %u goal hits, "
50359 "%u 2^N hits, %u breaks, %u lost",
50360- atomic_read(&sbi->s_bal_ex_scanned),
50361- atomic_read(&sbi->s_bal_goals),
50362- atomic_read(&sbi->s_bal_2orders),
50363- atomic_read(&sbi->s_bal_breaks),
50364- atomic_read(&sbi->s_mb_lost_chunks));
50365+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
50366+ atomic_read_unchecked(&sbi->s_bal_goals),
50367+ atomic_read_unchecked(&sbi->s_bal_2orders),
50368+ atomic_read_unchecked(&sbi->s_bal_breaks),
50369+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
50370 ext4_msg(sb, KERN_INFO,
50371 "mballoc: %lu generated and it took %Lu",
50372 sbi->s_mb_buddies_generated,
50373 sbi->s_mb_generation_time);
50374 ext4_msg(sb, KERN_INFO,
50375 "mballoc: %u preallocated, %u discarded",
50376- atomic_read(&sbi->s_mb_preallocated),
50377- atomic_read(&sbi->s_mb_discarded));
50378+ atomic_read_unchecked(&sbi->s_mb_preallocated),
50379+ atomic_read_unchecked(&sbi->s_mb_discarded));
50380 }
50381
50382 free_percpu(sbi->s_locality_groups);
50383@@ -3060,16 +3060,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
50384 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
50385
50386 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
50387- atomic_inc(&sbi->s_bal_reqs);
50388- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
50389+ atomic_inc_unchecked(&sbi->s_bal_reqs);
50390+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
50391 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
50392- atomic_inc(&sbi->s_bal_success);
50393- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
50394+ atomic_inc_unchecked(&sbi->s_bal_success);
50395+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
50396 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
50397 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
50398- atomic_inc(&sbi->s_bal_goals);
50399+ atomic_inc_unchecked(&sbi->s_bal_goals);
50400 if (ac->ac_found > sbi->s_mb_max_to_scan)
50401- atomic_inc(&sbi->s_bal_breaks);
50402+ atomic_inc_unchecked(&sbi->s_bal_breaks);
50403 }
50404
50405 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
50406@@ -3469,7 +3469,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
50407 trace_ext4_mb_new_inode_pa(ac, pa);
50408
50409 ext4_mb_use_inode_pa(ac, pa);
50410- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
50411+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
50412
50413 ei = EXT4_I(ac->ac_inode);
50414 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
50415@@ -3529,7 +3529,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
50416 trace_ext4_mb_new_group_pa(ac, pa);
50417
50418 ext4_mb_use_group_pa(ac, pa);
50419- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
50420+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
50421
50422 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
50423 lg = ac->ac_lg;
50424@@ -3618,7 +3618,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
50425 * from the bitmap and continue.
50426 */
50427 }
50428- atomic_add(free, &sbi->s_mb_discarded);
50429+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
50430
50431 return err;
50432 }
50433@@ -3636,7 +3636,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
50434 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
50435 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
50436 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
50437- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
50438+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
50439 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
50440
50441 return 0;
50442diff --git a/fs/ext4/super.c b/fs/ext4/super.c
50443index 0465f36..99a003a 100644
50444--- a/fs/ext4/super.c
50445+++ b/fs/ext4/super.c
50446@@ -2429,7 +2429,7 @@ struct ext4_attr {
50447 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
50448 const char *, size_t);
50449 int offset;
50450-};
50451+} __do_const;
50452
50453 static int parse_strtoul(const char *buf,
50454 unsigned long max, unsigned long *value)
50455diff --git a/fs/fcntl.c b/fs/fcntl.c
50456index 71a600a..20d87b1 100644
50457--- a/fs/fcntl.c
50458+++ b/fs/fcntl.c
50459@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
50460 if (err)
50461 return err;
50462
50463+ if (gr_handle_chroot_fowner(pid, type))
50464+ return -ENOENT;
50465+ if (gr_check_protected_task_fowner(pid, type))
50466+ return -EACCES;
50467+
50468 f_modown(filp, pid, type, force);
50469 return 0;
50470 }
50471diff --git a/fs/fhandle.c b/fs/fhandle.c
50472index 999ff5c..41f4109 100644
50473--- a/fs/fhandle.c
50474+++ b/fs/fhandle.c
50475@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
50476 } else
50477 retval = 0;
50478 /* copy the mount id */
50479- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
50480- sizeof(*mnt_id)) ||
50481+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
50482 copy_to_user(ufh, handle,
50483 sizeof(struct file_handle) + handle_bytes))
50484 retval = -EFAULT;
50485diff --git a/fs/fifo.c b/fs/fifo.c
50486index cf6f434..3d7942c 100644
50487--- a/fs/fifo.c
50488+++ b/fs/fifo.c
50489@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
50490 */
50491 filp->f_op = &read_pipefifo_fops;
50492 pipe->r_counter++;
50493- if (pipe->readers++ == 0)
50494+ if (atomic_inc_return(&pipe->readers) == 1)
50495 wake_up_partner(inode);
50496
50497- if (!pipe->writers) {
50498+ if (!atomic_read(&pipe->writers)) {
50499 if ((filp->f_flags & O_NONBLOCK)) {
50500 /* suppress POLLHUP until we have
50501 * seen a writer */
50502@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
50503 * errno=ENXIO when there is no process reading the FIFO.
50504 */
50505 ret = -ENXIO;
50506- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
50507+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
50508 goto err;
50509
50510 filp->f_op = &write_pipefifo_fops;
50511 pipe->w_counter++;
50512- if (!pipe->writers++)
50513+ if (atomic_inc_return(&pipe->writers) == 1)
50514 wake_up_partner(inode);
50515
50516- if (!pipe->readers) {
50517+ if (!atomic_read(&pipe->readers)) {
50518 if (wait_for_partner(inode, &pipe->r_counter))
50519 goto err_wr;
50520 }
50521@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
50522 */
50523 filp->f_op = &rdwr_pipefifo_fops;
50524
50525- pipe->readers++;
50526- pipe->writers++;
50527+ atomic_inc(&pipe->readers);
50528+ atomic_inc(&pipe->writers);
50529 pipe->r_counter++;
50530 pipe->w_counter++;
50531- if (pipe->readers == 1 || pipe->writers == 1)
50532+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
50533 wake_up_partner(inode);
50534 break;
50535
50536@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
50537 return 0;
50538
50539 err_rd:
50540- if (!--pipe->readers)
50541+ if (atomic_dec_and_test(&pipe->readers))
50542 wake_up_interruptible(&pipe->wait);
50543 ret = -ERESTARTSYS;
50544 goto err;
50545
50546 err_wr:
50547- if (!--pipe->writers)
50548+ if (atomic_dec_and_test(&pipe->writers))
50549 wake_up_interruptible(&pipe->wait);
50550 ret = -ERESTARTSYS;
50551 goto err;
50552
50553 err:
50554- if (!pipe->readers && !pipe->writers)
50555+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
50556 free_pipe_info(inode);
50557
50558 err_nocleanup:
50559diff --git a/fs/file.c b/fs/file.c
50560index 2b3570b..c57924b 100644
50561--- a/fs/file.c
50562+++ b/fs/file.c
50563@@ -16,6 +16,7 @@
50564 #include <linux/slab.h>
50565 #include <linux/vmalloc.h>
50566 #include <linux/file.h>
50567+#include <linux/security.h>
50568 #include <linux/fdtable.h>
50569 #include <linux/bitops.h>
50570 #include <linux/interrupt.h>
50571@@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
50572 if (!file)
50573 return __close_fd(files, fd);
50574
50575+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
50576 if (fd >= rlimit(RLIMIT_NOFILE))
50577 return -EBADF;
50578
50579@@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
50580 if (unlikely(oldfd == newfd))
50581 return -EINVAL;
50582
50583+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
50584 if (newfd >= rlimit(RLIMIT_NOFILE))
50585 return -EBADF;
50586
50587@@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
50588 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
50589 {
50590 int err;
50591+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
50592 if (from >= rlimit(RLIMIT_NOFILE))
50593 return -EINVAL;
50594 err = alloc_fd(from, flags);
50595diff --git a/fs/filesystems.c b/fs/filesystems.c
50596index da165f6..3671bdb 100644
50597--- a/fs/filesystems.c
50598+++ b/fs/filesystems.c
50599@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
50600 int len = dot ? dot - name : strlen(name);
50601
50602 fs = __get_fs_type(name, len);
50603+
50604+#ifdef CONFIG_GRKERNSEC_MODHARDEN
50605+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
50606+#else
50607 if (!fs && (request_module("%.*s", len, name) == 0))
50608+#endif
50609 fs = __get_fs_type(name, len);
50610
50611 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
50612diff --git a/fs/fs_struct.c b/fs/fs_struct.c
50613index fe6ca58..65318cf 100644
50614--- a/fs/fs_struct.c
50615+++ b/fs/fs_struct.c
50616@@ -4,6 +4,7 @@
50617 #include <linux/path.h>
50618 #include <linux/slab.h>
50619 #include <linux/fs_struct.h>
50620+#include <linux/grsecurity.h>
50621 #include "internal.h"
50622
50623 /*
50624@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
50625 write_seqcount_begin(&fs->seq);
50626 old_root = fs->root;
50627 fs->root = *path;
50628+ gr_set_chroot_entries(current, path);
50629 write_seqcount_end(&fs->seq);
50630 spin_unlock(&fs->lock);
50631 if (old_root.dentry)
50632@@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
50633 return 1;
50634 }
50635
50636+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
50637+{
50638+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
50639+ return 0;
50640+ *p = *new;
50641+
50642+ /* This function is only called from pivot_root(). Leave our
50643+ gr_chroot_dentry and is_chrooted flags as-is, so that a
50644+ pivoted root isn't treated as a chroot
50645+ */
50646+ //gr_set_chroot_entries(task, new);
50647+
50648+ return 1;
50649+}
50650+
50651 void chroot_fs_refs(struct path *old_root, struct path *new_root)
50652 {
50653 struct task_struct *g, *p;
50654@@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
50655 int hits = 0;
50656 spin_lock(&fs->lock);
50657 write_seqcount_begin(&fs->seq);
50658- hits += replace_path(&fs->root, old_root, new_root);
50659+ hits += replace_root_path(p, &fs->root, old_root, new_root);
50660 hits += replace_path(&fs->pwd, old_root, new_root);
50661 write_seqcount_end(&fs->seq);
50662 while (hits--) {
50663@@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
50664 task_lock(tsk);
50665 spin_lock(&fs->lock);
50666 tsk->fs = NULL;
50667- kill = !--fs->users;
50668+ gr_clear_chroot_entries(tsk);
50669+ kill = !atomic_dec_return(&fs->users);
50670 spin_unlock(&fs->lock);
50671 task_unlock(tsk);
50672 if (kill)
50673@@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
50674 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
50675 /* We don't need to lock fs - think why ;-) */
50676 if (fs) {
50677- fs->users = 1;
50678+ atomic_set(&fs->users, 1);
50679 fs->in_exec = 0;
50680 spin_lock_init(&fs->lock);
50681 seqcount_init(&fs->seq);
50682@@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
50683 spin_lock(&old->lock);
50684 fs->root = old->root;
50685 path_get(&fs->root);
50686+ /* instead of calling gr_set_chroot_entries here,
50687+ we call it from every caller of this function
50688+ */
50689 fs->pwd = old->pwd;
50690 path_get(&fs->pwd);
50691 spin_unlock(&old->lock);
50692@@ -139,8 +160,9 @@ int unshare_fs_struct(void)
50693
50694 task_lock(current);
50695 spin_lock(&fs->lock);
50696- kill = !--fs->users;
50697+ kill = !atomic_dec_return(&fs->users);
50698 current->fs = new_fs;
50699+ gr_set_chroot_entries(current, &new_fs->root);
50700 spin_unlock(&fs->lock);
50701 task_unlock(current);
50702
50703@@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
50704
50705 int current_umask(void)
50706 {
50707- return current->fs->umask;
50708+ return current->fs->umask | gr_acl_umask();
50709 }
50710 EXPORT_SYMBOL(current_umask);
50711
50712 /* to be mentioned only in INIT_TASK */
50713 struct fs_struct init_fs = {
50714- .users = 1,
50715+ .users = ATOMIC_INIT(1),
50716 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
50717 .seq = SEQCNT_ZERO,
50718 .umask = 0022,
50719diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
50720index 8dcb114..b1072e2 100644
50721--- a/fs/fscache/cookie.c
50722+++ b/fs/fscache/cookie.c
50723@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
50724 parent ? (char *) parent->def->name : "<no-parent>",
50725 def->name, netfs_data);
50726
50727- fscache_stat(&fscache_n_acquires);
50728+ fscache_stat_unchecked(&fscache_n_acquires);
50729
50730 /* if there's no parent cookie, then we don't create one here either */
50731 if (!parent) {
50732- fscache_stat(&fscache_n_acquires_null);
50733+ fscache_stat_unchecked(&fscache_n_acquires_null);
50734 _leave(" [no parent]");
50735 return NULL;
50736 }
50737@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
50738 /* allocate and initialise a cookie */
50739 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
50740 if (!cookie) {
50741- fscache_stat(&fscache_n_acquires_oom);
50742+ fscache_stat_unchecked(&fscache_n_acquires_oom);
50743 _leave(" [ENOMEM]");
50744 return NULL;
50745 }
50746@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50747
50748 switch (cookie->def->type) {
50749 case FSCACHE_COOKIE_TYPE_INDEX:
50750- fscache_stat(&fscache_n_cookie_index);
50751+ fscache_stat_unchecked(&fscache_n_cookie_index);
50752 break;
50753 case FSCACHE_COOKIE_TYPE_DATAFILE:
50754- fscache_stat(&fscache_n_cookie_data);
50755+ fscache_stat_unchecked(&fscache_n_cookie_data);
50756 break;
50757 default:
50758- fscache_stat(&fscache_n_cookie_special);
50759+ fscache_stat_unchecked(&fscache_n_cookie_special);
50760 break;
50761 }
50762
50763@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50764 if (fscache_acquire_non_index_cookie(cookie) < 0) {
50765 atomic_dec(&parent->n_children);
50766 __fscache_cookie_put(cookie);
50767- fscache_stat(&fscache_n_acquires_nobufs);
50768+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
50769 _leave(" = NULL");
50770 return NULL;
50771 }
50772 }
50773
50774- fscache_stat(&fscache_n_acquires_ok);
50775+ fscache_stat_unchecked(&fscache_n_acquires_ok);
50776 _leave(" = %p", cookie);
50777 return cookie;
50778 }
50779@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
50780 cache = fscache_select_cache_for_object(cookie->parent);
50781 if (!cache) {
50782 up_read(&fscache_addremove_sem);
50783- fscache_stat(&fscache_n_acquires_no_cache);
50784+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
50785 _leave(" = -ENOMEDIUM [no cache]");
50786 return -ENOMEDIUM;
50787 }
50788@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
50789 object = cache->ops->alloc_object(cache, cookie);
50790 fscache_stat_d(&fscache_n_cop_alloc_object);
50791 if (IS_ERR(object)) {
50792- fscache_stat(&fscache_n_object_no_alloc);
50793+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
50794 ret = PTR_ERR(object);
50795 goto error;
50796 }
50797
50798- fscache_stat(&fscache_n_object_alloc);
50799+ fscache_stat_unchecked(&fscache_n_object_alloc);
50800
50801 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
50802
50803@@ -378,7 +378,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
50804
50805 _enter("{%s}", cookie->def->name);
50806
50807- fscache_stat(&fscache_n_invalidates);
50808+ fscache_stat_unchecked(&fscache_n_invalidates);
50809
50810 /* Only permit invalidation of data files. Invalidating an index will
50811 * require the caller to release all its attachments to the tree rooted
50812@@ -437,10 +437,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
50813 struct fscache_object *object;
50814 struct hlist_node *_p;
50815
50816- fscache_stat(&fscache_n_updates);
50817+ fscache_stat_unchecked(&fscache_n_updates);
50818
50819 if (!cookie) {
50820- fscache_stat(&fscache_n_updates_null);
50821+ fscache_stat_unchecked(&fscache_n_updates_null);
50822 _leave(" [no cookie]");
50823 return;
50824 }
50825@@ -474,12 +474,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50826 struct fscache_object *object;
50827 unsigned long event;
50828
50829- fscache_stat(&fscache_n_relinquishes);
50830+ fscache_stat_unchecked(&fscache_n_relinquishes);
50831 if (retire)
50832- fscache_stat(&fscache_n_relinquishes_retire);
50833+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
50834
50835 if (!cookie) {
50836- fscache_stat(&fscache_n_relinquishes_null);
50837+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
50838 _leave(" [no cookie]");
50839 return;
50840 }
50841@@ -495,7 +495,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50842
50843 /* wait for the cookie to finish being instantiated (or to fail) */
50844 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
50845- fscache_stat(&fscache_n_relinquishes_waitcrt);
50846+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
50847 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
50848 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
50849 }
50850diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
50851index ee38fef..0a326d4 100644
50852--- a/fs/fscache/internal.h
50853+++ b/fs/fscache/internal.h
50854@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
50855 * stats.c
50856 */
50857 #ifdef CONFIG_FSCACHE_STATS
50858-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
50859-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
50860+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
50861+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
50862
50863-extern atomic_t fscache_n_op_pend;
50864-extern atomic_t fscache_n_op_run;
50865-extern atomic_t fscache_n_op_enqueue;
50866-extern atomic_t fscache_n_op_deferred_release;
50867-extern atomic_t fscache_n_op_release;
50868-extern atomic_t fscache_n_op_gc;
50869-extern atomic_t fscache_n_op_cancelled;
50870-extern atomic_t fscache_n_op_rejected;
50871+extern atomic_unchecked_t fscache_n_op_pend;
50872+extern atomic_unchecked_t fscache_n_op_run;
50873+extern atomic_unchecked_t fscache_n_op_enqueue;
50874+extern atomic_unchecked_t fscache_n_op_deferred_release;
50875+extern atomic_unchecked_t fscache_n_op_release;
50876+extern atomic_unchecked_t fscache_n_op_gc;
50877+extern atomic_unchecked_t fscache_n_op_cancelled;
50878+extern atomic_unchecked_t fscache_n_op_rejected;
50879
50880-extern atomic_t fscache_n_attr_changed;
50881-extern atomic_t fscache_n_attr_changed_ok;
50882-extern atomic_t fscache_n_attr_changed_nobufs;
50883-extern atomic_t fscache_n_attr_changed_nomem;
50884-extern atomic_t fscache_n_attr_changed_calls;
50885+extern atomic_unchecked_t fscache_n_attr_changed;
50886+extern atomic_unchecked_t fscache_n_attr_changed_ok;
50887+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
50888+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
50889+extern atomic_unchecked_t fscache_n_attr_changed_calls;
50890
50891-extern atomic_t fscache_n_allocs;
50892-extern atomic_t fscache_n_allocs_ok;
50893-extern atomic_t fscache_n_allocs_wait;
50894-extern atomic_t fscache_n_allocs_nobufs;
50895-extern atomic_t fscache_n_allocs_intr;
50896-extern atomic_t fscache_n_allocs_object_dead;
50897-extern atomic_t fscache_n_alloc_ops;
50898-extern atomic_t fscache_n_alloc_op_waits;
50899+extern atomic_unchecked_t fscache_n_allocs;
50900+extern atomic_unchecked_t fscache_n_allocs_ok;
50901+extern atomic_unchecked_t fscache_n_allocs_wait;
50902+extern atomic_unchecked_t fscache_n_allocs_nobufs;
50903+extern atomic_unchecked_t fscache_n_allocs_intr;
50904+extern atomic_unchecked_t fscache_n_allocs_object_dead;
50905+extern atomic_unchecked_t fscache_n_alloc_ops;
50906+extern atomic_unchecked_t fscache_n_alloc_op_waits;
50907
50908-extern atomic_t fscache_n_retrievals;
50909-extern atomic_t fscache_n_retrievals_ok;
50910-extern atomic_t fscache_n_retrievals_wait;
50911-extern atomic_t fscache_n_retrievals_nodata;
50912-extern atomic_t fscache_n_retrievals_nobufs;
50913-extern atomic_t fscache_n_retrievals_intr;
50914-extern atomic_t fscache_n_retrievals_nomem;
50915-extern atomic_t fscache_n_retrievals_object_dead;
50916-extern atomic_t fscache_n_retrieval_ops;
50917-extern atomic_t fscache_n_retrieval_op_waits;
50918+extern atomic_unchecked_t fscache_n_retrievals;
50919+extern atomic_unchecked_t fscache_n_retrievals_ok;
50920+extern atomic_unchecked_t fscache_n_retrievals_wait;
50921+extern atomic_unchecked_t fscache_n_retrievals_nodata;
50922+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
50923+extern atomic_unchecked_t fscache_n_retrievals_intr;
50924+extern atomic_unchecked_t fscache_n_retrievals_nomem;
50925+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
50926+extern atomic_unchecked_t fscache_n_retrieval_ops;
50927+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
50928
50929-extern atomic_t fscache_n_stores;
50930-extern atomic_t fscache_n_stores_ok;
50931-extern atomic_t fscache_n_stores_again;
50932-extern atomic_t fscache_n_stores_nobufs;
50933-extern atomic_t fscache_n_stores_oom;
50934-extern atomic_t fscache_n_store_ops;
50935-extern atomic_t fscache_n_store_calls;
50936-extern atomic_t fscache_n_store_pages;
50937-extern atomic_t fscache_n_store_radix_deletes;
50938-extern atomic_t fscache_n_store_pages_over_limit;
50939+extern atomic_unchecked_t fscache_n_stores;
50940+extern atomic_unchecked_t fscache_n_stores_ok;
50941+extern atomic_unchecked_t fscache_n_stores_again;
50942+extern atomic_unchecked_t fscache_n_stores_nobufs;
50943+extern atomic_unchecked_t fscache_n_stores_oom;
50944+extern atomic_unchecked_t fscache_n_store_ops;
50945+extern atomic_unchecked_t fscache_n_store_calls;
50946+extern atomic_unchecked_t fscache_n_store_pages;
50947+extern atomic_unchecked_t fscache_n_store_radix_deletes;
50948+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
50949
50950-extern atomic_t fscache_n_store_vmscan_not_storing;
50951-extern atomic_t fscache_n_store_vmscan_gone;
50952-extern atomic_t fscache_n_store_vmscan_busy;
50953-extern atomic_t fscache_n_store_vmscan_cancelled;
50954-extern atomic_t fscache_n_store_vmscan_wait;
50955+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50956+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
50957+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
50958+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50959+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
50960
50961-extern atomic_t fscache_n_marks;
50962-extern atomic_t fscache_n_uncaches;
50963+extern atomic_unchecked_t fscache_n_marks;
50964+extern atomic_unchecked_t fscache_n_uncaches;
50965
50966-extern atomic_t fscache_n_acquires;
50967-extern atomic_t fscache_n_acquires_null;
50968-extern atomic_t fscache_n_acquires_no_cache;
50969-extern atomic_t fscache_n_acquires_ok;
50970-extern atomic_t fscache_n_acquires_nobufs;
50971-extern atomic_t fscache_n_acquires_oom;
50972+extern atomic_unchecked_t fscache_n_acquires;
50973+extern atomic_unchecked_t fscache_n_acquires_null;
50974+extern atomic_unchecked_t fscache_n_acquires_no_cache;
50975+extern atomic_unchecked_t fscache_n_acquires_ok;
50976+extern atomic_unchecked_t fscache_n_acquires_nobufs;
50977+extern atomic_unchecked_t fscache_n_acquires_oom;
50978
50979-extern atomic_t fscache_n_invalidates;
50980-extern atomic_t fscache_n_invalidates_run;
50981+extern atomic_unchecked_t fscache_n_invalidates;
50982+extern atomic_unchecked_t fscache_n_invalidates_run;
50983
50984-extern atomic_t fscache_n_updates;
50985-extern atomic_t fscache_n_updates_null;
50986-extern atomic_t fscache_n_updates_run;
50987+extern atomic_unchecked_t fscache_n_updates;
50988+extern atomic_unchecked_t fscache_n_updates_null;
50989+extern atomic_unchecked_t fscache_n_updates_run;
50990
50991-extern atomic_t fscache_n_relinquishes;
50992-extern atomic_t fscache_n_relinquishes_null;
50993-extern atomic_t fscache_n_relinquishes_waitcrt;
50994-extern atomic_t fscache_n_relinquishes_retire;
50995+extern atomic_unchecked_t fscache_n_relinquishes;
50996+extern atomic_unchecked_t fscache_n_relinquishes_null;
50997+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50998+extern atomic_unchecked_t fscache_n_relinquishes_retire;
50999
51000-extern atomic_t fscache_n_cookie_index;
51001-extern atomic_t fscache_n_cookie_data;
51002-extern atomic_t fscache_n_cookie_special;
51003+extern atomic_unchecked_t fscache_n_cookie_index;
51004+extern atomic_unchecked_t fscache_n_cookie_data;
51005+extern atomic_unchecked_t fscache_n_cookie_special;
51006
51007-extern atomic_t fscache_n_object_alloc;
51008-extern atomic_t fscache_n_object_no_alloc;
51009-extern atomic_t fscache_n_object_lookups;
51010-extern atomic_t fscache_n_object_lookups_negative;
51011-extern atomic_t fscache_n_object_lookups_positive;
51012-extern atomic_t fscache_n_object_lookups_timed_out;
51013-extern atomic_t fscache_n_object_created;
51014-extern atomic_t fscache_n_object_avail;
51015-extern atomic_t fscache_n_object_dead;
51016+extern atomic_unchecked_t fscache_n_object_alloc;
51017+extern atomic_unchecked_t fscache_n_object_no_alloc;
51018+extern atomic_unchecked_t fscache_n_object_lookups;
51019+extern atomic_unchecked_t fscache_n_object_lookups_negative;
51020+extern atomic_unchecked_t fscache_n_object_lookups_positive;
51021+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
51022+extern atomic_unchecked_t fscache_n_object_created;
51023+extern atomic_unchecked_t fscache_n_object_avail;
51024+extern atomic_unchecked_t fscache_n_object_dead;
51025
51026-extern atomic_t fscache_n_checkaux_none;
51027-extern atomic_t fscache_n_checkaux_okay;
51028-extern atomic_t fscache_n_checkaux_update;
51029-extern atomic_t fscache_n_checkaux_obsolete;
51030+extern atomic_unchecked_t fscache_n_checkaux_none;
51031+extern atomic_unchecked_t fscache_n_checkaux_okay;
51032+extern atomic_unchecked_t fscache_n_checkaux_update;
51033+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
51034
51035 extern atomic_t fscache_n_cop_alloc_object;
51036 extern atomic_t fscache_n_cop_lookup_object;
51037@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
51038 atomic_inc(stat);
51039 }
51040
51041+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
51042+{
51043+ atomic_inc_unchecked(stat);
51044+}
51045+
51046 static inline void fscache_stat_d(atomic_t *stat)
51047 {
51048 atomic_dec(stat);
51049@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
51050
51051 #define __fscache_stat(stat) (NULL)
51052 #define fscache_stat(stat) do {} while (0)
51053+#define fscache_stat_unchecked(stat) do {} while (0)
51054 #define fscache_stat_d(stat) do {} while (0)
51055 #endif
51056
51057diff --git a/fs/fscache/object.c b/fs/fscache/object.c
51058index 50d41c1..10ee117 100644
51059--- a/fs/fscache/object.c
51060+++ b/fs/fscache/object.c
51061@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51062 /* Invalidate an object on disk */
51063 case FSCACHE_OBJECT_INVALIDATING:
51064 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
51065- fscache_stat(&fscache_n_invalidates_run);
51066+ fscache_stat_unchecked(&fscache_n_invalidates_run);
51067 fscache_stat(&fscache_n_cop_invalidate_object);
51068 fscache_invalidate_object(object);
51069 fscache_stat_d(&fscache_n_cop_invalidate_object);
51070@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51071 /* update the object metadata on disk */
51072 case FSCACHE_OBJECT_UPDATING:
51073 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
51074- fscache_stat(&fscache_n_updates_run);
51075+ fscache_stat_unchecked(&fscache_n_updates_run);
51076 fscache_stat(&fscache_n_cop_update_object);
51077 object->cache->ops->update_object(object);
51078 fscache_stat_d(&fscache_n_cop_update_object);
51079@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51080 spin_lock(&object->lock);
51081 object->state = FSCACHE_OBJECT_DEAD;
51082 spin_unlock(&object->lock);
51083- fscache_stat(&fscache_n_object_dead);
51084+ fscache_stat_unchecked(&fscache_n_object_dead);
51085 goto terminal_transit;
51086
51087 /* handle the parent cache of this object being withdrawn from
51088@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51089 spin_lock(&object->lock);
51090 object->state = FSCACHE_OBJECT_DEAD;
51091 spin_unlock(&object->lock);
51092- fscache_stat(&fscache_n_object_dead);
51093+ fscache_stat_unchecked(&fscache_n_object_dead);
51094 goto terminal_transit;
51095
51096 /* complain about the object being woken up once it is
51097@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
51098 parent->cookie->def->name, cookie->def->name,
51099 object->cache->tag->name);
51100
51101- fscache_stat(&fscache_n_object_lookups);
51102+ fscache_stat_unchecked(&fscache_n_object_lookups);
51103 fscache_stat(&fscache_n_cop_lookup_object);
51104 ret = object->cache->ops->lookup_object(object);
51105 fscache_stat_d(&fscache_n_cop_lookup_object);
51106@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
51107 if (ret == -ETIMEDOUT) {
51108 /* probably stuck behind another object, so move this one to
51109 * the back of the queue */
51110- fscache_stat(&fscache_n_object_lookups_timed_out);
51111+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
51112 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
51113 }
51114
51115@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
51116
51117 spin_lock(&object->lock);
51118 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
51119- fscache_stat(&fscache_n_object_lookups_negative);
51120+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
51121
51122 /* transit here to allow write requests to begin stacking up
51123 * and read requests to begin returning ENODATA */
51124@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
51125 * result, in which case there may be data available */
51126 spin_lock(&object->lock);
51127 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
51128- fscache_stat(&fscache_n_object_lookups_positive);
51129+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
51130
51131 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
51132
51133@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
51134 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
51135 } else {
51136 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
51137- fscache_stat(&fscache_n_object_created);
51138+ fscache_stat_unchecked(&fscache_n_object_created);
51139
51140 object->state = FSCACHE_OBJECT_AVAILABLE;
51141 spin_unlock(&object->lock);
51142@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
51143 fscache_enqueue_dependents(object);
51144
51145 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
51146- fscache_stat(&fscache_n_object_avail);
51147+ fscache_stat_unchecked(&fscache_n_object_avail);
51148
51149 _leave("");
51150 }
51151@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
51152 enum fscache_checkaux result;
51153
51154 if (!object->cookie->def->check_aux) {
51155- fscache_stat(&fscache_n_checkaux_none);
51156+ fscache_stat_unchecked(&fscache_n_checkaux_none);
51157 return FSCACHE_CHECKAUX_OKAY;
51158 }
51159
51160@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
51161 switch (result) {
51162 /* entry okay as is */
51163 case FSCACHE_CHECKAUX_OKAY:
51164- fscache_stat(&fscache_n_checkaux_okay);
51165+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
51166 break;
51167
51168 /* entry requires update */
51169 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
51170- fscache_stat(&fscache_n_checkaux_update);
51171+ fscache_stat_unchecked(&fscache_n_checkaux_update);
51172 break;
51173
51174 /* entry requires deletion */
51175 case FSCACHE_CHECKAUX_OBSOLETE:
51176- fscache_stat(&fscache_n_checkaux_obsolete);
51177+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
51178 break;
51179
51180 default:
51181diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
51182index 762a9ec..2023284 100644
51183--- a/fs/fscache/operation.c
51184+++ b/fs/fscache/operation.c
51185@@ -17,7 +17,7 @@
51186 #include <linux/slab.h>
51187 #include "internal.h"
51188
51189-atomic_t fscache_op_debug_id;
51190+atomic_unchecked_t fscache_op_debug_id;
51191 EXPORT_SYMBOL(fscache_op_debug_id);
51192
51193 /**
51194@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
51195 ASSERTCMP(atomic_read(&op->usage), >, 0);
51196 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
51197
51198- fscache_stat(&fscache_n_op_enqueue);
51199+ fscache_stat_unchecked(&fscache_n_op_enqueue);
51200 switch (op->flags & FSCACHE_OP_TYPE) {
51201 case FSCACHE_OP_ASYNC:
51202 _debug("queue async");
51203@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
51204 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
51205 if (op->processor)
51206 fscache_enqueue_operation(op);
51207- fscache_stat(&fscache_n_op_run);
51208+ fscache_stat_unchecked(&fscache_n_op_run);
51209 }
51210
51211 /*
51212@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
51213 if (object->n_in_progress > 0) {
51214 atomic_inc(&op->usage);
51215 list_add_tail(&op->pend_link, &object->pending_ops);
51216- fscache_stat(&fscache_n_op_pend);
51217+ fscache_stat_unchecked(&fscache_n_op_pend);
51218 } else if (!list_empty(&object->pending_ops)) {
51219 atomic_inc(&op->usage);
51220 list_add_tail(&op->pend_link, &object->pending_ops);
51221- fscache_stat(&fscache_n_op_pend);
51222+ fscache_stat_unchecked(&fscache_n_op_pend);
51223 fscache_start_operations(object);
51224 } else {
51225 ASSERTCMP(object->n_in_progress, ==, 0);
51226@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
51227 object->n_exclusive++; /* reads and writes must wait */
51228 atomic_inc(&op->usage);
51229 list_add_tail(&op->pend_link, &object->pending_ops);
51230- fscache_stat(&fscache_n_op_pend);
51231+ fscache_stat_unchecked(&fscache_n_op_pend);
51232 ret = 0;
51233 } else {
51234 /* If we're in any other state, there must have been an I/O
51235@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
51236 if (object->n_exclusive > 0) {
51237 atomic_inc(&op->usage);
51238 list_add_tail(&op->pend_link, &object->pending_ops);
51239- fscache_stat(&fscache_n_op_pend);
51240+ fscache_stat_unchecked(&fscache_n_op_pend);
51241 } else if (!list_empty(&object->pending_ops)) {
51242 atomic_inc(&op->usage);
51243 list_add_tail(&op->pend_link, &object->pending_ops);
51244- fscache_stat(&fscache_n_op_pend);
51245+ fscache_stat_unchecked(&fscache_n_op_pend);
51246 fscache_start_operations(object);
51247 } else {
51248 ASSERTCMP(object->n_exclusive, ==, 0);
51249@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
51250 object->n_ops++;
51251 atomic_inc(&op->usage);
51252 list_add_tail(&op->pend_link, &object->pending_ops);
51253- fscache_stat(&fscache_n_op_pend);
51254+ fscache_stat_unchecked(&fscache_n_op_pend);
51255 ret = 0;
51256 } else if (object->state == FSCACHE_OBJECT_DYING ||
51257 object->state == FSCACHE_OBJECT_LC_DYING ||
51258 object->state == FSCACHE_OBJECT_WITHDRAWING) {
51259- fscache_stat(&fscache_n_op_rejected);
51260+ fscache_stat_unchecked(&fscache_n_op_rejected);
51261 op->state = FSCACHE_OP_ST_CANCELLED;
51262 ret = -ENOBUFS;
51263 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
51264@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
51265 ret = -EBUSY;
51266 if (op->state == FSCACHE_OP_ST_PENDING) {
51267 ASSERT(!list_empty(&op->pend_link));
51268- fscache_stat(&fscache_n_op_cancelled);
51269+ fscache_stat_unchecked(&fscache_n_op_cancelled);
51270 list_del_init(&op->pend_link);
51271 if (do_cancel)
51272 do_cancel(op);
51273@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
51274 while (!list_empty(&object->pending_ops)) {
51275 op = list_entry(object->pending_ops.next,
51276 struct fscache_operation, pend_link);
51277- fscache_stat(&fscache_n_op_cancelled);
51278+ fscache_stat_unchecked(&fscache_n_op_cancelled);
51279 list_del_init(&op->pend_link);
51280
51281 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
51282@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
51283 op->state, ==, FSCACHE_OP_ST_CANCELLED);
51284 op->state = FSCACHE_OP_ST_DEAD;
51285
51286- fscache_stat(&fscache_n_op_release);
51287+ fscache_stat_unchecked(&fscache_n_op_release);
51288
51289 if (op->release) {
51290 op->release(op);
51291@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
51292 * lock, and defer it otherwise */
51293 if (!spin_trylock(&object->lock)) {
51294 _debug("defer put");
51295- fscache_stat(&fscache_n_op_deferred_release);
51296+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
51297
51298 cache = object->cache;
51299 spin_lock(&cache->op_gc_list_lock);
51300@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
51301
51302 _debug("GC DEFERRED REL OBJ%x OP%x",
51303 object->debug_id, op->debug_id);
51304- fscache_stat(&fscache_n_op_gc);
51305+ fscache_stat_unchecked(&fscache_n_op_gc);
51306
51307 ASSERTCMP(atomic_read(&op->usage), ==, 0);
51308 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
51309diff --git a/fs/fscache/page.c b/fs/fscache/page.c
51310index ff000e5..c44ec6d 100644
51311--- a/fs/fscache/page.c
51312+++ b/fs/fscache/page.c
51313@@ -61,7 +61,7 @@ try_again:
51314 val = radix_tree_lookup(&cookie->stores, page->index);
51315 if (!val) {
51316 rcu_read_unlock();
51317- fscache_stat(&fscache_n_store_vmscan_not_storing);
51318+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
51319 __fscache_uncache_page(cookie, page);
51320 return true;
51321 }
51322@@ -91,11 +91,11 @@ try_again:
51323 spin_unlock(&cookie->stores_lock);
51324
51325 if (xpage) {
51326- fscache_stat(&fscache_n_store_vmscan_cancelled);
51327- fscache_stat(&fscache_n_store_radix_deletes);
51328+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
51329+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
51330 ASSERTCMP(xpage, ==, page);
51331 } else {
51332- fscache_stat(&fscache_n_store_vmscan_gone);
51333+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
51334 }
51335
51336 wake_up_bit(&cookie->flags, 0);
51337@@ -110,11 +110,11 @@ page_busy:
51338 * sleeping on memory allocation, so we may need to impose a timeout
51339 * too. */
51340 if (!(gfp & __GFP_WAIT)) {
51341- fscache_stat(&fscache_n_store_vmscan_busy);
51342+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
51343 return false;
51344 }
51345
51346- fscache_stat(&fscache_n_store_vmscan_wait);
51347+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
51348 __fscache_wait_on_page_write(cookie, page);
51349 gfp &= ~__GFP_WAIT;
51350 goto try_again;
51351@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
51352 FSCACHE_COOKIE_STORING_TAG);
51353 if (!radix_tree_tag_get(&cookie->stores, page->index,
51354 FSCACHE_COOKIE_PENDING_TAG)) {
51355- fscache_stat(&fscache_n_store_radix_deletes);
51356+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
51357 xpage = radix_tree_delete(&cookie->stores, page->index);
51358 }
51359 spin_unlock(&cookie->stores_lock);
51360@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
51361
51362 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
51363
51364- fscache_stat(&fscache_n_attr_changed_calls);
51365+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
51366
51367 if (fscache_object_is_active(object)) {
51368 fscache_stat(&fscache_n_cop_attr_changed);
51369@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51370
51371 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51372
51373- fscache_stat(&fscache_n_attr_changed);
51374+ fscache_stat_unchecked(&fscache_n_attr_changed);
51375
51376 op = kzalloc(sizeof(*op), GFP_KERNEL);
51377 if (!op) {
51378- fscache_stat(&fscache_n_attr_changed_nomem);
51379+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
51380 _leave(" = -ENOMEM");
51381 return -ENOMEM;
51382 }
51383@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51384 if (fscache_submit_exclusive_op(object, op) < 0)
51385 goto nobufs;
51386 spin_unlock(&cookie->lock);
51387- fscache_stat(&fscache_n_attr_changed_ok);
51388+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
51389 fscache_put_operation(op);
51390 _leave(" = 0");
51391 return 0;
51392@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51393 nobufs:
51394 spin_unlock(&cookie->lock);
51395 kfree(op);
51396- fscache_stat(&fscache_n_attr_changed_nobufs);
51397+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
51398 _leave(" = %d", -ENOBUFS);
51399 return -ENOBUFS;
51400 }
51401@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
51402 /* allocate a retrieval operation and attempt to submit it */
51403 op = kzalloc(sizeof(*op), GFP_NOIO);
51404 if (!op) {
51405- fscache_stat(&fscache_n_retrievals_nomem);
51406+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51407 return NULL;
51408 }
51409
51410@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
51411 return 0;
51412 }
51413
51414- fscache_stat(&fscache_n_retrievals_wait);
51415+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
51416
51417 jif = jiffies;
51418 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
51419 fscache_wait_bit_interruptible,
51420 TASK_INTERRUPTIBLE) != 0) {
51421- fscache_stat(&fscache_n_retrievals_intr);
51422+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
51423 _leave(" = -ERESTARTSYS");
51424 return -ERESTARTSYS;
51425 }
51426@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
51427 */
51428 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51429 struct fscache_retrieval *op,
51430- atomic_t *stat_op_waits,
51431- atomic_t *stat_object_dead)
51432+ atomic_unchecked_t *stat_op_waits,
51433+ atomic_unchecked_t *stat_object_dead)
51434 {
51435 int ret;
51436
51437@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51438 goto check_if_dead;
51439
51440 _debug(">>> WT");
51441- fscache_stat(stat_op_waits);
51442+ fscache_stat_unchecked(stat_op_waits);
51443 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
51444 fscache_wait_bit_interruptible,
51445 TASK_INTERRUPTIBLE) != 0) {
51446@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51447
51448 check_if_dead:
51449 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
51450- fscache_stat(stat_object_dead);
51451+ fscache_stat_unchecked(stat_object_dead);
51452 _leave(" = -ENOBUFS [cancelled]");
51453 return -ENOBUFS;
51454 }
51455 if (unlikely(fscache_object_is_dead(object))) {
51456 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
51457 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
51458- fscache_stat(stat_object_dead);
51459+ fscache_stat_unchecked(stat_object_dead);
51460 return -ENOBUFS;
51461 }
51462 return 0;
51463@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51464
51465 _enter("%p,%p,,,", cookie, page);
51466
51467- fscache_stat(&fscache_n_retrievals);
51468+ fscache_stat_unchecked(&fscache_n_retrievals);
51469
51470 if (hlist_empty(&cookie->backing_objects))
51471 goto nobufs;
51472@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51473 goto nobufs_unlock_dec;
51474 spin_unlock(&cookie->lock);
51475
51476- fscache_stat(&fscache_n_retrieval_ops);
51477+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
51478
51479 /* pin the netfs read context in case we need to do the actual netfs
51480 * read because we've encountered a cache read failure */
51481@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51482
51483 error:
51484 if (ret == -ENOMEM)
51485- fscache_stat(&fscache_n_retrievals_nomem);
51486+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51487 else if (ret == -ERESTARTSYS)
51488- fscache_stat(&fscache_n_retrievals_intr);
51489+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
51490 else if (ret == -ENODATA)
51491- fscache_stat(&fscache_n_retrievals_nodata);
51492+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
51493 else if (ret < 0)
51494- fscache_stat(&fscache_n_retrievals_nobufs);
51495+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51496 else
51497- fscache_stat(&fscache_n_retrievals_ok);
51498+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
51499
51500 fscache_put_retrieval(op);
51501 _leave(" = %d", ret);
51502@@ -467,7 +467,7 @@ nobufs_unlock:
51503 spin_unlock(&cookie->lock);
51504 kfree(op);
51505 nobufs:
51506- fscache_stat(&fscache_n_retrievals_nobufs);
51507+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51508 _leave(" = -ENOBUFS");
51509 return -ENOBUFS;
51510 }
51511@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51512
51513 _enter("%p,,%d,,,", cookie, *nr_pages);
51514
51515- fscache_stat(&fscache_n_retrievals);
51516+ fscache_stat_unchecked(&fscache_n_retrievals);
51517
51518 if (hlist_empty(&cookie->backing_objects))
51519 goto nobufs;
51520@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51521 goto nobufs_unlock_dec;
51522 spin_unlock(&cookie->lock);
51523
51524- fscache_stat(&fscache_n_retrieval_ops);
51525+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
51526
51527 /* pin the netfs read context in case we need to do the actual netfs
51528 * read because we've encountered a cache read failure */
51529@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51530
51531 error:
51532 if (ret == -ENOMEM)
51533- fscache_stat(&fscache_n_retrievals_nomem);
51534+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51535 else if (ret == -ERESTARTSYS)
51536- fscache_stat(&fscache_n_retrievals_intr);
51537+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
51538 else if (ret == -ENODATA)
51539- fscache_stat(&fscache_n_retrievals_nodata);
51540+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
51541 else if (ret < 0)
51542- fscache_stat(&fscache_n_retrievals_nobufs);
51543+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51544 else
51545- fscache_stat(&fscache_n_retrievals_ok);
51546+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
51547
51548 fscache_put_retrieval(op);
51549 _leave(" = %d", ret);
51550@@ -591,7 +591,7 @@ nobufs_unlock:
51551 spin_unlock(&cookie->lock);
51552 kfree(op);
51553 nobufs:
51554- fscache_stat(&fscache_n_retrievals_nobufs);
51555+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51556 _leave(" = -ENOBUFS");
51557 return -ENOBUFS;
51558 }
51559@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51560
51561 _enter("%p,%p,,,", cookie, page);
51562
51563- fscache_stat(&fscache_n_allocs);
51564+ fscache_stat_unchecked(&fscache_n_allocs);
51565
51566 if (hlist_empty(&cookie->backing_objects))
51567 goto nobufs;
51568@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51569 goto nobufs_unlock;
51570 spin_unlock(&cookie->lock);
51571
51572- fscache_stat(&fscache_n_alloc_ops);
51573+ fscache_stat_unchecked(&fscache_n_alloc_ops);
51574
51575 ret = fscache_wait_for_retrieval_activation(
51576 object, op,
51577@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51578
51579 error:
51580 if (ret == -ERESTARTSYS)
51581- fscache_stat(&fscache_n_allocs_intr);
51582+ fscache_stat_unchecked(&fscache_n_allocs_intr);
51583 else if (ret < 0)
51584- fscache_stat(&fscache_n_allocs_nobufs);
51585+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
51586 else
51587- fscache_stat(&fscache_n_allocs_ok);
51588+ fscache_stat_unchecked(&fscache_n_allocs_ok);
51589
51590 fscache_put_retrieval(op);
51591 _leave(" = %d", ret);
51592@@ -677,7 +677,7 @@ nobufs_unlock:
51593 spin_unlock(&cookie->lock);
51594 kfree(op);
51595 nobufs:
51596- fscache_stat(&fscache_n_allocs_nobufs);
51597+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
51598 _leave(" = -ENOBUFS");
51599 return -ENOBUFS;
51600 }
51601@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51602
51603 spin_lock(&cookie->stores_lock);
51604
51605- fscache_stat(&fscache_n_store_calls);
51606+ fscache_stat_unchecked(&fscache_n_store_calls);
51607
51608 /* find a page to store */
51609 page = NULL;
51610@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51611 page = results[0];
51612 _debug("gang %d [%lx]", n, page->index);
51613 if (page->index > op->store_limit) {
51614- fscache_stat(&fscache_n_store_pages_over_limit);
51615+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
51616 goto superseded;
51617 }
51618
51619@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51620 spin_unlock(&cookie->stores_lock);
51621 spin_unlock(&object->lock);
51622
51623- fscache_stat(&fscache_n_store_pages);
51624+ fscache_stat_unchecked(&fscache_n_store_pages);
51625 fscache_stat(&fscache_n_cop_write_page);
51626 ret = object->cache->ops->write_page(op, page);
51627 fscache_stat_d(&fscache_n_cop_write_page);
51628@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51629 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51630 ASSERT(PageFsCache(page));
51631
51632- fscache_stat(&fscache_n_stores);
51633+ fscache_stat_unchecked(&fscache_n_stores);
51634
51635 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
51636 _leave(" = -ENOBUFS [invalidating]");
51637@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51638 spin_unlock(&cookie->stores_lock);
51639 spin_unlock(&object->lock);
51640
51641- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
51642+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51643 op->store_limit = object->store_limit;
51644
51645 if (fscache_submit_op(object, &op->op) < 0)
51646@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51647
51648 spin_unlock(&cookie->lock);
51649 radix_tree_preload_end();
51650- fscache_stat(&fscache_n_store_ops);
51651- fscache_stat(&fscache_n_stores_ok);
51652+ fscache_stat_unchecked(&fscache_n_store_ops);
51653+ fscache_stat_unchecked(&fscache_n_stores_ok);
51654
51655 /* the work queue now carries its own ref on the object */
51656 fscache_put_operation(&op->op);
51657@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51658 return 0;
51659
51660 already_queued:
51661- fscache_stat(&fscache_n_stores_again);
51662+ fscache_stat_unchecked(&fscache_n_stores_again);
51663 already_pending:
51664 spin_unlock(&cookie->stores_lock);
51665 spin_unlock(&object->lock);
51666 spin_unlock(&cookie->lock);
51667 radix_tree_preload_end();
51668 kfree(op);
51669- fscache_stat(&fscache_n_stores_ok);
51670+ fscache_stat_unchecked(&fscache_n_stores_ok);
51671 _leave(" = 0");
51672 return 0;
51673
51674@@ -959,14 +959,14 @@ nobufs:
51675 spin_unlock(&cookie->lock);
51676 radix_tree_preload_end();
51677 kfree(op);
51678- fscache_stat(&fscache_n_stores_nobufs);
51679+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
51680 _leave(" = -ENOBUFS");
51681 return -ENOBUFS;
51682
51683 nomem_free:
51684 kfree(op);
51685 nomem:
51686- fscache_stat(&fscache_n_stores_oom);
51687+ fscache_stat_unchecked(&fscache_n_stores_oom);
51688 _leave(" = -ENOMEM");
51689 return -ENOMEM;
51690 }
51691@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
51692 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51693 ASSERTCMP(page, !=, NULL);
51694
51695- fscache_stat(&fscache_n_uncaches);
51696+ fscache_stat_unchecked(&fscache_n_uncaches);
51697
51698 /* cache withdrawal may beat us to it */
51699 if (!PageFsCache(page))
51700@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
51701 struct fscache_cookie *cookie = op->op.object->cookie;
51702
51703 #ifdef CONFIG_FSCACHE_STATS
51704- atomic_inc(&fscache_n_marks);
51705+ atomic_inc_unchecked(&fscache_n_marks);
51706 #endif
51707
51708 _debug("- mark %p{%lx}", page, page->index);
51709diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
51710index 8179e8b..5072cc7 100644
51711--- a/fs/fscache/stats.c
51712+++ b/fs/fscache/stats.c
51713@@ -18,99 +18,99 @@
51714 /*
51715 * operation counters
51716 */
51717-atomic_t fscache_n_op_pend;
51718-atomic_t fscache_n_op_run;
51719-atomic_t fscache_n_op_enqueue;
51720-atomic_t fscache_n_op_requeue;
51721-atomic_t fscache_n_op_deferred_release;
51722-atomic_t fscache_n_op_release;
51723-atomic_t fscache_n_op_gc;
51724-atomic_t fscache_n_op_cancelled;
51725-atomic_t fscache_n_op_rejected;
51726+atomic_unchecked_t fscache_n_op_pend;
51727+atomic_unchecked_t fscache_n_op_run;
51728+atomic_unchecked_t fscache_n_op_enqueue;
51729+atomic_unchecked_t fscache_n_op_requeue;
51730+atomic_unchecked_t fscache_n_op_deferred_release;
51731+atomic_unchecked_t fscache_n_op_release;
51732+atomic_unchecked_t fscache_n_op_gc;
51733+atomic_unchecked_t fscache_n_op_cancelled;
51734+atomic_unchecked_t fscache_n_op_rejected;
51735
51736-atomic_t fscache_n_attr_changed;
51737-atomic_t fscache_n_attr_changed_ok;
51738-atomic_t fscache_n_attr_changed_nobufs;
51739-atomic_t fscache_n_attr_changed_nomem;
51740-atomic_t fscache_n_attr_changed_calls;
51741+atomic_unchecked_t fscache_n_attr_changed;
51742+atomic_unchecked_t fscache_n_attr_changed_ok;
51743+atomic_unchecked_t fscache_n_attr_changed_nobufs;
51744+atomic_unchecked_t fscache_n_attr_changed_nomem;
51745+atomic_unchecked_t fscache_n_attr_changed_calls;
51746
51747-atomic_t fscache_n_allocs;
51748-atomic_t fscache_n_allocs_ok;
51749-atomic_t fscache_n_allocs_wait;
51750-atomic_t fscache_n_allocs_nobufs;
51751-atomic_t fscache_n_allocs_intr;
51752-atomic_t fscache_n_allocs_object_dead;
51753-atomic_t fscache_n_alloc_ops;
51754-atomic_t fscache_n_alloc_op_waits;
51755+atomic_unchecked_t fscache_n_allocs;
51756+atomic_unchecked_t fscache_n_allocs_ok;
51757+atomic_unchecked_t fscache_n_allocs_wait;
51758+atomic_unchecked_t fscache_n_allocs_nobufs;
51759+atomic_unchecked_t fscache_n_allocs_intr;
51760+atomic_unchecked_t fscache_n_allocs_object_dead;
51761+atomic_unchecked_t fscache_n_alloc_ops;
51762+atomic_unchecked_t fscache_n_alloc_op_waits;
51763
51764-atomic_t fscache_n_retrievals;
51765-atomic_t fscache_n_retrievals_ok;
51766-atomic_t fscache_n_retrievals_wait;
51767-atomic_t fscache_n_retrievals_nodata;
51768-atomic_t fscache_n_retrievals_nobufs;
51769-atomic_t fscache_n_retrievals_intr;
51770-atomic_t fscache_n_retrievals_nomem;
51771-atomic_t fscache_n_retrievals_object_dead;
51772-atomic_t fscache_n_retrieval_ops;
51773-atomic_t fscache_n_retrieval_op_waits;
51774+atomic_unchecked_t fscache_n_retrievals;
51775+atomic_unchecked_t fscache_n_retrievals_ok;
51776+atomic_unchecked_t fscache_n_retrievals_wait;
51777+atomic_unchecked_t fscache_n_retrievals_nodata;
51778+atomic_unchecked_t fscache_n_retrievals_nobufs;
51779+atomic_unchecked_t fscache_n_retrievals_intr;
51780+atomic_unchecked_t fscache_n_retrievals_nomem;
51781+atomic_unchecked_t fscache_n_retrievals_object_dead;
51782+atomic_unchecked_t fscache_n_retrieval_ops;
51783+atomic_unchecked_t fscache_n_retrieval_op_waits;
51784
51785-atomic_t fscache_n_stores;
51786-atomic_t fscache_n_stores_ok;
51787-atomic_t fscache_n_stores_again;
51788-atomic_t fscache_n_stores_nobufs;
51789-atomic_t fscache_n_stores_oom;
51790-atomic_t fscache_n_store_ops;
51791-atomic_t fscache_n_store_calls;
51792-atomic_t fscache_n_store_pages;
51793-atomic_t fscache_n_store_radix_deletes;
51794-atomic_t fscache_n_store_pages_over_limit;
51795+atomic_unchecked_t fscache_n_stores;
51796+atomic_unchecked_t fscache_n_stores_ok;
51797+atomic_unchecked_t fscache_n_stores_again;
51798+atomic_unchecked_t fscache_n_stores_nobufs;
51799+atomic_unchecked_t fscache_n_stores_oom;
51800+atomic_unchecked_t fscache_n_store_ops;
51801+atomic_unchecked_t fscache_n_store_calls;
51802+atomic_unchecked_t fscache_n_store_pages;
51803+atomic_unchecked_t fscache_n_store_radix_deletes;
51804+atomic_unchecked_t fscache_n_store_pages_over_limit;
51805
51806-atomic_t fscache_n_store_vmscan_not_storing;
51807-atomic_t fscache_n_store_vmscan_gone;
51808-atomic_t fscache_n_store_vmscan_busy;
51809-atomic_t fscache_n_store_vmscan_cancelled;
51810-atomic_t fscache_n_store_vmscan_wait;
51811+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
51812+atomic_unchecked_t fscache_n_store_vmscan_gone;
51813+atomic_unchecked_t fscache_n_store_vmscan_busy;
51814+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
51815+atomic_unchecked_t fscache_n_store_vmscan_wait;
51816
51817-atomic_t fscache_n_marks;
51818-atomic_t fscache_n_uncaches;
51819+atomic_unchecked_t fscache_n_marks;
51820+atomic_unchecked_t fscache_n_uncaches;
51821
51822-atomic_t fscache_n_acquires;
51823-atomic_t fscache_n_acquires_null;
51824-atomic_t fscache_n_acquires_no_cache;
51825-atomic_t fscache_n_acquires_ok;
51826-atomic_t fscache_n_acquires_nobufs;
51827-atomic_t fscache_n_acquires_oom;
51828+atomic_unchecked_t fscache_n_acquires;
51829+atomic_unchecked_t fscache_n_acquires_null;
51830+atomic_unchecked_t fscache_n_acquires_no_cache;
51831+atomic_unchecked_t fscache_n_acquires_ok;
51832+atomic_unchecked_t fscache_n_acquires_nobufs;
51833+atomic_unchecked_t fscache_n_acquires_oom;
51834
51835-atomic_t fscache_n_invalidates;
51836-atomic_t fscache_n_invalidates_run;
51837+atomic_unchecked_t fscache_n_invalidates;
51838+atomic_unchecked_t fscache_n_invalidates_run;
51839
51840-atomic_t fscache_n_updates;
51841-atomic_t fscache_n_updates_null;
51842-atomic_t fscache_n_updates_run;
51843+atomic_unchecked_t fscache_n_updates;
51844+atomic_unchecked_t fscache_n_updates_null;
51845+atomic_unchecked_t fscache_n_updates_run;
51846
51847-atomic_t fscache_n_relinquishes;
51848-atomic_t fscache_n_relinquishes_null;
51849-atomic_t fscache_n_relinquishes_waitcrt;
51850-atomic_t fscache_n_relinquishes_retire;
51851+atomic_unchecked_t fscache_n_relinquishes;
51852+atomic_unchecked_t fscache_n_relinquishes_null;
51853+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51854+atomic_unchecked_t fscache_n_relinquishes_retire;
51855
51856-atomic_t fscache_n_cookie_index;
51857-atomic_t fscache_n_cookie_data;
51858-atomic_t fscache_n_cookie_special;
51859+atomic_unchecked_t fscache_n_cookie_index;
51860+atomic_unchecked_t fscache_n_cookie_data;
51861+atomic_unchecked_t fscache_n_cookie_special;
51862
51863-atomic_t fscache_n_object_alloc;
51864-atomic_t fscache_n_object_no_alloc;
51865-atomic_t fscache_n_object_lookups;
51866-atomic_t fscache_n_object_lookups_negative;
51867-atomic_t fscache_n_object_lookups_positive;
51868-atomic_t fscache_n_object_lookups_timed_out;
51869-atomic_t fscache_n_object_created;
51870-atomic_t fscache_n_object_avail;
51871-atomic_t fscache_n_object_dead;
51872+atomic_unchecked_t fscache_n_object_alloc;
51873+atomic_unchecked_t fscache_n_object_no_alloc;
51874+atomic_unchecked_t fscache_n_object_lookups;
51875+atomic_unchecked_t fscache_n_object_lookups_negative;
51876+atomic_unchecked_t fscache_n_object_lookups_positive;
51877+atomic_unchecked_t fscache_n_object_lookups_timed_out;
51878+atomic_unchecked_t fscache_n_object_created;
51879+atomic_unchecked_t fscache_n_object_avail;
51880+atomic_unchecked_t fscache_n_object_dead;
51881
51882-atomic_t fscache_n_checkaux_none;
51883-atomic_t fscache_n_checkaux_okay;
51884-atomic_t fscache_n_checkaux_update;
51885-atomic_t fscache_n_checkaux_obsolete;
51886+atomic_unchecked_t fscache_n_checkaux_none;
51887+atomic_unchecked_t fscache_n_checkaux_okay;
51888+atomic_unchecked_t fscache_n_checkaux_update;
51889+atomic_unchecked_t fscache_n_checkaux_obsolete;
51890
51891 atomic_t fscache_n_cop_alloc_object;
51892 atomic_t fscache_n_cop_lookup_object;
51893@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
51894 seq_puts(m, "FS-Cache statistics\n");
51895
51896 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
51897- atomic_read(&fscache_n_cookie_index),
51898- atomic_read(&fscache_n_cookie_data),
51899- atomic_read(&fscache_n_cookie_special));
51900+ atomic_read_unchecked(&fscache_n_cookie_index),
51901+ atomic_read_unchecked(&fscache_n_cookie_data),
51902+ atomic_read_unchecked(&fscache_n_cookie_special));
51903
51904 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
51905- atomic_read(&fscache_n_object_alloc),
51906- atomic_read(&fscache_n_object_no_alloc),
51907- atomic_read(&fscache_n_object_avail),
51908- atomic_read(&fscache_n_object_dead));
51909+ atomic_read_unchecked(&fscache_n_object_alloc),
51910+ atomic_read_unchecked(&fscache_n_object_no_alloc),
51911+ atomic_read_unchecked(&fscache_n_object_avail),
51912+ atomic_read_unchecked(&fscache_n_object_dead));
51913 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
51914- atomic_read(&fscache_n_checkaux_none),
51915- atomic_read(&fscache_n_checkaux_okay),
51916- atomic_read(&fscache_n_checkaux_update),
51917- atomic_read(&fscache_n_checkaux_obsolete));
51918+ atomic_read_unchecked(&fscache_n_checkaux_none),
51919+ atomic_read_unchecked(&fscache_n_checkaux_okay),
51920+ atomic_read_unchecked(&fscache_n_checkaux_update),
51921+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
51922
51923 seq_printf(m, "Pages : mrk=%u unc=%u\n",
51924- atomic_read(&fscache_n_marks),
51925- atomic_read(&fscache_n_uncaches));
51926+ atomic_read_unchecked(&fscache_n_marks),
51927+ atomic_read_unchecked(&fscache_n_uncaches));
51928
51929 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
51930 " oom=%u\n",
51931- atomic_read(&fscache_n_acquires),
51932- atomic_read(&fscache_n_acquires_null),
51933- atomic_read(&fscache_n_acquires_no_cache),
51934- atomic_read(&fscache_n_acquires_ok),
51935- atomic_read(&fscache_n_acquires_nobufs),
51936- atomic_read(&fscache_n_acquires_oom));
51937+ atomic_read_unchecked(&fscache_n_acquires),
51938+ atomic_read_unchecked(&fscache_n_acquires_null),
51939+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
51940+ atomic_read_unchecked(&fscache_n_acquires_ok),
51941+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
51942+ atomic_read_unchecked(&fscache_n_acquires_oom));
51943
51944 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
51945- atomic_read(&fscache_n_object_lookups),
51946- atomic_read(&fscache_n_object_lookups_negative),
51947- atomic_read(&fscache_n_object_lookups_positive),
51948- atomic_read(&fscache_n_object_created),
51949- atomic_read(&fscache_n_object_lookups_timed_out));
51950+ atomic_read_unchecked(&fscache_n_object_lookups),
51951+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
51952+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
51953+ atomic_read_unchecked(&fscache_n_object_created),
51954+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
51955
51956 seq_printf(m, "Invals : n=%u run=%u\n",
51957- atomic_read(&fscache_n_invalidates),
51958- atomic_read(&fscache_n_invalidates_run));
51959+ atomic_read_unchecked(&fscache_n_invalidates),
51960+ atomic_read_unchecked(&fscache_n_invalidates_run));
51961
51962 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
51963- atomic_read(&fscache_n_updates),
51964- atomic_read(&fscache_n_updates_null),
51965- atomic_read(&fscache_n_updates_run));
51966+ atomic_read_unchecked(&fscache_n_updates),
51967+ atomic_read_unchecked(&fscache_n_updates_null),
51968+ atomic_read_unchecked(&fscache_n_updates_run));
51969
51970 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
51971- atomic_read(&fscache_n_relinquishes),
51972- atomic_read(&fscache_n_relinquishes_null),
51973- atomic_read(&fscache_n_relinquishes_waitcrt),
51974- atomic_read(&fscache_n_relinquishes_retire));
51975+ atomic_read_unchecked(&fscache_n_relinquishes),
51976+ atomic_read_unchecked(&fscache_n_relinquishes_null),
51977+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
51978+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
51979
51980 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
51981- atomic_read(&fscache_n_attr_changed),
51982- atomic_read(&fscache_n_attr_changed_ok),
51983- atomic_read(&fscache_n_attr_changed_nobufs),
51984- atomic_read(&fscache_n_attr_changed_nomem),
51985- atomic_read(&fscache_n_attr_changed_calls));
51986+ atomic_read_unchecked(&fscache_n_attr_changed),
51987+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
51988+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
51989+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
51990+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
51991
51992 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
51993- atomic_read(&fscache_n_allocs),
51994- atomic_read(&fscache_n_allocs_ok),
51995- atomic_read(&fscache_n_allocs_wait),
51996- atomic_read(&fscache_n_allocs_nobufs),
51997- atomic_read(&fscache_n_allocs_intr));
51998+ atomic_read_unchecked(&fscache_n_allocs),
51999+ atomic_read_unchecked(&fscache_n_allocs_ok),
52000+ atomic_read_unchecked(&fscache_n_allocs_wait),
52001+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
52002+ atomic_read_unchecked(&fscache_n_allocs_intr));
52003 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
52004- atomic_read(&fscache_n_alloc_ops),
52005- atomic_read(&fscache_n_alloc_op_waits),
52006- atomic_read(&fscache_n_allocs_object_dead));
52007+ atomic_read_unchecked(&fscache_n_alloc_ops),
52008+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
52009+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
52010
52011 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
52012 " int=%u oom=%u\n",
52013- atomic_read(&fscache_n_retrievals),
52014- atomic_read(&fscache_n_retrievals_ok),
52015- atomic_read(&fscache_n_retrievals_wait),
52016- atomic_read(&fscache_n_retrievals_nodata),
52017- atomic_read(&fscache_n_retrievals_nobufs),
52018- atomic_read(&fscache_n_retrievals_intr),
52019- atomic_read(&fscache_n_retrievals_nomem));
52020+ atomic_read_unchecked(&fscache_n_retrievals),
52021+ atomic_read_unchecked(&fscache_n_retrievals_ok),
52022+ atomic_read_unchecked(&fscache_n_retrievals_wait),
52023+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
52024+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
52025+ atomic_read_unchecked(&fscache_n_retrievals_intr),
52026+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
52027 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
52028- atomic_read(&fscache_n_retrieval_ops),
52029- atomic_read(&fscache_n_retrieval_op_waits),
52030- atomic_read(&fscache_n_retrievals_object_dead));
52031+ atomic_read_unchecked(&fscache_n_retrieval_ops),
52032+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
52033+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
52034
52035 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
52036- atomic_read(&fscache_n_stores),
52037- atomic_read(&fscache_n_stores_ok),
52038- atomic_read(&fscache_n_stores_again),
52039- atomic_read(&fscache_n_stores_nobufs),
52040- atomic_read(&fscache_n_stores_oom));
52041+ atomic_read_unchecked(&fscache_n_stores),
52042+ atomic_read_unchecked(&fscache_n_stores_ok),
52043+ atomic_read_unchecked(&fscache_n_stores_again),
52044+ atomic_read_unchecked(&fscache_n_stores_nobufs),
52045+ atomic_read_unchecked(&fscache_n_stores_oom));
52046 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
52047- atomic_read(&fscache_n_store_ops),
52048- atomic_read(&fscache_n_store_calls),
52049- atomic_read(&fscache_n_store_pages),
52050- atomic_read(&fscache_n_store_radix_deletes),
52051- atomic_read(&fscache_n_store_pages_over_limit));
52052+ atomic_read_unchecked(&fscache_n_store_ops),
52053+ atomic_read_unchecked(&fscache_n_store_calls),
52054+ atomic_read_unchecked(&fscache_n_store_pages),
52055+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
52056+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
52057
52058 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
52059- atomic_read(&fscache_n_store_vmscan_not_storing),
52060- atomic_read(&fscache_n_store_vmscan_gone),
52061- atomic_read(&fscache_n_store_vmscan_busy),
52062- atomic_read(&fscache_n_store_vmscan_cancelled),
52063- atomic_read(&fscache_n_store_vmscan_wait));
52064+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
52065+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
52066+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
52067+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
52068+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
52069
52070 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
52071- atomic_read(&fscache_n_op_pend),
52072- atomic_read(&fscache_n_op_run),
52073- atomic_read(&fscache_n_op_enqueue),
52074- atomic_read(&fscache_n_op_cancelled),
52075- atomic_read(&fscache_n_op_rejected));
52076+ atomic_read_unchecked(&fscache_n_op_pend),
52077+ atomic_read_unchecked(&fscache_n_op_run),
52078+ atomic_read_unchecked(&fscache_n_op_enqueue),
52079+ atomic_read_unchecked(&fscache_n_op_cancelled),
52080+ atomic_read_unchecked(&fscache_n_op_rejected));
52081 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
52082- atomic_read(&fscache_n_op_deferred_release),
52083- atomic_read(&fscache_n_op_release),
52084- atomic_read(&fscache_n_op_gc));
52085+ atomic_read_unchecked(&fscache_n_op_deferred_release),
52086+ atomic_read_unchecked(&fscache_n_op_release),
52087+ atomic_read_unchecked(&fscache_n_op_gc));
52088
52089 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
52090 atomic_read(&fscache_n_cop_alloc_object),
52091diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
52092index e397b67..b0d8709 100644
52093--- a/fs/fuse/cuse.c
52094+++ b/fs/fuse/cuse.c
52095@@ -593,10 +593,12 @@ static int __init cuse_init(void)
52096 INIT_LIST_HEAD(&cuse_conntbl[i]);
52097
52098 /* inherit and extend fuse_dev_operations */
52099- cuse_channel_fops = fuse_dev_operations;
52100- cuse_channel_fops.owner = THIS_MODULE;
52101- cuse_channel_fops.open = cuse_channel_open;
52102- cuse_channel_fops.release = cuse_channel_release;
52103+ pax_open_kernel();
52104+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
52105+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
52106+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
52107+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
52108+ pax_close_kernel();
52109
52110 cuse_class = class_create(THIS_MODULE, "cuse");
52111 if (IS_ERR(cuse_class))
52112diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
52113index e83351a..41e3c9c 100644
52114--- a/fs/fuse/dev.c
52115+++ b/fs/fuse/dev.c
52116@@ -1236,7 +1236,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
52117 ret = 0;
52118 pipe_lock(pipe);
52119
52120- if (!pipe->readers) {
52121+ if (!atomic_read(&pipe->readers)) {
52122 send_sig(SIGPIPE, current, 0);
52123 if (!ret)
52124 ret = -EPIPE;
52125diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
52126index 315e1f8..91f890c 100644
52127--- a/fs/fuse/dir.c
52128+++ b/fs/fuse/dir.c
52129@@ -1233,7 +1233,7 @@ static char *read_link(struct dentry *dentry)
52130 return link;
52131 }
52132
52133-static void free_link(char *link)
52134+static void free_link(const char *link)
52135 {
52136 if (!IS_ERR(link))
52137 free_page((unsigned long) link);
52138diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
52139index 2b6f569..fcb4d1f 100644
52140--- a/fs/gfs2/inode.c
52141+++ b/fs/gfs2/inode.c
52142@@ -1499,7 +1499,7 @@ out:
52143
52144 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
52145 {
52146- char *s = nd_get_link(nd);
52147+ const char *s = nd_get_link(nd);
52148 if (!IS_ERR(s))
52149 kfree(s);
52150 }
52151diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
52152index 78bde32..767e906 100644
52153--- a/fs/hugetlbfs/inode.c
52154+++ b/fs/hugetlbfs/inode.c
52155@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
52156 struct mm_struct *mm = current->mm;
52157 struct vm_area_struct *vma;
52158 struct hstate *h = hstate_file(file);
52159+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
52160 struct vm_unmapped_area_info info;
52161
52162 if (len & ~huge_page_mask(h))
52163@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
52164 return addr;
52165 }
52166
52167+#ifdef CONFIG_PAX_RANDMMAP
52168+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
52169+#endif
52170+
52171 if (addr) {
52172 addr = ALIGN(addr, huge_page_size(h));
52173 vma = find_vma(mm, addr);
52174- if (TASK_SIZE - len >= addr &&
52175- (!vma || addr + len <= vma->vm_start))
52176+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
52177 return addr;
52178 }
52179
52180 info.flags = 0;
52181 info.length = len;
52182 info.low_limit = TASK_UNMAPPED_BASE;
52183+
52184+#ifdef CONFIG_PAX_RANDMMAP
52185+ if (mm->pax_flags & MF_PAX_RANDMMAP)
52186+ info.low_limit += mm->delta_mmap;
52187+#endif
52188+
52189 info.high_limit = TASK_SIZE;
52190 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
52191 info.align_offset = 0;
52192@@ -897,7 +907,7 @@ static struct file_system_type hugetlbfs_fs_type = {
52193 .kill_sb = kill_litter_super,
52194 };
52195
52196-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
52197+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
52198
52199 static int can_do_hugetlb_shm(void)
52200 {
52201diff --git a/fs/inode.c b/fs/inode.c
52202index 14084b7..29af1d9 100644
52203--- a/fs/inode.c
52204+++ b/fs/inode.c
52205@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
52206
52207 #ifdef CONFIG_SMP
52208 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
52209- static atomic_t shared_last_ino;
52210- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
52211+ static atomic_unchecked_t shared_last_ino;
52212+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
52213
52214 res = next - LAST_INO_BATCH;
52215 }
52216diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
52217index 4a6cf28..d3a29d3 100644
52218--- a/fs/jffs2/erase.c
52219+++ b/fs/jffs2/erase.c
52220@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
52221 struct jffs2_unknown_node marker = {
52222 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
52223 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
52224- .totlen = cpu_to_je32(c->cleanmarker_size)
52225+ .totlen = cpu_to_je32(c->cleanmarker_size),
52226+ .hdr_crc = cpu_to_je32(0)
52227 };
52228
52229 jffs2_prealloc_raw_node_refs(c, jeb, 1);
52230diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
52231index a6597d6..41b30ec 100644
52232--- a/fs/jffs2/wbuf.c
52233+++ b/fs/jffs2/wbuf.c
52234@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
52235 {
52236 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
52237 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
52238- .totlen = constant_cpu_to_je32(8)
52239+ .totlen = constant_cpu_to_je32(8),
52240+ .hdr_crc = constant_cpu_to_je32(0)
52241 };
52242
52243 /*
52244diff --git a/fs/jfs/super.c b/fs/jfs/super.c
52245index 1a543be..a4e1363 100644
52246--- a/fs/jfs/super.c
52247+++ b/fs/jfs/super.c
52248@@ -225,7 +225,7 @@ static const match_table_t tokens = {
52249 static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
52250 int *flag)
52251 {
52252- void *nls_map = (void *)-1; /* -1: no change; NULL: none */
52253+ const void *nls_map = (const void *)-1; /* -1: no change; NULL: none */
52254 char *p;
52255 struct jfs_sb_info *sbi = JFS_SBI(sb);
52256
52257@@ -253,7 +253,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
52258 /* Don't do anything ;-) */
52259 break;
52260 case Opt_iocharset:
52261- if (nls_map && nls_map != (void *) -1)
52262+ if (nls_map && nls_map != (const void *) -1)
52263 unload_nls(nls_map);
52264 if (!strcmp(args[0].from, "none"))
52265 nls_map = NULL;
52266@@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
52267
52268 jfs_inode_cachep =
52269 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
52270- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
52271+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
52272 init_once);
52273 if (jfs_inode_cachep == NULL)
52274 return -ENOMEM;
52275diff --git a/fs/libfs.c b/fs/libfs.c
52276index 916da8c..1588998 100644
52277--- a/fs/libfs.c
52278+++ b/fs/libfs.c
52279@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
52280
52281 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
52282 struct dentry *next;
52283+ char d_name[sizeof(next->d_iname)];
52284+ const unsigned char *name;
52285+
52286 next = list_entry(p, struct dentry, d_u.d_child);
52287 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
52288 if (!simple_positive(next)) {
52289@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
52290
52291 spin_unlock(&next->d_lock);
52292 spin_unlock(&dentry->d_lock);
52293- if (filldir(dirent, next->d_name.name,
52294+ name = next->d_name.name;
52295+ if (name == next->d_iname) {
52296+ memcpy(d_name, name, next->d_name.len);
52297+ name = d_name;
52298+ }
52299+ if (filldir(dirent, name,
52300 next->d_name.len, filp->f_pos,
52301 next->d_inode->i_ino,
52302 dt_type(next->d_inode)) < 0)
52303diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
52304index 52e5120..808936e 100644
52305--- a/fs/lockd/clntproc.c
52306+++ b/fs/lockd/clntproc.c
52307@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
52308 /*
52309 * Cookie counter for NLM requests
52310 */
52311-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
52312+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
52313
52314 void nlmclnt_next_cookie(struct nlm_cookie *c)
52315 {
52316- u32 cookie = atomic_inc_return(&nlm_cookie);
52317+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
52318
52319 memcpy(c->data, &cookie, 4);
52320 c->len=4;
52321diff --git a/fs/locks.c b/fs/locks.c
52322index a94e331..060bce3 100644
52323--- a/fs/locks.c
52324+++ b/fs/locks.c
52325@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
52326 return;
52327
52328 if (filp->f_op && filp->f_op->flock) {
52329- struct file_lock fl = {
52330+ struct file_lock flock = {
52331 .fl_pid = current->tgid,
52332 .fl_file = filp,
52333 .fl_flags = FL_FLOCK,
52334 .fl_type = F_UNLCK,
52335 .fl_end = OFFSET_MAX,
52336 };
52337- filp->f_op->flock(filp, F_SETLKW, &fl);
52338- if (fl.fl_ops && fl.fl_ops->fl_release_private)
52339- fl.fl_ops->fl_release_private(&fl);
52340+ filp->f_op->flock(filp, F_SETLKW, &flock);
52341+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
52342+ flock.fl_ops->fl_release_private(&flock);
52343 }
52344
52345 lock_flocks();
52346diff --git a/fs/namei.c b/fs/namei.c
52347index 43a97ee..4e585fd 100644
52348--- a/fs/namei.c
52349+++ b/fs/namei.c
52350@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
52351 if (ret != -EACCES)
52352 return ret;
52353
52354+#ifdef CONFIG_GRKERNSEC
52355+ /* we'll block if we have to log due to a denied capability use */
52356+ if (mask & MAY_NOT_BLOCK)
52357+ return -ECHILD;
52358+#endif
52359+
52360 if (S_ISDIR(inode->i_mode)) {
52361 /* DACs are overridable for directories */
52362- if (inode_capable(inode, CAP_DAC_OVERRIDE))
52363- return 0;
52364 if (!(mask & MAY_WRITE))
52365- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
52366+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
52367+ inode_capable(inode, CAP_DAC_READ_SEARCH))
52368 return 0;
52369+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
52370+ return 0;
52371 return -EACCES;
52372 }
52373 /*
52374+ * Searching includes executable on directories, else just read.
52375+ */
52376+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
52377+ if (mask == MAY_READ)
52378+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
52379+ inode_capable(inode, CAP_DAC_READ_SEARCH))
52380+ return 0;
52381+
52382+ /*
52383 * Read/write DACs are always overridable.
52384 * Executable DACs are overridable when there is
52385 * at least one exec bit set.
52386@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
52387 if (inode_capable(inode, CAP_DAC_OVERRIDE))
52388 return 0;
52389
52390- /*
52391- * Searching includes executable on directories, else just read.
52392- */
52393- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
52394- if (mask == MAY_READ)
52395- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
52396- return 0;
52397-
52398 return -EACCES;
52399 }
52400
52401@@ -826,7 +834,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
52402 {
52403 struct dentry *dentry = link->dentry;
52404 int error;
52405- char *s;
52406+ const char *s;
52407
52408 BUG_ON(nd->flags & LOOKUP_RCU);
52409
52410@@ -847,6 +855,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
52411 if (error)
52412 goto out_put_nd_path;
52413
52414+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
52415+ dentry->d_inode, dentry, nd->path.mnt)) {
52416+ error = -EACCES;
52417+ goto out_put_nd_path;
52418+ }
52419+
52420 nd->last_type = LAST_BIND;
52421 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
52422 error = PTR_ERR(*p);
52423@@ -1596,6 +1610,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
52424 break;
52425 res = walk_component(nd, path, &nd->last,
52426 nd->last_type, LOOKUP_FOLLOW);
52427+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
52428+ res = -EACCES;
52429 put_link(nd, &link, cookie);
52430 } while (res > 0);
52431
52432@@ -1694,7 +1710,7 @@ EXPORT_SYMBOL(full_name_hash);
52433 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
52434 {
52435 unsigned long a, b, adata, bdata, mask, hash, len;
52436- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
52437+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
52438
52439 hash = a = 0;
52440 len = -sizeof(unsigned long);
52441@@ -1979,6 +1995,8 @@ static int path_lookupat(int dfd, const char *name,
52442 if (err)
52443 break;
52444 err = lookup_last(nd, &path);
52445+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
52446+ err = -EACCES;
52447 put_link(nd, &link, cookie);
52448 }
52449 }
52450@@ -1986,6 +2004,19 @@ static int path_lookupat(int dfd, const char *name,
52451 if (!err)
52452 err = complete_walk(nd);
52453
52454+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
52455+#ifdef CONFIG_GRKERNSEC
52456+ if (flags & LOOKUP_RCU) {
52457+ path_put(&nd->path);
52458+ err = -ECHILD;
52459+ } else
52460+#endif
52461+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52462+ path_put(&nd->path);
52463+ err = -ENOENT;
52464+ }
52465+ }
52466+
52467 if (!err && nd->flags & LOOKUP_DIRECTORY) {
52468 if (!nd->inode->i_op->lookup) {
52469 path_put(&nd->path);
52470@@ -2013,8 +2044,17 @@ static int filename_lookup(int dfd, struct filename *name,
52471 retval = path_lookupat(dfd, name->name,
52472 flags | LOOKUP_REVAL, nd);
52473
52474- if (likely(!retval))
52475+ if (likely(!retval)) {
52476+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
52477+#ifdef CONFIG_GRKERNSEC
52478+ if (flags & LOOKUP_RCU)
52479+ return -ECHILD;
52480+#endif
52481+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
52482+ return -ENOENT;
52483+ }
52484 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
52485+ }
52486 return retval;
52487 }
52488
52489@@ -2392,6 +2432,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
52490 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
52491 return -EPERM;
52492
52493+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
52494+ return -EPERM;
52495+ if (gr_handle_rawio(inode))
52496+ return -EPERM;
52497+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
52498+ return -EACCES;
52499+
52500 return 0;
52501 }
52502
52503@@ -2613,7 +2660,7 @@ looked_up:
52504 * cleared otherwise prior to returning.
52505 */
52506 static int lookup_open(struct nameidata *nd, struct path *path,
52507- struct file *file,
52508+ struct path *link, struct file *file,
52509 const struct open_flags *op,
52510 bool got_write, int *opened)
52511 {
52512@@ -2648,6 +2695,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
52513 /* Negative dentry, just create the file */
52514 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
52515 umode_t mode = op->mode;
52516+
52517+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
52518+ error = -EACCES;
52519+ goto out_dput;
52520+ }
52521+
52522+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
52523+ error = -EACCES;
52524+ goto out_dput;
52525+ }
52526+
52527 if (!IS_POSIXACL(dir->d_inode))
52528 mode &= ~current_umask();
52529 /*
52530@@ -2669,6 +2727,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
52531 nd->flags & LOOKUP_EXCL);
52532 if (error)
52533 goto out_dput;
52534+ else
52535+ gr_handle_create(dentry, nd->path.mnt);
52536 }
52537 out_no_open:
52538 path->dentry = dentry;
52539@@ -2683,7 +2743,7 @@ out_dput:
52540 /*
52541 * Handle the last step of open()
52542 */
52543-static int do_last(struct nameidata *nd, struct path *path,
52544+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
52545 struct file *file, const struct open_flags *op,
52546 int *opened, struct filename *name)
52547 {
52548@@ -2712,16 +2772,44 @@ static int do_last(struct nameidata *nd, struct path *path,
52549 error = complete_walk(nd);
52550 if (error)
52551 return error;
52552+#ifdef CONFIG_GRKERNSEC
52553+ if (nd->flags & LOOKUP_RCU) {
52554+ error = -ECHILD;
52555+ goto out;
52556+ }
52557+#endif
52558+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52559+ error = -ENOENT;
52560+ goto out;
52561+ }
52562 audit_inode(name, nd->path.dentry, 0);
52563 if (open_flag & O_CREAT) {
52564 error = -EISDIR;
52565 goto out;
52566 }
52567+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
52568+ error = -EACCES;
52569+ goto out;
52570+ }
52571 goto finish_open;
52572 case LAST_BIND:
52573 error = complete_walk(nd);
52574 if (error)
52575 return error;
52576+#ifdef CONFIG_GRKERNSEC
52577+ if (nd->flags & LOOKUP_RCU) {
52578+ error = -ECHILD;
52579+ goto out;
52580+ }
52581+#endif
52582+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
52583+ error = -ENOENT;
52584+ goto out;
52585+ }
52586+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
52587+ error = -EACCES;
52588+ goto out;
52589+ }
52590 audit_inode(name, dir, 0);
52591 goto finish_open;
52592 }
52593@@ -2770,7 +2858,7 @@ retry_lookup:
52594 */
52595 }
52596 mutex_lock(&dir->d_inode->i_mutex);
52597- error = lookup_open(nd, path, file, op, got_write, opened);
52598+ error = lookup_open(nd, path, link, file, op, got_write, opened);
52599 mutex_unlock(&dir->d_inode->i_mutex);
52600
52601 if (error <= 0) {
52602@@ -2794,11 +2882,28 @@ retry_lookup:
52603 goto finish_open_created;
52604 }
52605
52606+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
52607+ error = -ENOENT;
52608+ goto exit_dput;
52609+ }
52610+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
52611+ error = -EACCES;
52612+ goto exit_dput;
52613+ }
52614+
52615 /*
52616 * create/update audit record if it already exists.
52617 */
52618- if (path->dentry->d_inode)
52619+ if (path->dentry->d_inode) {
52620+ /* only check if O_CREAT is specified, all other checks need to go
52621+ into may_open */
52622+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
52623+ error = -EACCES;
52624+ goto exit_dput;
52625+ }
52626+
52627 audit_inode(name, path->dentry, 0);
52628+ }
52629
52630 /*
52631 * If atomic_open() acquired write access it is dropped now due to
52632@@ -2839,6 +2944,11 @@ finish_lookup:
52633 }
52634 }
52635 BUG_ON(inode != path->dentry->d_inode);
52636+ /* if we're resolving a symlink to another symlink */
52637+ if (link && gr_handle_symlink_owner(link, inode)) {
52638+ error = -EACCES;
52639+ goto out;
52640+ }
52641 return 1;
52642 }
52643
52644@@ -2848,7 +2958,6 @@ finish_lookup:
52645 save_parent.dentry = nd->path.dentry;
52646 save_parent.mnt = mntget(path->mnt);
52647 nd->path.dentry = path->dentry;
52648-
52649 }
52650 nd->inode = inode;
52651 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
52652@@ -2857,6 +2966,22 @@ finish_lookup:
52653 path_put(&save_parent);
52654 return error;
52655 }
52656+
52657+#ifdef CONFIG_GRKERNSEC
52658+ if (nd->flags & LOOKUP_RCU) {
52659+ error = -ECHILD;
52660+ goto out;
52661+ }
52662+#endif
52663+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52664+ error = -ENOENT;
52665+ goto out;
52666+ }
52667+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
52668+ error = -EACCES;
52669+ goto out;
52670+ }
52671+
52672 error = -EISDIR;
52673 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
52674 goto out;
52675@@ -2955,7 +3080,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
52676 if (unlikely(error))
52677 goto out;
52678
52679- error = do_last(nd, &path, file, op, &opened, pathname);
52680+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
52681 while (unlikely(error > 0)) { /* trailing symlink */
52682 struct path link = path;
52683 void *cookie;
52684@@ -2973,7 +3098,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
52685 error = follow_link(&link, nd, &cookie);
52686 if (unlikely(error))
52687 break;
52688- error = do_last(nd, &path, file, op, &opened, pathname);
52689+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
52690 put_link(nd, &link, cookie);
52691 }
52692 out:
52693@@ -3073,8 +3198,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
52694 goto unlock;
52695
52696 error = -EEXIST;
52697- if (dentry->d_inode)
52698+ if (dentry->d_inode) {
52699+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
52700+ error = -ENOENT;
52701+ }
52702 goto fail;
52703+ }
52704 /*
52705 * Special case - lookup gave negative, but... we had foo/bar/
52706 * From the vfs_mknod() POV we just have a negative dentry -
52707@@ -3126,6 +3255,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
52708 }
52709 EXPORT_SYMBOL(user_path_create);
52710
52711+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
52712+{
52713+ struct filename *tmp = getname(pathname);
52714+ struct dentry *res;
52715+ if (IS_ERR(tmp))
52716+ return ERR_CAST(tmp);
52717+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
52718+ if (IS_ERR(res))
52719+ putname(tmp);
52720+ else
52721+ *to = tmp;
52722+ return res;
52723+}
52724+
52725 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
52726 {
52727 int error = may_create(dir, dentry);
52728@@ -3188,6 +3331,17 @@ retry:
52729
52730 if (!IS_POSIXACL(path.dentry->d_inode))
52731 mode &= ~current_umask();
52732+
52733+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
52734+ error = -EPERM;
52735+ goto out;
52736+ }
52737+
52738+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
52739+ error = -EACCES;
52740+ goto out;
52741+ }
52742+
52743 error = security_path_mknod(&path, dentry, mode, dev);
52744 if (error)
52745 goto out;
52746@@ -3204,6 +3358,8 @@ retry:
52747 break;
52748 }
52749 out:
52750+ if (!error)
52751+ gr_handle_create(dentry, path.mnt);
52752 done_path_create(&path, dentry);
52753 if (retry_estale(error, lookup_flags)) {
52754 lookup_flags |= LOOKUP_REVAL;
52755@@ -3256,9 +3412,16 @@ retry:
52756
52757 if (!IS_POSIXACL(path.dentry->d_inode))
52758 mode &= ~current_umask();
52759+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
52760+ error = -EACCES;
52761+ goto out;
52762+ }
52763 error = security_path_mkdir(&path, dentry, mode);
52764 if (!error)
52765 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
52766+ if (!error)
52767+ gr_handle_create(dentry, path.mnt);
52768+out:
52769 done_path_create(&path, dentry);
52770 if (retry_estale(error, lookup_flags)) {
52771 lookup_flags |= LOOKUP_REVAL;
52772@@ -3339,6 +3502,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
52773 struct filename *name;
52774 struct dentry *dentry;
52775 struct nameidata nd;
52776+ ino_t saved_ino = 0;
52777+ dev_t saved_dev = 0;
52778 unsigned int lookup_flags = 0;
52779 retry:
52780 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
52781@@ -3371,10 +3536,21 @@ retry:
52782 error = -ENOENT;
52783 goto exit3;
52784 }
52785+
52786+ saved_ino = dentry->d_inode->i_ino;
52787+ saved_dev = gr_get_dev_from_dentry(dentry);
52788+
52789+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
52790+ error = -EACCES;
52791+ goto exit3;
52792+ }
52793+
52794 error = security_path_rmdir(&nd.path, dentry);
52795 if (error)
52796 goto exit3;
52797 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
52798+ if (!error && (saved_dev || saved_ino))
52799+ gr_handle_delete(saved_ino, saved_dev);
52800 exit3:
52801 dput(dentry);
52802 exit2:
52803@@ -3440,6 +3616,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52804 struct dentry *dentry;
52805 struct nameidata nd;
52806 struct inode *inode = NULL;
52807+ ino_t saved_ino = 0;
52808+ dev_t saved_dev = 0;
52809 unsigned int lookup_flags = 0;
52810 retry:
52811 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
52812@@ -3466,10 +3644,22 @@ retry:
52813 if (!inode)
52814 goto slashes;
52815 ihold(inode);
52816+
52817+ if (inode->i_nlink <= 1) {
52818+ saved_ino = inode->i_ino;
52819+ saved_dev = gr_get_dev_from_dentry(dentry);
52820+ }
52821+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
52822+ error = -EACCES;
52823+ goto exit2;
52824+ }
52825+
52826 error = security_path_unlink(&nd.path, dentry);
52827 if (error)
52828 goto exit2;
52829 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
52830+ if (!error && (saved_ino || saved_dev))
52831+ gr_handle_delete(saved_ino, saved_dev);
52832 exit2:
52833 dput(dentry);
52834 }
52835@@ -3547,9 +3737,17 @@ retry:
52836 if (IS_ERR(dentry))
52837 goto out_putname;
52838
52839+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
52840+ error = -EACCES;
52841+ goto out;
52842+ }
52843+
52844 error = security_path_symlink(&path, dentry, from->name);
52845 if (!error)
52846 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
52847+ if (!error)
52848+ gr_handle_create(dentry, path.mnt);
52849+out:
52850 done_path_create(&path, dentry);
52851 if (retry_estale(error, lookup_flags)) {
52852 lookup_flags |= LOOKUP_REVAL;
52853@@ -3623,6 +3821,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52854 {
52855 struct dentry *new_dentry;
52856 struct path old_path, new_path;
52857+ struct filename *to = NULL;
52858 int how = 0;
52859 int error;
52860
52861@@ -3646,7 +3845,7 @@ retry:
52862 if (error)
52863 return error;
52864
52865- new_dentry = user_path_create(newdfd, newname, &new_path,
52866+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
52867 (how & LOOKUP_REVAL));
52868 error = PTR_ERR(new_dentry);
52869 if (IS_ERR(new_dentry))
52870@@ -3658,11 +3857,28 @@ retry:
52871 error = may_linkat(&old_path);
52872 if (unlikely(error))
52873 goto out_dput;
52874+
52875+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
52876+ old_path.dentry->d_inode,
52877+ old_path.dentry->d_inode->i_mode, to)) {
52878+ error = -EACCES;
52879+ goto out_dput;
52880+ }
52881+
52882+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
52883+ old_path.dentry, old_path.mnt, to)) {
52884+ error = -EACCES;
52885+ goto out_dput;
52886+ }
52887+
52888 error = security_path_link(old_path.dentry, &new_path, new_dentry);
52889 if (error)
52890 goto out_dput;
52891 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
52892+ if (!error)
52893+ gr_handle_create(new_dentry, new_path.mnt);
52894 out_dput:
52895+ putname(to);
52896 done_path_create(&new_path, new_dentry);
52897 if (retry_estale(error, how)) {
52898 how |= LOOKUP_REVAL;
52899@@ -3908,12 +4124,21 @@ retry:
52900 if (new_dentry == trap)
52901 goto exit5;
52902
52903+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
52904+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
52905+ to);
52906+ if (error)
52907+ goto exit5;
52908+
52909 error = security_path_rename(&oldnd.path, old_dentry,
52910 &newnd.path, new_dentry);
52911 if (error)
52912 goto exit5;
52913 error = vfs_rename(old_dir->d_inode, old_dentry,
52914 new_dir->d_inode, new_dentry);
52915+ if (!error)
52916+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
52917+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
52918 exit5:
52919 dput(new_dentry);
52920 exit4:
52921@@ -3945,6 +4170,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
52922
52923 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
52924 {
52925+ char tmpbuf[64];
52926+ const char *newlink;
52927 int len;
52928
52929 len = PTR_ERR(link);
52930@@ -3954,7 +4181,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
52931 len = strlen(link);
52932 if (len > (unsigned) buflen)
52933 len = buflen;
52934- if (copy_to_user(buffer, link, len))
52935+
52936+ if (len < sizeof(tmpbuf)) {
52937+ memcpy(tmpbuf, link, len);
52938+ newlink = tmpbuf;
52939+ } else
52940+ newlink = link;
52941+
52942+ if (copy_to_user(buffer, newlink, len))
52943 len = -EFAULT;
52944 out:
52945 return len;
52946diff --git a/fs/namespace.c b/fs/namespace.c
52947index a51054f..f9b53e5 100644
52948--- a/fs/namespace.c
52949+++ b/fs/namespace.c
52950@@ -1215,6 +1215,9 @@ static int do_umount(struct mount *mnt, int flags)
52951 if (!(sb->s_flags & MS_RDONLY))
52952 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
52953 up_write(&sb->s_umount);
52954+
52955+ gr_log_remount(mnt->mnt_devname, retval);
52956+
52957 return retval;
52958 }
52959
52960@@ -1234,6 +1237,9 @@ static int do_umount(struct mount *mnt, int flags)
52961 br_write_unlock(&vfsmount_lock);
52962 up_write(&namespace_sem);
52963 release_mounts(&umount_list);
52964+
52965+ gr_log_unmount(mnt->mnt_devname, retval);
52966+
52967 return retval;
52968 }
52969
52970@@ -2287,6 +2293,16 @@ long do_mount(const char *dev_name, const char *dir_name,
52971 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
52972 MS_STRICTATIME);
52973
52974+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
52975+ retval = -EPERM;
52976+ goto dput_out;
52977+ }
52978+
52979+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
52980+ retval = -EPERM;
52981+ goto dput_out;
52982+ }
52983+
52984 if (flags & MS_REMOUNT)
52985 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
52986 data_page);
52987@@ -2301,6 +2317,9 @@ long do_mount(const char *dev_name, const char *dir_name,
52988 dev_name, data_page);
52989 dput_out:
52990 path_put(&path);
52991+
52992+ gr_log_mount(dev_name, dir_name, retval);
52993+
52994 return retval;
52995 }
52996
52997@@ -2587,6 +2606,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
52998 if (error)
52999 goto out2;
53000
53001+ if (gr_handle_chroot_pivot()) {
53002+ error = -EPERM;
53003+ goto out2;
53004+ }
53005+
53006 get_fs_root(current->fs, &root);
53007 error = lock_mount(&old);
53008 if (error)
53009@@ -2790,7 +2814,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
53010 !nsown_capable(CAP_SYS_ADMIN))
53011 return -EPERM;
53012
53013- if (fs->users != 1)
53014+ if (atomic_read(&fs->users) != 1)
53015 return -EINVAL;
53016
53017 get_mnt_ns(mnt_ns);
53018diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
53019index 59461c9..b17c57e 100644
53020--- a/fs/nfs/callback_xdr.c
53021+++ b/fs/nfs/callback_xdr.c
53022@@ -51,7 +51,7 @@ struct callback_op {
53023 callback_decode_arg_t decode_args;
53024 callback_encode_res_t encode_res;
53025 long res_maxsize;
53026-};
53027+} __do_const;
53028
53029 static struct callback_op callback_ops[];
53030
53031diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
53032index ebeb94c..ff35337 100644
53033--- a/fs/nfs/inode.c
53034+++ b/fs/nfs/inode.c
53035@@ -1042,16 +1042,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
53036 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
53037 }
53038
53039-static atomic_long_t nfs_attr_generation_counter;
53040+static atomic_long_unchecked_t nfs_attr_generation_counter;
53041
53042 static unsigned long nfs_read_attr_generation_counter(void)
53043 {
53044- return atomic_long_read(&nfs_attr_generation_counter);
53045+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
53046 }
53047
53048 unsigned long nfs_inc_attr_generation_counter(void)
53049 {
53050- return atomic_long_inc_return(&nfs_attr_generation_counter);
53051+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
53052 }
53053
53054 void nfs_fattr_init(struct nfs_fattr *fattr)
53055diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
53056index 9d1c5db..1e13db8 100644
53057--- a/fs/nfsd/nfs4proc.c
53058+++ b/fs/nfsd/nfs4proc.c
53059@@ -1097,7 +1097,7 @@ struct nfsd4_operation {
53060 nfsd4op_rsize op_rsize_bop;
53061 stateid_getter op_get_currentstateid;
53062 stateid_setter op_set_currentstateid;
53063-};
53064+} __do_const;
53065
53066 static struct nfsd4_operation nfsd4_ops[];
53067
53068diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
53069index 0dc1158..ccf0338 100644
53070--- a/fs/nfsd/nfs4xdr.c
53071+++ b/fs/nfsd/nfs4xdr.c
53072@@ -1456,7 +1456,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
53073
53074 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
53075
53076-static nfsd4_dec nfsd4_dec_ops[] = {
53077+static const nfsd4_dec nfsd4_dec_ops[] = {
53078 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
53079 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
53080 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
53081@@ -1496,7 +1496,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
53082 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
53083 };
53084
53085-static nfsd4_dec nfsd41_dec_ops[] = {
53086+static const nfsd4_dec nfsd41_dec_ops[] = {
53087 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
53088 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
53089 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
53090@@ -1558,7 +1558,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
53091 };
53092
53093 struct nfsd4_minorversion_ops {
53094- nfsd4_dec *decoders;
53095+ const nfsd4_dec *decoders;
53096 int nops;
53097 };
53098
53099diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
53100index d586117..143d568 100644
53101--- a/fs/nfsd/vfs.c
53102+++ b/fs/nfsd/vfs.c
53103@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
53104 } else {
53105 oldfs = get_fs();
53106 set_fs(KERNEL_DS);
53107- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
53108+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
53109 set_fs(oldfs);
53110 }
53111
53112@@ -1025,7 +1025,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
53113
53114 /* Write the data. */
53115 oldfs = get_fs(); set_fs(KERNEL_DS);
53116- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
53117+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
53118 set_fs(oldfs);
53119 if (host_err < 0)
53120 goto out_nfserr;
53121@@ -1571,7 +1571,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
53122 */
53123
53124 oldfs = get_fs(); set_fs(KERNEL_DS);
53125- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
53126+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
53127 set_fs(oldfs);
53128
53129 if (host_err < 0)
53130diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
53131index fea6bd5..8ee9d81 100644
53132--- a/fs/nls/nls_base.c
53133+++ b/fs/nls/nls_base.c
53134@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
53135
53136 int register_nls(struct nls_table * nls)
53137 {
53138- struct nls_table ** tmp = &tables;
53139+ struct nls_table *tmp = tables;
53140
53141 if (nls->next)
53142 return -EBUSY;
53143
53144 spin_lock(&nls_lock);
53145- while (*tmp) {
53146- if (nls == *tmp) {
53147+ while (tmp) {
53148+ if (nls == tmp) {
53149 spin_unlock(&nls_lock);
53150 return -EBUSY;
53151 }
53152- tmp = &(*tmp)->next;
53153+ tmp = tmp->next;
53154 }
53155- nls->next = tables;
53156+ pax_open_kernel();
53157+ *(struct nls_table **)&nls->next = tables;
53158+ pax_close_kernel();
53159 tables = nls;
53160 spin_unlock(&nls_lock);
53161 return 0;
53162@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
53163
53164 int unregister_nls(struct nls_table * nls)
53165 {
53166- struct nls_table ** tmp = &tables;
53167+ struct nls_table * const * tmp = &tables;
53168
53169 spin_lock(&nls_lock);
53170 while (*tmp) {
53171 if (nls == *tmp) {
53172- *tmp = nls->next;
53173+ pax_open_kernel();
53174+ *(struct nls_table **)tmp = nls->next;
53175+ pax_close_kernel();
53176 spin_unlock(&nls_lock);
53177 return 0;
53178 }
53179diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
53180index 7424929..35f6be5 100644
53181--- a/fs/nls/nls_euc-jp.c
53182+++ b/fs/nls/nls_euc-jp.c
53183@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
53184 p_nls = load_nls("cp932");
53185
53186 if (p_nls) {
53187- table.charset2upper = p_nls->charset2upper;
53188- table.charset2lower = p_nls->charset2lower;
53189+ pax_open_kernel();
53190+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
53191+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
53192+ pax_close_kernel();
53193 return register_nls(&table);
53194 }
53195
53196diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
53197index e7bc1d7..06bd4bb 100644
53198--- a/fs/nls/nls_koi8-ru.c
53199+++ b/fs/nls/nls_koi8-ru.c
53200@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
53201 p_nls = load_nls("koi8-u");
53202
53203 if (p_nls) {
53204- table.charset2upper = p_nls->charset2upper;
53205- table.charset2lower = p_nls->charset2lower;
53206+ pax_open_kernel();
53207+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
53208+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
53209+ pax_close_kernel();
53210 return register_nls(&table);
53211 }
53212
53213diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
53214index 9ff4a5e..deb1f0f 100644
53215--- a/fs/notify/fanotify/fanotify_user.c
53216+++ b/fs/notify/fanotify/fanotify_user.c
53217@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
53218
53219 fd = fanotify_event_metadata.fd;
53220 ret = -EFAULT;
53221- if (copy_to_user(buf, &fanotify_event_metadata,
53222- fanotify_event_metadata.event_len))
53223+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
53224+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
53225 goto out_close_fd;
53226
53227 ret = prepare_for_access_response(group, event, fd);
53228diff --git a/fs/notify/notification.c b/fs/notify/notification.c
53229index 7b51b05..5ea5ef6 100644
53230--- a/fs/notify/notification.c
53231+++ b/fs/notify/notification.c
53232@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
53233 * get set to 0 so it will never get 'freed'
53234 */
53235 static struct fsnotify_event *q_overflow_event;
53236-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53237+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53238
53239 /**
53240 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
53241@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53242 */
53243 u32 fsnotify_get_cookie(void)
53244 {
53245- return atomic_inc_return(&fsnotify_sync_cookie);
53246+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
53247 }
53248 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
53249
53250diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
53251index 99e3610..02c1068 100644
53252--- a/fs/ntfs/dir.c
53253+++ b/fs/ntfs/dir.c
53254@@ -1329,7 +1329,7 @@ find_next_index_buffer:
53255 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
53256 ~(s64)(ndir->itype.index.block_size - 1)));
53257 /* Bounds checks. */
53258- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
53259+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
53260 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
53261 "inode 0x%lx or driver bug.", vdir->i_ino);
53262 goto err_out;
53263diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
53264index 5b2d4f0..c6de396 100644
53265--- a/fs/ntfs/file.c
53266+++ b/fs/ntfs/file.c
53267@@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
53268 #endif /* NTFS_RW */
53269 };
53270
53271-const struct file_operations ntfs_empty_file_ops = {};
53272+const struct file_operations ntfs_empty_file_ops __read_only;
53273
53274-const struct inode_operations ntfs_empty_inode_ops = {};
53275+const struct inode_operations ntfs_empty_inode_ops __read_only;
53276diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
53277index a9f78c7..ed8a381 100644
53278--- a/fs/ocfs2/localalloc.c
53279+++ b/fs/ocfs2/localalloc.c
53280@@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
53281 goto bail;
53282 }
53283
53284- atomic_inc(&osb->alloc_stats.moves);
53285+ atomic_inc_unchecked(&osb->alloc_stats.moves);
53286
53287 bail:
53288 if (handle)
53289diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
53290index d355e6e..578d905 100644
53291--- a/fs/ocfs2/ocfs2.h
53292+++ b/fs/ocfs2/ocfs2.h
53293@@ -235,11 +235,11 @@ enum ocfs2_vol_state
53294
53295 struct ocfs2_alloc_stats
53296 {
53297- atomic_t moves;
53298- atomic_t local_data;
53299- atomic_t bitmap_data;
53300- atomic_t bg_allocs;
53301- atomic_t bg_extends;
53302+ atomic_unchecked_t moves;
53303+ atomic_unchecked_t local_data;
53304+ atomic_unchecked_t bitmap_data;
53305+ atomic_unchecked_t bg_allocs;
53306+ atomic_unchecked_t bg_extends;
53307 };
53308
53309 enum ocfs2_local_alloc_state
53310diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
53311index b7e74b5..19c6536 100644
53312--- a/fs/ocfs2/suballoc.c
53313+++ b/fs/ocfs2/suballoc.c
53314@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
53315 mlog_errno(status);
53316 goto bail;
53317 }
53318- atomic_inc(&osb->alloc_stats.bg_extends);
53319+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
53320
53321 /* You should never ask for this much metadata */
53322 BUG_ON(bits_wanted >
53323@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
53324 mlog_errno(status);
53325 goto bail;
53326 }
53327- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53328+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53329
53330 *suballoc_loc = res.sr_bg_blkno;
53331 *suballoc_bit_start = res.sr_bit_offset;
53332@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
53333 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
53334 res->sr_bits);
53335
53336- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53337+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53338
53339 BUG_ON(res->sr_bits != 1);
53340
53341@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
53342 mlog_errno(status);
53343 goto bail;
53344 }
53345- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53346+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53347
53348 BUG_ON(res.sr_bits != 1);
53349
53350@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
53351 cluster_start,
53352 num_clusters);
53353 if (!status)
53354- atomic_inc(&osb->alloc_stats.local_data);
53355+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
53356 } else {
53357 if (min_clusters > (osb->bitmap_cpg - 1)) {
53358 /* The only paths asking for contiguousness
53359@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
53360 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
53361 res.sr_bg_blkno,
53362 res.sr_bit_offset);
53363- atomic_inc(&osb->alloc_stats.bitmap_data);
53364+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
53365 *num_clusters = res.sr_bits;
53366 }
53367 }
53368diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
53369index 0e91ec2..f4b3fc6 100644
53370--- a/fs/ocfs2/super.c
53371+++ b/fs/ocfs2/super.c
53372@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
53373 "%10s => GlobalAllocs: %d LocalAllocs: %d "
53374 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
53375 "Stats",
53376- atomic_read(&osb->alloc_stats.bitmap_data),
53377- atomic_read(&osb->alloc_stats.local_data),
53378- atomic_read(&osb->alloc_stats.bg_allocs),
53379- atomic_read(&osb->alloc_stats.moves),
53380- atomic_read(&osb->alloc_stats.bg_extends));
53381+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
53382+ atomic_read_unchecked(&osb->alloc_stats.local_data),
53383+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
53384+ atomic_read_unchecked(&osb->alloc_stats.moves),
53385+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
53386
53387 out += snprintf(buf + out, len - out,
53388 "%10s => State: %u Descriptor: %llu Size: %u bits "
53389@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
53390 spin_lock_init(&osb->osb_xattr_lock);
53391 ocfs2_init_steal_slots(osb);
53392
53393- atomic_set(&osb->alloc_stats.moves, 0);
53394- atomic_set(&osb->alloc_stats.local_data, 0);
53395- atomic_set(&osb->alloc_stats.bitmap_data, 0);
53396- atomic_set(&osb->alloc_stats.bg_allocs, 0);
53397- atomic_set(&osb->alloc_stats.bg_extends, 0);
53398+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
53399+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
53400+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
53401+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
53402+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
53403
53404 /* Copy the blockcheck stats from the superblock probe */
53405 osb->osb_ecc_stats = *stats;
53406diff --git a/fs/open.c b/fs/open.c
53407index 9b33c0c..2ffcca2 100644
53408--- a/fs/open.c
53409+++ b/fs/open.c
53410@@ -31,6 +31,8 @@
53411 #include <linux/ima.h>
53412 #include <linux/dnotify.h>
53413
53414+#define CREATE_TRACE_POINTS
53415+#include <trace/events/fs.h>
53416 #include "internal.h"
53417
53418 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
53419@@ -101,6 +103,8 @@ long vfs_truncate(struct path *path, loff_t length)
53420 error = locks_verify_truncate(inode, NULL, length);
53421 if (!error)
53422 error = security_path_truncate(path);
53423+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
53424+ error = -EACCES;
53425 if (!error)
53426 error = do_truncate(path->dentry, length, 0, NULL);
53427
53428@@ -178,6 +182,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
53429 error = locks_verify_truncate(inode, f.file, length);
53430 if (!error)
53431 error = security_path_truncate(&f.file->f_path);
53432+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
53433+ error = -EACCES;
53434 if (!error)
53435 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
53436 sb_end_write(inode->i_sb);
53437@@ -373,6 +379,9 @@ retry:
53438 if (__mnt_is_readonly(path.mnt))
53439 res = -EROFS;
53440
53441+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
53442+ res = -EACCES;
53443+
53444 out_path_release:
53445 path_put(&path);
53446 if (retry_estale(res, lookup_flags)) {
53447@@ -404,6 +413,8 @@ retry:
53448 if (error)
53449 goto dput_and_out;
53450
53451+ gr_log_chdir(path.dentry, path.mnt);
53452+
53453 set_fs_pwd(current->fs, &path);
53454
53455 dput_and_out:
53456@@ -433,6 +444,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
53457 goto out_putf;
53458
53459 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
53460+
53461+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
53462+ error = -EPERM;
53463+
53464+ if (!error)
53465+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
53466+
53467 if (!error)
53468 set_fs_pwd(current->fs, &f.file->f_path);
53469 out_putf:
53470@@ -462,7 +480,13 @@ retry:
53471 if (error)
53472 goto dput_and_out;
53473
53474+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
53475+ goto dput_and_out;
53476+
53477 set_fs_root(current->fs, &path);
53478+
53479+ gr_handle_chroot_chdir(&path);
53480+
53481 error = 0;
53482 dput_and_out:
53483 path_put(&path);
53484@@ -484,6 +508,16 @@ static int chmod_common(struct path *path, umode_t mode)
53485 if (error)
53486 return error;
53487 mutex_lock(&inode->i_mutex);
53488+
53489+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
53490+ error = -EACCES;
53491+ goto out_unlock;
53492+ }
53493+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
53494+ error = -EACCES;
53495+ goto out_unlock;
53496+ }
53497+
53498 error = security_path_chmod(path, mode);
53499 if (error)
53500 goto out_unlock;
53501@@ -544,6 +578,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
53502 uid = make_kuid(current_user_ns(), user);
53503 gid = make_kgid(current_user_ns(), group);
53504
53505+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
53506+ return -EACCES;
53507+
53508 newattrs.ia_valid = ATTR_CTIME;
53509 if (user != (uid_t) -1) {
53510 if (!uid_valid(uid))
53511@@ -960,6 +997,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
53512 } else {
53513 fsnotify_open(f);
53514 fd_install(fd, f);
53515+ trace_do_sys_open(tmp->name, flags, mode);
53516 }
53517 }
53518 putname(tmp);
53519diff --git a/fs/pipe.c b/fs/pipe.c
53520index bd3479d..fb92c4d 100644
53521--- a/fs/pipe.c
53522+++ b/fs/pipe.c
53523@@ -438,9 +438,9 @@ redo:
53524 }
53525 if (bufs) /* More to do? */
53526 continue;
53527- if (!pipe->writers)
53528+ if (!atomic_read(&pipe->writers))
53529 break;
53530- if (!pipe->waiting_writers) {
53531+ if (!atomic_read(&pipe->waiting_writers)) {
53532 /* syscall merging: Usually we must not sleep
53533 * if O_NONBLOCK is set, or if we got some data.
53534 * But if a writer sleeps in kernel space, then
53535@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
53536 mutex_lock(&inode->i_mutex);
53537 pipe = inode->i_pipe;
53538
53539- if (!pipe->readers) {
53540+ if (!atomic_read(&pipe->readers)) {
53541 send_sig(SIGPIPE, current, 0);
53542 ret = -EPIPE;
53543 goto out;
53544@@ -553,7 +553,7 @@ redo1:
53545 for (;;) {
53546 int bufs;
53547
53548- if (!pipe->readers) {
53549+ if (!atomic_read(&pipe->readers)) {
53550 send_sig(SIGPIPE, current, 0);
53551 if (!ret)
53552 ret = -EPIPE;
53553@@ -644,9 +644,9 @@ redo2:
53554 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
53555 do_wakeup = 0;
53556 }
53557- pipe->waiting_writers++;
53558+ atomic_inc(&pipe->waiting_writers);
53559 pipe_wait(pipe);
53560- pipe->waiting_writers--;
53561+ atomic_dec(&pipe->waiting_writers);
53562 }
53563 out:
53564 mutex_unlock(&inode->i_mutex);
53565@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53566 mask = 0;
53567 if (filp->f_mode & FMODE_READ) {
53568 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
53569- if (!pipe->writers && filp->f_version != pipe->w_counter)
53570+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
53571 mask |= POLLHUP;
53572 }
53573
53574@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53575 * Most Unices do not set POLLERR for FIFOs but on Linux they
53576 * behave exactly like pipes for poll().
53577 */
53578- if (!pipe->readers)
53579+ if (!atomic_read(&pipe->readers))
53580 mask |= POLLERR;
53581 }
53582
53583@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
53584
53585 mutex_lock(&inode->i_mutex);
53586 pipe = inode->i_pipe;
53587- pipe->readers -= decr;
53588- pipe->writers -= decw;
53589+ atomic_sub(decr, &pipe->readers);
53590+ atomic_sub(decw, &pipe->writers);
53591
53592- if (!pipe->readers && !pipe->writers) {
53593+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
53594 free_pipe_info(inode);
53595 } else {
53596 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
53597@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
53598
53599 if (inode->i_pipe) {
53600 ret = 0;
53601- inode->i_pipe->readers++;
53602+ atomic_inc(&inode->i_pipe->readers);
53603 }
53604
53605 mutex_unlock(&inode->i_mutex);
53606@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
53607
53608 if (inode->i_pipe) {
53609 ret = 0;
53610- inode->i_pipe->writers++;
53611+ atomic_inc(&inode->i_pipe->writers);
53612 }
53613
53614 mutex_unlock(&inode->i_mutex);
53615@@ -868,9 +868,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
53616 if (inode->i_pipe) {
53617 ret = 0;
53618 if (filp->f_mode & FMODE_READ)
53619- inode->i_pipe->readers++;
53620+ atomic_inc(&inode->i_pipe->readers);
53621 if (filp->f_mode & FMODE_WRITE)
53622- inode->i_pipe->writers++;
53623+ atomic_inc(&inode->i_pipe->writers);
53624 }
53625
53626 mutex_unlock(&inode->i_mutex);
53627@@ -962,7 +962,7 @@ void free_pipe_info(struct inode *inode)
53628 inode->i_pipe = NULL;
53629 }
53630
53631-static struct vfsmount *pipe_mnt __read_mostly;
53632+struct vfsmount *pipe_mnt __read_mostly;
53633
53634 /*
53635 * pipefs_dname() is called from d_path().
53636@@ -992,7 +992,8 @@ static struct inode * get_pipe_inode(void)
53637 goto fail_iput;
53638 inode->i_pipe = pipe;
53639
53640- pipe->readers = pipe->writers = 1;
53641+ atomic_set(&pipe->readers, 1);
53642+ atomic_set(&pipe->writers, 1);
53643 inode->i_fop = &rdwr_pipefifo_fops;
53644
53645 /*
53646diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
53647index 15af622..0e9f4467 100644
53648--- a/fs/proc/Kconfig
53649+++ b/fs/proc/Kconfig
53650@@ -30,12 +30,12 @@ config PROC_FS
53651
53652 config PROC_KCORE
53653 bool "/proc/kcore support" if !ARM
53654- depends on PROC_FS && MMU
53655+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
53656
53657 config PROC_VMCORE
53658 bool "/proc/vmcore support"
53659- depends on PROC_FS && CRASH_DUMP
53660- default y
53661+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
53662+ default n
53663 help
53664 Exports the dump image of crashed kernel in ELF format.
53665
53666@@ -59,8 +59,8 @@ config PROC_SYSCTL
53667 limited in memory.
53668
53669 config PROC_PAGE_MONITOR
53670- default y
53671- depends on PROC_FS && MMU
53672+ default n
53673+ depends on PROC_FS && MMU && !GRKERNSEC
53674 bool "Enable /proc page monitoring" if EXPERT
53675 help
53676 Various /proc files exist to monitor process memory utilization:
53677diff --git a/fs/proc/array.c b/fs/proc/array.c
53678index 6a91e6f..e54dbc14 100644
53679--- a/fs/proc/array.c
53680+++ b/fs/proc/array.c
53681@@ -60,6 +60,7 @@
53682 #include <linux/tty.h>
53683 #include <linux/string.h>
53684 #include <linux/mman.h>
53685+#include <linux/grsecurity.h>
53686 #include <linux/proc_fs.h>
53687 #include <linux/ioport.h>
53688 #include <linux/uaccess.h>
53689@@ -362,6 +363,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
53690 seq_putc(m, '\n');
53691 }
53692
53693+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53694+static inline void task_pax(struct seq_file *m, struct task_struct *p)
53695+{
53696+ if (p->mm)
53697+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
53698+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
53699+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
53700+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
53701+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
53702+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
53703+ else
53704+ seq_printf(m, "PaX:\t-----\n");
53705+}
53706+#endif
53707+
53708 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53709 struct pid *pid, struct task_struct *task)
53710 {
53711@@ -380,9 +396,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53712 task_cpus_allowed(m, task);
53713 cpuset_task_status_allowed(m, task);
53714 task_context_switch_counts(m, task);
53715+
53716+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53717+ task_pax(m, task);
53718+#endif
53719+
53720+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
53721+ task_grsec_rbac(m, task);
53722+#endif
53723+
53724 return 0;
53725 }
53726
53727+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53728+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53729+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53730+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53731+#endif
53732+
53733 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53734 struct pid *pid, struct task_struct *task, int whole)
53735 {
53736@@ -404,6 +435,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53737 char tcomm[sizeof(task->comm)];
53738 unsigned long flags;
53739
53740+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53741+ if (current->exec_id != m->exec_id) {
53742+ gr_log_badprocpid("stat");
53743+ return 0;
53744+ }
53745+#endif
53746+
53747 state = *get_task_state(task);
53748 vsize = eip = esp = 0;
53749 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
53750@@ -475,6 +513,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53751 gtime = task->gtime;
53752 }
53753
53754+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53755+ if (PAX_RAND_FLAGS(mm)) {
53756+ eip = 0;
53757+ esp = 0;
53758+ wchan = 0;
53759+ }
53760+#endif
53761+#ifdef CONFIG_GRKERNSEC_HIDESYM
53762+ wchan = 0;
53763+ eip =0;
53764+ esp =0;
53765+#endif
53766+
53767 /* scale priority and nice values from timeslices to -20..20 */
53768 /* to make it look like a "normal" Unix priority/nice value */
53769 priority = task_prio(task);
53770@@ -511,9 +562,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53771 seq_put_decimal_ull(m, ' ', vsize);
53772 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
53773 seq_put_decimal_ull(m, ' ', rsslim);
53774+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53775+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
53776+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
53777+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
53778+#else
53779 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
53780 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
53781 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
53782+#endif
53783 seq_put_decimal_ull(m, ' ', esp);
53784 seq_put_decimal_ull(m, ' ', eip);
53785 /* The signal information here is obsolete.
53786@@ -535,7 +592,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53787 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
53788 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
53789
53790- if (mm && permitted) {
53791+ if (mm && permitted
53792+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53793+ && !PAX_RAND_FLAGS(mm)
53794+#endif
53795+ ) {
53796 seq_put_decimal_ull(m, ' ', mm->start_data);
53797 seq_put_decimal_ull(m, ' ', mm->end_data);
53798 seq_put_decimal_ull(m, ' ', mm->start_brk);
53799@@ -573,8 +634,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53800 struct pid *pid, struct task_struct *task)
53801 {
53802 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
53803- struct mm_struct *mm = get_task_mm(task);
53804+ struct mm_struct *mm;
53805
53806+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53807+ if (current->exec_id != m->exec_id) {
53808+ gr_log_badprocpid("statm");
53809+ return 0;
53810+ }
53811+#endif
53812+ mm = get_task_mm(task);
53813 if (mm) {
53814 size = task_statm(mm, &shared, &text, &data, &resident);
53815 mmput(mm);
53816@@ -597,6 +665,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53817 return 0;
53818 }
53819
53820+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53821+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
53822+{
53823+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
53824+}
53825+#endif
53826+
53827 #ifdef CONFIG_CHECKPOINT_RESTORE
53828 static struct pid *
53829 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
53830diff --git a/fs/proc/base.c b/fs/proc/base.c
53831index 9b43ff77..ba3e990 100644
53832--- a/fs/proc/base.c
53833+++ b/fs/proc/base.c
53834@@ -111,6 +111,14 @@ struct pid_entry {
53835 union proc_op op;
53836 };
53837
53838+struct getdents_callback {
53839+ struct linux_dirent __user * current_dir;
53840+ struct linux_dirent __user * previous;
53841+ struct file * file;
53842+ int count;
53843+ int error;
53844+};
53845+
53846 #define NOD(NAME, MODE, IOP, FOP, OP) { \
53847 .name = (NAME), \
53848 .len = sizeof(NAME) - 1, \
53849@@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
53850 if (!mm->arg_end)
53851 goto out_mm; /* Shh! No looking before we're done */
53852
53853+ if (gr_acl_handle_procpidmem(task))
53854+ goto out_mm;
53855+
53856 len = mm->arg_end - mm->arg_start;
53857
53858 if (len > PAGE_SIZE)
53859@@ -235,12 +246,28 @@ out:
53860 return res;
53861 }
53862
53863+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53864+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53865+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53866+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53867+#endif
53868+
53869 static int proc_pid_auxv(struct task_struct *task, char *buffer)
53870 {
53871 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
53872 int res = PTR_ERR(mm);
53873 if (mm && !IS_ERR(mm)) {
53874 unsigned int nwords = 0;
53875+
53876+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53877+ /* allow if we're currently ptracing this task */
53878+ if (PAX_RAND_FLAGS(mm) &&
53879+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
53880+ mmput(mm);
53881+ return 0;
53882+ }
53883+#endif
53884+
53885 do {
53886 nwords += 2;
53887 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
53888@@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
53889 }
53890
53891
53892-#ifdef CONFIG_KALLSYMS
53893+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53894 /*
53895 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
53896 * Returns the resolved symbol. If that fails, simply return the address.
53897@@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
53898 mutex_unlock(&task->signal->cred_guard_mutex);
53899 }
53900
53901-#ifdef CONFIG_STACKTRACE
53902+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53903
53904 #define MAX_STACK_TRACE_DEPTH 64
53905
53906@@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
53907 return count;
53908 }
53909
53910-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53911+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53912 static int proc_pid_syscall(struct task_struct *task, char *buffer)
53913 {
53914 long nr;
53915@@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
53916 /************************************************************************/
53917
53918 /* permission checks */
53919-static int proc_fd_access_allowed(struct inode *inode)
53920+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
53921 {
53922 struct task_struct *task;
53923 int allowed = 0;
53924@@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
53925 */
53926 task = get_proc_task(inode);
53927 if (task) {
53928- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53929+ if (log)
53930+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53931+ else
53932+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
53933 put_task_struct(task);
53934 }
53935 return allowed;
53936@@ -555,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
53937 struct task_struct *task,
53938 int hide_pid_min)
53939 {
53940+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53941+ return false;
53942+
53943+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53944+ rcu_read_lock();
53945+ {
53946+ const struct cred *tmpcred = current_cred();
53947+ const struct cred *cred = __task_cred(task);
53948+
53949+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
53950+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53951+ || in_group_p(grsec_proc_gid)
53952+#endif
53953+ ) {
53954+ rcu_read_unlock();
53955+ return true;
53956+ }
53957+ }
53958+ rcu_read_unlock();
53959+
53960+ if (!pid->hide_pid)
53961+ return false;
53962+#endif
53963+
53964 if (pid->hide_pid < hide_pid_min)
53965 return true;
53966 if (in_group_p(pid->pid_gid))
53967 return true;
53968+
53969 return ptrace_may_access(task, PTRACE_MODE_READ);
53970 }
53971
53972@@ -576,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
53973 put_task_struct(task);
53974
53975 if (!has_perms) {
53976+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53977+ {
53978+#else
53979 if (pid->hide_pid == 2) {
53980+#endif
53981 /*
53982 * Let's make getdents(), stat(), and open()
53983 * consistent with each other. If a process
53984@@ -674,6 +733,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
53985 if (!task)
53986 return -ESRCH;
53987
53988+ if (gr_acl_handle_procpidmem(task)) {
53989+ put_task_struct(task);
53990+ return -EPERM;
53991+ }
53992+
53993 mm = mm_access(task, mode);
53994 put_task_struct(task);
53995
53996@@ -689,6 +753,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
53997
53998 file->private_data = mm;
53999
54000+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54001+ file->f_version = current->exec_id;
54002+#endif
54003+
54004 return 0;
54005 }
54006
54007@@ -710,6 +778,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
54008 ssize_t copied;
54009 char *page;
54010
54011+#ifdef CONFIG_GRKERNSEC
54012+ if (write)
54013+ return -EPERM;
54014+#endif
54015+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54016+ if (file->f_version != current->exec_id) {
54017+ gr_log_badprocpid("mem");
54018+ return 0;
54019+ }
54020+#endif
54021+
54022 if (!mm)
54023 return 0;
54024
54025@@ -814,6 +893,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
54026 if (!mm)
54027 return 0;
54028
54029+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54030+ if (file->f_version != current->exec_id) {
54031+ gr_log_badprocpid("environ");
54032+ return 0;
54033+ }
54034+#endif
54035+
54036 page = (char *)__get_free_page(GFP_TEMPORARY);
54037 if (!page)
54038 return -ENOMEM;
54039@@ -1429,7 +1515,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
54040 int error = -EACCES;
54041
54042 /* Are we allowed to snoop on the tasks file descriptors? */
54043- if (!proc_fd_access_allowed(inode))
54044+ if (!proc_fd_access_allowed(inode, 0))
54045 goto out;
54046
54047 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
54048@@ -1473,8 +1559,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
54049 struct path path;
54050
54051 /* Are we allowed to snoop on the tasks file descriptors? */
54052- if (!proc_fd_access_allowed(inode))
54053- goto out;
54054+ /* logging this is needed for learning on chromium to work properly,
54055+ but we don't want to flood the logs from 'ps' which does a readlink
54056+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
54057+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
54058+ */
54059+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
54060+ if (!proc_fd_access_allowed(inode,0))
54061+ goto out;
54062+ } else {
54063+ if (!proc_fd_access_allowed(inode,1))
54064+ goto out;
54065+ }
54066
54067 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
54068 if (error)
54069@@ -1524,7 +1620,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
54070 rcu_read_lock();
54071 cred = __task_cred(task);
54072 inode->i_uid = cred->euid;
54073+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54074+ inode->i_gid = grsec_proc_gid;
54075+#else
54076 inode->i_gid = cred->egid;
54077+#endif
54078 rcu_read_unlock();
54079 }
54080 security_task_to_inode(task, inode);
54081@@ -1560,10 +1660,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
54082 return -ENOENT;
54083 }
54084 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
54085+#ifdef CONFIG_GRKERNSEC_PROC_USER
54086+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
54087+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54088+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
54089+#endif
54090 task_dumpable(task)) {
54091 cred = __task_cred(task);
54092 stat->uid = cred->euid;
54093+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54094+ stat->gid = grsec_proc_gid;
54095+#else
54096 stat->gid = cred->egid;
54097+#endif
54098 }
54099 }
54100 rcu_read_unlock();
54101@@ -1601,11 +1710,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
54102
54103 if (task) {
54104 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
54105+#ifdef CONFIG_GRKERNSEC_PROC_USER
54106+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
54107+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54108+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
54109+#endif
54110 task_dumpable(task)) {
54111 rcu_read_lock();
54112 cred = __task_cred(task);
54113 inode->i_uid = cred->euid;
54114+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54115+ inode->i_gid = grsec_proc_gid;
54116+#else
54117 inode->i_gid = cred->egid;
54118+#endif
54119 rcu_read_unlock();
54120 } else {
54121 inode->i_uid = GLOBAL_ROOT_UID;
54122@@ -2058,6 +2176,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
54123 if (!task)
54124 goto out_no_task;
54125
54126+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54127+ goto out;
54128+
54129 /*
54130 * Yes, it does not scale. And it should not. Don't add
54131 * new entries into /proc/<tgid>/ without very good reasons.
54132@@ -2102,6 +2223,9 @@ static int proc_pident_readdir(struct file *filp,
54133 if (!task)
54134 goto out_no_task;
54135
54136+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54137+ goto out;
54138+
54139 ret = 0;
54140 i = filp->f_pos;
54141 switch (i) {
54142@@ -2515,7 +2639,7 @@ static const struct pid_entry tgid_base_stuff[] = {
54143 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
54144 #endif
54145 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
54146-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54147+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54148 INF("syscall", S_IRUGO, proc_pid_syscall),
54149 #endif
54150 INF("cmdline", S_IRUGO, proc_pid_cmdline),
54151@@ -2540,10 +2664,10 @@ static const struct pid_entry tgid_base_stuff[] = {
54152 #ifdef CONFIG_SECURITY
54153 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
54154 #endif
54155-#ifdef CONFIG_KALLSYMS
54156+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54157 INF("wchan", S_IRUGO, proc_pid_wchan),
54158 #endif
54159-#ifdef CONFIG_STACKTRACE
54160+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54161 ONE("stack", S_IRUGO, proc_pid_stack),
54162 #endif
54163 #ifdef CONFIG_SCHEDSTATS
54164@@ -2577,6 +2701,9 @@ static const struct pid_entry tgid_base_stuff[] = {
54165 #ifdef CONFIG_HARDWALL
54166 INF("hardwall", S_IRUGO, proc_pid_hardwall),
54167 #endif
54168+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54169+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
54170+#endif
54171 #ifdef CONFIG_USER_NS
54172 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
54173 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
54174@@ -2705,7 +2832,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
54175 if (!inode)
54176 goto out;
54177
54178+#ifdef CONFIG_GRKERNSEC_PROC_USER
54179+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
54180+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54181+ inode->i_gid = grsec_proc_gid;
54182+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
54183+#else
54184 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
54185+#endif
54186 inode->i_op = &proc_tgid_base_inode_operations;
54187 inode->i_fop = &proc_tgid_base_operations;
54188 inode->i_flags|=S_IMMUTABLE;
54189@@ -2743,7 +2877,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
54190 if (!task)
54191 goto out;
54192
54193+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54194+ goto out_put_task;
54195+
54196 result = proc_pid_instantiate(dir, dentry, task, NULL);
54197+out_put_task:
54198 put_task_struct(task);
54199 out:
54200 return result;
54201@@ -2806,6 +2944,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
54202 static int fake_filldir(void *buf, const char *name, int namelen,
54203 loff_t offset, u64 ino, unsigned d_type)
54204 {
54205+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
54206+ __buf->error = -EINVAL;
54207 return 0;
54208 }
54209
54210@@ -2857,7 +2997,7 @@ static const struct pid_entry tid_base_stuff[] = {
54211 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
54212 #endif
54213 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
54214-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54215+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54216 INF("syscall", S_IRUGO, proc_pid_syscall),
54217 #endif
54218 INF("cmdline", S_IRUGO, proc_pid_cmdline),
54219@@ -2884,10 +3024,10 @@ static const struct pid_entry tid_base_stuff[] = {
54220 #ifdef CONFIG_SECURITY
54221 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
54222 #endif
54223-#ifdef CONFIG_KALLSYMS
54224+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54225 INF("wchan", S_IRUGO, proc_pid_wchan),
54226 #endif
54227-#ifdef CONFIG_STACKTRACE
54228+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54229 ONE("stack", S_IRUGO, proc_pid_stack),
54230 #endif
54231 #ifdef CONFIG_SCHEDSTATS
54232diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
54233index 82676e3..5f8518a 100644
54234--- a/fs/proc/cmdline.c
54235+++ b/fs/proc/cmdline.c
54236@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
54237
54238 static int __init proc_cmdline_init(void)
54239 {
54240+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54241+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
54242+#else
54243 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
54244+#endif
54245 return 0;
54246 }
54247 module_init(proc_cmdline_init);
54248diff --git a/fs/proc/devices.c b/fs/proc/devices.c
54249index b143471..bb105e5 100644
54250--- a/fs/proc/devices.c
54251+++ b/fs/proc/devices.c
54252@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
54253
54254 static int __init proc_devices_init(void)
54255 {
54256+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54257+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
54258+#else
54259 proc_create("devices", 0, NULL, &proc_devinfo_operations);
54260+#endif
54261 return 0;
54262 }
54263 module_init(proc_devices_init);
54264diff --git a/fs/proc/fd.c b/fs/proc/fd.c
54265index d7a4a28..0201742 100644
54266--- a/fs/proc/fd.c
54267+++ b/fs/proc/fd.c
54268@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
54269 if (!task)
54270 return -ENOENT;
54271
54272- files = get_files_struct(task);
54273+ if (!gr_acl_handle_procpidmem(task))
54274+ files = get_files_struct(task);
54275 put_task_struct(task);
54276
54277 if (files) {
54278@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
54279 */
54280 int proc_fd_permission(struct inode *inode, int mask)
54281 {
54282+ struct task_struct *task;
54283 int rv = generic_permission(inode, mask);
54284- if (rv == 0)
54285- return 0;
54286+
54287 if (task_pid(current) == proc_pid(inode))
54288 rv = 0;
54289+
54290+ task = get_proc_task(inode);
54291+ if (task == NULL)
54292+ return rv;
54293+
54294+ if (gr_acl_handle_procpidmem(task))
54295+ rv = -EACCES;
54296+
54297+ put_task_struct(task);
54298+
54299 return rv;
54300 }
54301
54302diff --git a/fs/proc/inode.c b/fs/proc/inode.c
54303index 439ae688..c21ac36 100644
54304--- a/fs/proc/inode.c
54305+++ b/fs/proc/inode.c
54306@@ -21,11 +21,17 @@
54307 #include <linux/seq_file.h>
54308 #include <linux/slab.h>
54309 #include <linux/mount.h>
54310+#include <linux/grsecurity.h>
54311
54312 #include <asm/uaccess.h>
54313
54314 #include "internal.h"
54315
54316+#ifdef CONFIG_PROC_SYSCTL
54317+extern const struct inode_operations proc_sys_inode_operations;
54318+extern const struct inode_operations proc_sys_dir_operations;
54319+#endif
54320+
54321 static void proc_evict_inode(struct inode *inode)
54322 {
54323 struct proc_dir_entry *de;
54324@@ -53,6 +59,13 @@ static void proc_evict_inode(struct inode *inode)
54325 ns = PROC_I(inode)->ns;
54326 if (ns_ops && ns)
54327 ns_ops->put(ns);
54328+
54329+#ifdef CONFIG_PROC_SYSCTL
54330+ if (inode->i_op == &proc_sys_inode_operations ||
54331+ inode->i_op == &proc_sys_dir_operations)
54332+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
54333+#endif
54334+
54335 }
54336
54337 static struct kmem_cache * proc_inode_cachep;
54338@@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
54339 if (de->mode) {
54340 inode->i_mode = de->mode;
54341 inode->i_uid = de->uid;
54342+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54343+ inode->i_gid = grsec_proc_gid;
54344+#else
54345 inode->i_gid = de->gid;
54346+#endif
54347 }
54348 if (de->size)
54349 inode->i_size = de->size;
54350diff --git a/fs/proc/internal.h b/fs/proc/internal.h
54351index 252544c..04395b9 100644
54352--- a/fs/proc/internal.h
54353+++ b/fs/proc/internal.h
54354@@ -55,6 +55,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54355 struct pid *pid, struct task_struct *task);
54356 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54357 struct pid *pid, struct task_struct *task);
54358+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54359+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
54360+#endif
54361 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
54362
54363 extern const struct file_operations proc_tid_children_operations;
54364diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
54365index e96d4f1..8b116ed 100644
54366--- a/fs/proc/kcore.c
54367+++ b/fs/proc/kcore.c
54368@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54369 * the addresses in the elf_phdr on our list.
54370 */
54371 start = kc_offset_to_vaddr(*fpos - elf_buflen);
54372- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
54373+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
54374+ if (tsz > buflen)
54375 tsz = buflen;
54376-
54377+
54378 while (buflen) {
54379 struct kcore_list *m;
54380
54381@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54382 kfree(elf_buf);
54383 } else {
54384 if (kern_addr_valid(start)) {
54385- unsigned long n;
54386+ char *elf_buf;
54387+ mm_segment_t oldfs;
54388
54389- n = copy_to_user(buffer, (char *)start, tsz);
54390- /*
54391- * We cannot distinguish between fault on source
54392- * and fault on destination. When this happens
54393- * we clear too and hope it will trigger the
54394- * EFAULT again.
54395- */
54396- if (n) {
54397- if (clear_user(buffer + tsz - n,
54398- n))
54399+ elf_buf = kmalloc(tsz, GFP_KERNEL);
54400+ if (!elf_buf)
54401+ return -ENOMEM;
54402+ oldfs = get_fs();
54403+ set_fs(KERNEL_DS);
54404+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
54405+ set_fs(oldfs);
54406+ if (copy_to_user(buffer, elf_buf, tsz)) {
54407+ kfree(elf_buf);
54408 return -EFAULT;
54409+ }
54410 }
54411+ set_fs(oldfs);
54412+ kfree(elf_buf);
54413 } else {
54414 if (clear_user(buffer, tsz))
54415 return -EFAULT;
54416@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54417
54418 static int open_kcore(struct inode *inode, struct file *filp)
54419 {
54420+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
54421+ return -EPERM;
54422+#endif
54423 if (!capable(CAP_SYS_RAWIO))
54424 return -EPERM;
54425 if (kcore_need_update)
54426diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
54427index 80e4645..53e5fcf 100644
54428--- a/fs/proc/meminfo.c
54429+++ b/fs/proc/meminfo.c
54430@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54431 vmi.used >> 10,
54432 vmi.largest_chunk >> 10
54433 #ifdef CONFIG_MEMORY_FAILURE
54434- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
54435+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
54436 #endif
54437 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
54438 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
54439diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
54440index b1822dd..df622cb 100644
54441--- a/fs/proc/nommu.c
54442+++ b/fs/proc/nommu.c
54443@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
54444 if (len < 1)
54445 len = 1;
54446 seq_printf(m, "%*c", len, ' ');
54447- seq_path(m, &file->f_path, "");
54448+ seq_path(m, &file->f_path, "\n\\");
54449 }
54450
54451 seq_putc(m, '\n');
54452diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
54453index fe72cd0..21b52ff 100644
54454--- a/fs/proc/proc_net.c
54455+++ b/fs/proc/proc_net.c
54456@@ -23,6 +23,7 @@
54457 #include <linux/nsproxy.h>
54458 #include <net/net_namespace.h>
54459 #include <linux/seq_file.h>
54460+#include <linux/grsecurity.h>
54461
54462 #include "internal.h"
54463
54464@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
54465 struct task_struct *task;
54466 struct nsproxy *ns;
54467 struct net *net = NULL;
54468+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54469+ const struct cred *cred = current_cred();
54470+#endif
54471+
54472+#ifdef CONFIG_GRKERNSEC_PROC_USER
54473+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
54474+ return net;
54475+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54476+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
54477+ return net;
54478+#endif
54479
54480 rcu_read_lock();
54481 task = pid_task(proc_pid(dir), PIDTYPE_PID);
54482diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
54483index 1827d88..43b0279 100644
54484--- a/fs/proc/proc_sysctl.c
54485+++ b/fs/proc/proc_sysctl.c
54486@@ -12,11 +12,15 @@
54487 #include <linux/module.h>
54488 #include "internal.h"
54489
54490+extern int gr_handle_chroot_sysctl(const int op);
54491+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
54492+ const int op);
54493+
54494 static const struct dentry_operations proc_sys_dentry_operations;
54495 static const struct file_operations proc_sys_file_operations;
54496-static const struct inode_operations proc_sys_inode_operations;
54497+const struct inode_operations proc_sys_inode_operations;
54498 static const struct file_operations proc_sys_dir_file_operations;
54499-static const struct inode_operations proc_sys_dir_operations;
54500+const struct inode_operations proc_sys_dir_operations;
54501
54502 void proc_sys_poll_notify(struct ctl_table_poll *poll)
54503 {
54504@@ -466,6 +470,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54505
54506 err = NULL;
54507 d_set_d_op(dentry, &proc_sys_dentry_operations);
54508+
54509+ gr_handle_proc_create(dentry, inode);
54510+
54511 d_add(dentry, inode);
54512
54513 out:
54514@@ -481,6 +488,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
54515 struct inode *inode = filp->f_path.dentry->d_inode;
54516 struct ctl_table_header *head = grab_header(inode);
54517 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
54518+ int op = write ? MAY_WRITE : MAY_READ;
54519 ssize_t error;
54520 size_t res;
54521
54522@@ -492,7 +500,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
54523 * and won't be until we finish.
54524 */
54525 error = -EPERM;
54526- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
54527+ if (sysctl_perm(head, table, op))
54528 goto out;
54529
54530 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
54531@@ -500,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
54532 if (!table->proc_handler)
54533 goto out;
54534
54535+#ifdef CONFIG_GRKERNSEC
54536+ error = -EPERM;
54537+ if (gr_handle_chroot_sysctl(op))
54538+ goto out;
54539+ dget(filp->f_path.dentry);
54540+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
54541+ dput(filp->f_path.dentry);
54542+ goto out;
54543+ }
54544+ dput(filp->f_path.dentry);
54545+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
54546+ goto out;
54547+ if (write && !capable(CAP_SYS_ADMIN))
54548+ goto out;
54549+#endif
54550+
54551 /* careful: calling conventions are nasty here */
54552 res = count;
54553 error = table->proc_handler(table, write, buf, &res, ppos);
54554@@ -597,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
54555 return -ENOMEM;
54556 } else {
54557 d_set_d_op(child, &proc_sys_dentry_operations);
54558+
54559+ gr_handle_proc_create(child, inode);
54560+
54561 d_add(child, inode);
54562 }
54563 } else {
54564@@ -640,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
54565 if ((*pos)++ < file->f_pos)
54566 return 0;
54567
54568+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
54569+ return 0;
54570+
54571 if (unlikely(S_ISLNK(table->mode)))
54572 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
54573 else
54574@@ -750,6 +780,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
54575 if (IS_ERR(head))
54576 return PTR_ERR(head);
54577
54578+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
54579+ return -ENOENT;
54580+
54581 generic_fillattr(inode, stat);
54582 if (table)
54583 stat->mode = (stat->mode & S_IFMT) | table->mode;
54584@@ -772,13 +805,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
54585 .llseek = generic_file_llseek,
54586 };
54587
54588-static const struct inode_operations proc_sys_inode_operations = {
54589+const struct inode_operations proc_sys_inode_operations = {
54590 .permission = proc_sys_permission,
54591 .setattr = proc_sys_setattr,
54592 .getattr = proc_sys_getattr,
54593 };
54594
54595-static const struct inode_operations proc_sys_dir_operations = {
54596+const struct inode_operations proc_sys_dir_operations = {
54597 .lookup = proc_sys_lookup,
54598 .permission = proc_sys_permission,
54599 .setattr = proc_sys_setattr,
54600@@ -854,7 +887,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
54601 static struct ctl_dir *new_dir(struct ctl_table_set *set,
54602 const char *name, int namelen)
54603 {
54604- struct ctl_table *table;
54605+ ctl_table_no_const *table;
54606 struct ctl_dir *new;
54607 struct ctl_node *node;
54608 char *new_name;
54609@@ -866,7 +899,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
54610 return NULL;
54611
54612 node = (struct ctl_node *)(new + 1);
54613- table = (struct ctl_table *)(node + 1);
54614+ table = (ctl_table_no_const *)(node + 1);
54615 new_name = (char *)(table + 2);
54616 memcpy(new_name, name, namelen);
54617 new_name[namelen] = '\0';
54618@@ -1035,7 +1068,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
54619 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
54620 struct ctl_table_root *link_root)
54621 {
54622- struct ctl_table *link_table, *entry, *link;
54623+ ctl_table_no_const *link_table, *link;
54624+ struct ctl_table *entry;
54625 struct ctl_table_header *links;
54626 struct ctl_node *node;
54627 char *link_name;
54628@@ -1058,7 +1092,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
54629 return NULL;
54630
54631 node = (struct ctl_node *)(links + 1);
54632- link_table = (struct ctl_table *)(node + nr_entries);
54633+ link_table = (ctl_table_no_const *)(node + nr_entries);
54634 link_name = (char *)&link_table[nr_entries + 1];
54635
54636 for (link = link_table, entry = table; entry->procname; link++, entry++) {
54637@@ -1306,8 +1340,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
54638 struct ctl_table_header ***subheader, struct ctl_table_set *set,
54639 struct ctl_table *table)
54640 {
54641- struct ctl_table *ctl_table_arg = NULL;
54642- struct ctl_table *entry, *files;
54643+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
54644+ struct ctl_table *entry;
54645 int nr_files = 0;
54646 int nr_dirs = 0;
54647 int err = -ENOMEM;
54648@@ -1319,10 +1353,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
54649 nr_files++;
54650 }
54651
54652- files = table;
54653 /* If there are mixed files and directories we need a new table */
54654 if (nr_dirs && nr_files) {
54655- struct ctl_table *new;
54656+ ctl_table_no_const *new;
54657 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
54658 GFP_KERNEL);
54659 if (!files)
54660@@ -1340,7 +1373,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
54661 /* Register everything except a directory full of subdirectories */
54662 if (nr_files || !nr_dirs) {
54663 struct ctl_table_header *header;
54664- header = __register_sysctl_table(set, path, files);
54665+ header = __register_sysctl_table(set, path, files ? files : table);
54666 if (!header) {
54667 kfree(ctl_table_arg);
54668 goto out;
54669diff --git a/fs/proc/root.c b/fs/proc/root.c
54670index c6e9fac..a740964 100644
54671--- a/fs/proc/root.c
54672+++ b/fs/proc/root.c
54673@@ -176,7 +176,15 @@ void __init proc_root_init(void)
54674 #ifdef CONFIG_PROC_DEVICETREE
54675 proc_device_tree_init();
54676 #endif
54677+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54678+#ifdef CONFIG_GRKERNSEC_PROC_USER
54679+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
54680+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54681+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54682+#endif
54683+#else
54684 proc_mkdir("bus", NULL);
54685+#endif
54686 proc_sys_init();
54687 }
54688
54689diff --git a/fs/proc/self.c b/fs/proc/self.c
54690index aa5cc3b..c91a5d0 100644
54691--- a/fs/proc/self.c
54692+++ b/fs/proc/self.c
54693@@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
54694 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
54695 void *cookie)
54696 {
54697- char *s = nd_get_link(nd);
54698+ const char *s = nd_get_link(nd);
54699 if (!IS_ERR(s))
54700 kfree(s);
54701 }
54702diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
54703index ca5ce7f..02c1cf0 100644
54704--- a/fs/proc/task_mmu.c
54705+++ b/fs/proc/task_mmu.c
54706@@ -11,12 +11,19 @@
54707 #include <linux/rmap.h>
54708 #include <linux/swap.h>
54709 #include <linux/swapops.h>
54710+#include <linux/grsecurity.h>
54711
54712 #include <asm/elf.h>
54713 #include <asm/uaccess.h>
54714 #include <asm/tlbflush.h>
54715 #include "internal.h"
54716
54717+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54718+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54719+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54720+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54721+#endif
54722+
54723 void task_mem(struct seq_file *m, struct mm_struct *mm)
54724 {
54725 unsigned long data, text, lib, swap;
54726@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54727 "VmExe:\t%8lu kB\n"
54728 "VmLib:\t%8lu kB\n"
54729 "VmPTE:\t%8lu kB\n"
54730- "VmSwap:\t%8lu kB\n",
54731- hiwater_vm << (PAGE_SHIFT-10),
54732+ "VmSwap:\t%8lu kB\n"
54733+
54734+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54735+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
54736+#endif
54737+
54738+ ,hiwater_vm << (PAGE_SHIFT-10),
54739 total_vm << (PAGE_SHIFT-10),
54740 mm->locked_vm << (PAGE_SHIFT-10),
54741 mm->pinned_vm << (PAGE_SHIFT-10),
54742@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54743 data << (PAGE_SHIFT-10),
54744 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
54745 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
54746- swap << (PAGE_SHIFT-10));
54747+ swap << (PAGE_SHIFT-10)
54748+
54749+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54750+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54751+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
54752+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
54753+#else
54754+ , mm->context.user_cs_base
54755+ , mm->context.user_cs_limit
54756+#endif
54757+#endif
54758+
54759+ );
54760 }
54761
54762 unsigned long task_vsize(struct mm_struct *mm)
54763@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
54764 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
54765 }
54766
54767- /* We don't show the stack guard page in /proc/maps */
54768+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54769+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
54770+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
54771+#else
54772 start = vma->vm_start;
54773- if (stack_guard_page_start(vma, start))
54774- start += PAGE_SIZE;
54775 end = vma->vm_end;
54776- if (stack_guard_page_end(vma, end))
54777- end -= PAGE_SIZE;
54778+#endif
54779
54780 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
54781 start,
54782@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
54783 flags & VM_WRITE ? 'w' : '-',
54784 flags & VM_EXEC ? 'x' : '-',
54785 flags & VM_MAYSHARE ? 's' : 'p',
54786+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54787+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
54788+#else
54789 pgoff,
54790+#endif
54791 MAJOR(dev), MINOR(dev), ino, &len);
54792
54793 /*
54794@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
54795 */
54796 if (file) {
54797 pad_len_spaces(m, len);
54798- seq_path(m, &file->f_path, "\n");
54799+ seq_path(m, &file->f_path, "\n\\");
54800 goto done;
54801 }
54802
54803@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
54804 * Thread stack in /proc/PID/task/TID/maps or
54805 * the main process stack.
54806 */
54807- if (!is_pid || (vma->vm_start <= mm->start_stack &&
54808- vma->vm_end >= mm->start_stack)) {
54809+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
54810+ (vma->vm_start <= mm->start_stack &&
54811+ vma->vm_end >= mm->start_stack)) {
54812 name = "[stack]";
54813 } else {
54814 /* Thread stack in /proc/PID/maps */
54815@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
54816 struct proc_maps_private *priv = m->private;
54817 struct task_struct *task = priv->task;
54818
54819+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54820+ if (current->exec_id != m->exec_id) {
54821+ gr_log_badprocpid("maps");
54822+ return 0;
54823+ }
54824+#endif
54825+
54826 show_map_vma(m, vma, is_pid);
54827
54828 if (m->count < m->size) /* vma is copied successfully */
54829@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
54830 .private = &mss,
54831 };
54832
54833+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54834+ if (current->exec_id != m->exec_id) {
54835+ gr_log_badprocpid("smaps");
54836+ return 0;
54837+ }
54838+#endif
54839 memset(&mss, 0, sizeof mss);
54840- mss.vma = vma;
54841- /* mmap_sem is held in m_start */
54842- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54843- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54844-
54845+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54846+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
54847+#endif
54848+ mss.vma = vma;
54849+ /* mmap_sem is held in m_start */
54850+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54851+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54852+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54853+ }
54854+#endif
54855 show_map_vma(m, vma, is_pid);
54856
54857 seq_printf(m,
54858@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
54859 "KernelPageSize: %8lu kB\n"
54860 "MMUPageSize: %8lu kB\n"
54861 "Locked: %8lu kB\n",
54862+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54863+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
54864+#else
54865 (vma->vm_end - vma->vm_start) >> 10,
54866+#endif
54867 mss.resident >> 10,
54868 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
54869 mss.shared_clean >> 10,
54870@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
54871 int n;
54872 char buffer[50];
54873
54874+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54875+ if (current->exec_id != m->exec_id) {
54876+ gr_log_badprocpid("numa_maps");
54877+ return 0;
54878+ }
54879+#endif
54880+
54881 if (!mm)
54882 return 0;
54883
54884@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
54885 mpol_to_str(buffer, sizeof(buffer), pol);
54886 mpol_cond_put(pol);
54887
54888+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54889+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
54890+#else
54891 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
54892+#endif
54893
54894 if (file) {
54895 seq_printf(m, " file=");
54896- seq_path(m, &file->f_path, "\n\t= ");
54897+ seq_path(m, &file->f_path, "\n\t\\= ");
54898 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
54899 seq_printf(m, " heap");
54900 } else {
54901diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
54902index 1ccfa53..0848f95 100644
54903--- a/fs/proc/task_nommu.c
54904+++ b/fs/proc/task_nommu.c
54905@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54906 else
54907 bytes += kobjsize(mm);
54908
54909- if (current->fs && current->fs->users > 1)
54910+ if (current->fs && atomic_read(&current->fs->users) > 1)
54911 sbytes += kobjsize(current->fs);
54912 else
54913 bytes += kobjsize(current->fs);
54914@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
54915
54916 if (file) {
54917 pad_len_spaces(m, len);
54918- seq_path(m, &file->f_path, "");
54919+ seq_path(m, &file->f_path, "\n\\");
54920 } else if (mm) {
54921 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
54922
54923diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
54924index 16e8abb..2dcf914 100644
54925--- a/fs/quota/netlink.c
54926+++ b/fs/quota/netlink.c
54927@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
54928 void quota_send_warning(struct kqid qid, dev_t dev,
54929 const char warntype)
54930 {
54931- static atomic_t seq;
54932+ static atomic_unchecked_t seq;
54933 struct sk_buff *skb;
54934 void *msg_head;
54935 int ret;
54936@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
54937 "VFS: Not enough memory to send quota warning.\n");
54938 return;
54939 }
54940- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
54941+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
54942 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
54943 if (!msg_head) {
54944 printk(KERN_ERR
54945diff --git a/fs/readdir.c b/fs/readdir.c
54946index 5e69ef5..e5d9099 100644
54947--- a/fs/readdir.c
54948+++ b/fs/readdir.c
54949@@ -17,6 +17,7 @@
54950 #include <linux/security.h>
54951 #include <linux/syscalls.h>
54952 #include <linux/unistd.h>
54953+#include <linux/namei.h>
54954
54955 #include <asm/uaccess.h>
54956
54957@@ -67,6 +68,7 @@ struct old_linux_dirent {
54958
54959 struct readdir_callback {
54960 struct old_linux_dirent __user * dirent;
54961+ struct file * file;
54962 int result;
54963 };
54964
54965@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
54966 buf->result = -EOVERFLOW;
54967 return -EOVERFLOW;
54968 }
54969+
54970+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54971+ return 0;
54972+
54973 buf->result++;
54974 dirent = buf->dirent;
54975 if (!access_ok(VERIFY_WRITE, dirent,
54976@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
54977
54978 buf.result = 0;
54979 buf.dirent = dirent;
54980+ buf.file = f.file;
54981
54982 error = vfs_readdir(f.file, fillonedir, &buf);
54983 if (buf.result)
54984@@ -139,6 +146,7 @@ struct linux_dirent {
54985 struct getdents_callback {
54986 struct linux_dirent __user * current_dir;
54987 struct linux_dirent __user * previous;
54988+ struct file * file;
54989 int count;
54990 int error;
54991 };
54992@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
54993 buf->error = -EOVERFLOW;
54994 return -EOVERFLOW;
54995 }
54996+
54997+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54998+ return 0;
54999+
55000 dirent = buf->previous;
55001 if (dirent) {
55002 if (__put_user(offset, &dirent->d_off))
55003@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55004 buf.previous = NULL;
55005 buf.count = count;
55006 buf.error = 0;
55007+ buf.file = f.file;
55008
55009 error = vfs_readdir(f.file, filldir, &buf);
55010 if (error >= 0)
55011@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55012 struct getdents_callback64 {
55013 struct linux_dirent64 __user * current_dir;
55014 struct linux_dirent64 __user * previous;
55015+ struct file *file;
55016 int count;
55017 int error;
55018 };
55019@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
55020 buf->error = -EINVAL; /* only used if we fail.. */
55021 if (reclen > buf->count)
55022 return -EINVAL;
55023+
55024+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55025+ return 0;
55026+
55027 dirent = buf->previous;
55028 if (dirent) {
55029 if (__put_user(offset, &dirent->d_off))
55030@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55031
55032 buf.current_dir = dirent;
55033 buf.previous = NULL;
55034+ buf.file = f.file;
55035 buf.count = count;
55036 buf.error = 0;
55037
55038@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55039 error = buf.error;
55040 lastdirent = buf.previous;
55041 if (lastdirent) {
55042- typeof(lastdirent->d_off) d_off = f.file->f_pos;
55043+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
55044 if (__put_user(d_off, &lastdirent->d_off))
55045 error = -EFAULT;
55046 else
55047diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
55048index 2b7882b..1c5ef48 100644
55049--- a/fs/reiserfs/do_balan.c
55050+++ b/fs/reiserfs/do_balan.c
55051@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
55052 return;
55053 }
55054
55055- atomic_inc(&(fs_generation(tb->tb_sb)));
55056+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
55057 do_balance_starts(tb);
55058
55059 /* balance leaf returns 0 except if combining L R and S into
55060diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
55061index e60e870..f40ac16 100644
55062--- a/fs/reiserfs/procfs.c
55063+++ b/fs/reiserfs/procfs.c
55064@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
55065 "SMALL_TAILS " : "NO_TAILS ",
55066 replay_only(sb) ? "REPLAY_ONLY " : "",
55067 convert_reiserfs(sb) ? "CONV " : "",
55068- atomic_read(&r->s_generation_counter),
55069+ atomic_read_unchecked(&r->s_generation_counter),
55070 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
55071 SF(s_do_balance), SF(s_unneeded_left_neighbor),
55072 SF(s_good_search_by_key_reada), SF(s_bmaps),
55073diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
55074index 157e474..65a6114 100644
55075--- a/fs/reiserfs/reiserfs.h
55076+++ b/fs/reiserfs/reiserfs.h
55077@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
55078 /* Comment? -Hans */
55079 wait_queue_head_t s_wait;
55080 /* To be obsoleted soon by per buffer seals.. -Hans */
55081- atomic_t s_generation_counter; // increased by one every time the
55082+ atomic_unchecked_t s_generation_counter; // increased by one every time the
55083 // tree gets re-balanced
55084 unsigned long s_properties; /* File system properties. Currently holds
55085 on-disk FS format */
55086@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
55087 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
55088
55089 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
55090-#define get_generation(s) atomic_read (&fs_generation(s))
55091+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
55092 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
55093 #define __fs_changed(gen,s) (gen != get_generation (s))
55094 #define fs_changed(gen,s) \
55095diff --git a/fs/select.c b/fs/select.c
55096index 2ef72d9..f213b17 100644
55097--- a/fs/select.c
55098+++ b/fs/select.c
55099@@ -20,6 +20,7 @@
55100 #include <linux/export.h>
55101 #include <linux/slab.h>
55102 #include <linux/poll.h>
55103+#include <linux/security.h>
55104 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
55105 #include <linux/file.h>
55106 #include <linux/fdtable.h>
55107@@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
55108 struct poll_list *walk = head;
55109 unsigned long todo = nfds;
55110
55111+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
55112 if (nfds > rlimit(RLIMIT_NOFILE))
55113 return -EINVAL;
55114
55115diff --git a/fs/seq_file.c b/fs/seq_file.c
55116index f2bc3df..239d4f6 100644
55117--- a/fs/seq_file.c
55118+++ b/fs/seq_file.c
55119@@ -10,6 +10,7 @@
55120 #include <linux/seq_file.h>
55121 #include <linux/slab.h>
55122 #include <linux/cred.h>
55123+#include <linux/sched.h>
55124
55125 #include <asm/uaccess.h>
55126 #include <asm/page.h>
55127@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
55128 #ifdef CONFIG_USER_NS
55129 p->user_ns = file->f_cred->user_ns;
55130 #endif
55131+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55132+ p->exec_id = current->exec_id;
55133+#endif
55134
55135 /*
55136 * Wrappers around seq_open(e.g. swaps_open) need to be
55137@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
55138 return 0;
55139 }
55140 if (!m->buf) {
55141- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
55142+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
55143 if (!m->buf)
55144 return -ENOMEM;
55145 }
55146@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
55147 Eoverflow:
55148 m->op->stop(m, p);
55149 kfree(m->buf);
55150- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
55151+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
55152 return !m->buf ? -ENOMEM : -EAGAIN;
55153 }
55154
55155@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
55156
55157 /* grab buffer if we didn't have one */
55158 if (!m->buf) {
55159- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
55160+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
55161 if (!m->buf)
55162 goto Enomem;
55163 }
55164@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
55165 goto Fill;
55166 m->op->stop(m, p);
55167 kfree(m->buf);
55168- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
55169+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
55170 if (!m->buf)
55171 goto Enomem;
55172 m->count = 0;
55173@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
55174 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
55175 void *data)
55176 {
55177- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
55178+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
55179 int res = -ENOMEM;
55180
55181 if (op) {
55182diff --git a/fs/splice.c b/fs/splice.c
55183index 6909d89..5b2e8f9 100644
55184--- a/fs/splice.c
55185+++ b/fs/splice.c
55186@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
55187 pipe_lock(pipe);
55188
55189 for (;;) {
55190- if (!pipe->readers) {
55191+ if (!atomic_read(&pipe->readers)) {
55192 send_sig(SIGPIPE, current, 0);
55193 if (!ret)
55194 ret = -EPIPE;
55195@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
55196 do_wakeup = 0;
55197 }
55198
55199- pipe->waiting_writers++;
55200+ atomic_inc(&pipe->waiting_writers);
55201 pipe_wait(pipe);
55202- pipe->waiting_writers--;
55203+ atomic_dec(&pipe->waiting_writers);
55204 }
55205
55206 pipe_unlock(pipe);
55207@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
55208 old_fs = get_fs();
55209 set_fs(get_ds());
55210 /* The cast to a user pointer is valid due to the set_fs() */
55211- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
55212+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
55213 set_fs(old_fs);
55214
55215 return res;
55216@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
55217 old_fs = get_fs();
55218 set_fs(get_ds());
55219 /* The cast to a user pointer is valid due to the set_fs() */
55220- res = vfs_write(file, (const char __user *)buf, count, &pos);
55221+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
55222 set_fs(old_fs);
55223
55224 return res;
55225@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
55226 goto err;
55227
55228 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
55229- vec[i].iov_base = (void __user *) page_address(page);
55230+ vec[i].iov_base = (void __force_user *) page_address(page);
55231 vec[i].iov_len = this_len;
55232 spd.pages[i] = page;
55233 spd.nr_pages++;
55234@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
55235 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
55236 {
55237 while (!pipe->nrbufs) {
55238- if (!pipe->writers)
55239+ if (!atomic_read(&pipe->writers))
55240 return 0;
55241
55242- if (!pipe->waiting_writers && sd->num_spliced)
55243+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
55244 return 0;
55245
55246 if (sd->flags & SPLICE_F_NONBLOCK)
55247@@ -1189,7 +1189,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
55248 * out of the pipe right after the splice_to_pipe(). So set
55249 * PIPE_READERS appropriately.
55250 */
55251- pipe->readers = 1;
55252+ atomic_set(&pipe->readers, 1);
55253
55254 current->splice_pipe = pipe;
55255 }
55256@@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55257 ret = -ERESTARTSYS;
55258 break;
55259 }
55260- if (!pipe->writers)
55261+ if (!atomic_read(&pipe->writers))
55262 break;
55263- if (!pipe->waiting_writers) {
55264+ if (!atomic_read(&pipe->waiting_writers)) {
55265 if (flags & SPLICE_F_NONBLOCK) {
55266 ret = -EAGAIN;
55267 break;
55268@@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55269 pipe_lock(pipe);
55270
55271 while (pipe->nrbufs >= pipe->buffers) {
55272- if (!pipe->readers) {
55273+ if (!atomic_read(&pipe->readers)) {
55274 send_sig(SIGPIPE, current, 0);
55275 ret = -EPIPE;
55276 break;
55277@@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55278 ret = -ERESTARTSYS;
55279 break;
55280 }
55281- pipe->waiting_writers++;
55282+ atomic_inc(&pipe->waiting_writers);
55283 pipe_wait(pipe);
55284- pipe->waiting_writers--;
55285+ atomic_dec(&pipe->waiting_writers);
55286 }
55287
55288 pipe_unlock(pipe);
55289@@ -1823,14 +1823,14 @@ retry:
55290 pipe_double_lock(ipipe, opipe);
55291
55292 do {
55293- if (!opipe->readers) {
55294+ if (!atomic_read(&opipe->readers)) {
55295 send_sig(SIGPIPE, current, 0);
55296 if (!ret)
55297 ret = -EPIPE;
55298 break;
55299 }
55300
55301- if (!ipipe->nrbufs && !ipipe->writers)
55302+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
55303 break;
55304
55305 /*
55306@@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55307 pipe_double_lock(ipipe, opipe);
55308
55309 do {
55310- if (!opipe->readers) {
55311+ if (!atomic_read(&opipe->readers)) {
55312 send_sig(SIGPIPE, current, 0);
55313 if (!ret)
55314 ret = -EPIPE;
55315@@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55316 * return EAGAIN if we have the potential of some data in the
55317 * future, otherwise just return 0
55318 */
55319- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
55320+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
55321 ret = -EAGAIN;
55322
55323 pipe_unlock(ipipe);
55324diff --git a/fs/stat.c b/fs/stat.c
55325index 14f4545..9b7f55b 100644
55326--- a/fs/stat.c
55327+++ b/fs/stat.c
55328@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
55329 stat->gid = inode->i_gid;
55330 stat->rdev = inode->i_rdev;
55331 stat->size = i_size_read(inode);
55332- stat->atime = inode->i_atime;
55333- stat->mtime = inode->i_mtime;
55334+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
55335+ stat->atime = inode->i_ctime;
55336+ stat->mtime = inode->i_ctime;
55337+ } else {
55338+ stat->atime = inode->i_atime;
55339+ stat->mtime = inode->i_mtime;
55340+ }
55341 stat->ctime = inode->i_ctime;
55342 stat->blksize = (1 << inode->i_blkbits);
55343 stat->blocks = inode->i_blocks;
55344@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
55345 if (retval)
55346 return retval;
55347
55348- if (inode->i_op->getattr)
55349- return inode->i_op->getattr(mnt, dentry, stat);
55350+ if (inode->i_op->getattr) {
55351+ retval = inode->i_op->getattr(mnt, dentry, stat);
55352+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
55353+ stat->atime = stat->ctime;
55354+ stat->mtime = stat->ctime;
55355+ }
55356+ return retval;
55357+ }
55358
55359 generic_fillattr(inode, stat);
55360 return 0;
55361diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
55362index 2fbdff6..5530a61 100644
55363--- a/fs/sysfs/dir.c
55364+++ b/fs/sysfs/dir.c
55365@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
55366 struct sysfs_dirent *sd;
55367 int rc;
55368
55369+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
55370+ const char *parent_name = parent_sd->s_name;
55371+
55372+ mode = S_IFDIR | S_IRWXU;
55373+
55374+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
55375+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
55376+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
55377+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
55378+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
55379+#endif
55380+
55381 /* allocate */
55382 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
55383 if (!sd)
55384diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
55385index 602f56d..6853db8 100644
55386--- a/fs/sysfs/file.c
55387+++ b/fs/sysfs/file.c
55388@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
55389
55390 struct sysfs_open_dirent {
55391 atomic_t refcnt;
55392- atomic_t event;
55393+ atomic_unchecked_t event;
55394 wait_queue_head_t poll;
55395 struct list_head buffers; /* goes through sysfs_buffer.list */
55396 };
55397@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55398 if (!sysfs_get_active(attr_sd))
55399 return -ENODEV;
55400
55401- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
55402+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
55403 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
55404
55405 sysfs_put_active(attr_sd);
55406@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
55407 return -ENOMEM;
55408
55409 atomic_set(&new_od->refcnt, 0);
55410- atomic_set(&new_od->event, 1);
55411+ atomic_set_unchecked(&new_od->event, 1);
55412 init_waitqueue_head(&new_od->poll);
55413 INIT_LIST_HEAD(&new_od->buffers);
55414 goto retry;
55415@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
55416
55417 sysfs_put_active(attr_sd);
55418
55419- if (buffer->event != atomic_read(&od->event))
55420+ if (buffer->event != atomic_read_unchecked(&od->event))
55421 goto trigger;
55422
55423 return DEFAULT_POLLMASK;
55424@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
55425
55426 od = sd->s_attr.open;
55427 if (od) {
55428- atomic_inc(&od->event);
55429+ atomic_inc_unchecked(&od->event);
55430 wake_up_interruptible(&od->poll);
55431 }
55432
55433diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
55434index 3c9eb56..9dea5be 100644
55435--- a/fs/sysfs/symlink.c
55436+++ b/fs/sysfs/symlink.c
55437@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
55438
55439 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
55440 {
55441- char *page = nd_get_link(nd);
55442+ const char *page = nd_get_link(nd);
55443 if (!IS_ERR(page))
55444 free_page((unsigned long)page);
55445 }
55446diff --git a/fs/udf/misc.c b/fs/udf/misc.c
55447index c175b4d..8f36a16 100644
55448--- a/fs/udf/misc.c
55449+++ b/fs/udf/misc.c
55450@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
55451
55452 u8 udf_tag_checksum(const struct tag *t)
55453 {
55454- u8 *data = (u8 *)t;
55455+ const u8 *data = (const u8 *)t;
55456 u8 checksum = 0;
55457 int i;
55458 for (i = 0; i < sizeof(struct tag); ++i)
55459diff --git a/fs/utimes.c b/fs/utimes.c
55460index f4fb7ec..3fe03c0 100644
55461--- a/fs/utimes.c
55462+++ b/fs/utimes.c
55463@@ -1,6 +1,7 @@
55464 #include <linux/compiler.h>
55465 #include <linux/file.h>
55466 #include <linux/fs.h>
55467+#include <linux/security.h>
55468 #include <linux/linkage.h>
55469 #include <linux/mount.h>
55470 #include <linux/namei.h>
55471@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
55472 goto mnt_drop_write_and_out;
55473 }
55474 }
55475+
55476+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
55477+ error = -EACCES;
55478+ goto mnt_drop_write_and_out;
55479+ }
55480+
55481 mutex_lock(&inode->i_mutex);
55482 error = notify_change(path->dentry, &newattrs);
55483 mutex_unlock(&inode->i_mutex);
55484diff --git a/fs/xattr.c b/fs/xattr.c
55485index 3377dff..4feded6 100644
55486--- a/fs/xattr.c
55487+++ b/fs/xattr.c
55488@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
55489 * Extended attribute SET operations
55490 */
55491 static long
55492-setxattr(struct dentry *d, const char __user *name, const void __user *value,
55493+setxattr(struct path *path, const char __user *name, const void __user *value,
55494 size_t size, int flags)
55495 {
55496 int error;
55497@@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
55498 posix_acl_fix_xattr_from_user(kvalue, size);
55499 }
55500
55501- error = vfs_setxattr(d, kname, kvalue, size, flags);
55502+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
55503+ error = -EACCES;
55504+ goto out;
55505+ }
55506+
55507+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
55508 out:
55509 if (vvalue)
55510 vfree(vvalue);
55511@@ -377,7 +382,7 @@ retry:
55512 return error;
55513 error = mnt_want_write(path.mnt);
55514 if (!error) {
55515- error = setxattr(path.dentry, name, value, size, flags);
55516+ error = setxattr(&path, name, value, size, flags);
55517 mnt_drop_write(path.mnt);
55518 }
55519 path_put(&path);
55520@@ -401,7 +406,7 @@ retry:
55521 return error;
55522 error = mnt_want_write(path.mnt);
55523 if (!error) {
55524- error = setxattr(path.dentry, name, value, size, flags);
55525+ error = setxattr(&path, name, value, size, flags);
55526 mnt_drop_write(path.mnt);
55527 }
55528 path_put(&path);
55529@@ -416,16 +421,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
55530 const void __user *,value, size_t, size, int, flags)
55531 {
55532 struct fd f = fdget(fd);
55533- struct dentry *dentry;
55534 int error = -EBADF;
55535
55536 if (!f.file)
55537 return error;
55538- dentry = f.file->f_path.dentry;
55539- audit_inode(NULL, dentry, 0);
55540+ audit_inode(NULL, f.file->f_path.dentry, 0);
55541 error = mnt_want_write_file(f.file);
55542 if (!error) {
55543- error = setxattr(dentry, name, value, size, flags);
55544+ error = setxattr(&f.file->f_path, name, value, size, flags);
55545 mnt_drop_write_file(f.file);
55546 }
55547 fdput(f);
55548diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
55549index 9fbea87..6b19972 100644
55550--- a/fs/xattr_acl.c
55551+++ b/fs/xattr_acl.c
55552@@ -76,8 +76,8 @@ struct posix_acl *
55553 posix_acl_from_xattr(struct user_namespace *user_ns,
55554 const void *value, size_t size)
55555 {
55556- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
55557- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
55558+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
55559+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
55560 int count;
55561 struct posix_acl *acl;
55562 struct posix_acl_entry *acl_e;
55563diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
55564index 572a858..12a9b0d 100644
55565--- a/fs/xfs/xfs_bmap.c
55566+++ b/fs/xfs/xfs_bmap.c
55567@@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
55568 int nmap,
55569 int ret_nmap);
55570 #else
55571-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
55572+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
55573 #endif /* DEBUG */
55574
55575 STATIC int
55576diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
55577index 1b9fc3e..e1bdde0 100644
55578--- a/fs/xfs/xfs_dir2_sf.c
55579+++ b/fs/xfs/xfs_dir2_sf.c
55580@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
55581 }
55582
55583 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
55584- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
55585+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
55586+ char name[sfep->namelen];
55587+ memcpy(name, sfep->name, sfep->namelen);
55588+ if (filldir(dirent, name, sfep->namelen,
55589+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
55590+ *offset = off & 0x7fffffff;
55591+ return 0;
55592+ }
55593+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
55594 off & 0x7fffffff, ino, DT_UNKNOWN)) {
55595 *offset = off & 0x7fffffff;
55596 return 0;
55597diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
55598index c1c3ef8..0952438 100644
55599--- a/fs/xfs/xfs_ioctl.c
55600+++ b/fs/xfs/xfs_ioctl.c
55601@@ -127,7 +127,7 @@ xfs_find_handle(
55602 }
55603
55604 error = -EFAULT;
55605- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
55606+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
55607 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
55608 goto out_put;
55609
55610diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
55611index d82efaa..0904a8e 100644
55612--- a/fs/xfs/xfs_iops.c
55613+++ b/fs/xfs/xfs_iops.c
55614@@ -395,7 +395,7 @@ xfs_vn_put_link(
55615 struct nameidata *nd,
55616 void *p)
55617 {
55618- char *s = nd_get_link(nd);
55619+ const char *s = nd_get_link(nd);
55620
55621 if (!IS_ERR(s))
55622 kfree(s);
55623diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
55624new file mode 100644
55625index 0000000..92247e4
55626--- /dev/null
55627+++ b/grsecurity/Kconfig
55628@@ -0,0 +1,1021 @@
55629+#
55630+# grecurity configuration
55631+#
55632+menu "Memory Protections"
55633+depends on GRKERNSEC
55634+
55635+config GRKERNSEC_KMEM
55636+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55637+ default y if GRKERNSEC_CONFIG_AUTO
55638+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55639+ help
55640+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55641+ be written to or read from to modify or leak the contents of the running
55642+ kernel. /dev/port will also not be allowed to be opened and support
55643+ for /dev/cpu/*/msr will be removed. If you have module
55644+ support disabled, enabling this will close up five ways that are
55645+ currently used to insert malicious code into the running kernel.
55646+
55647+ Even with all these features enabled, we still highly recommend that
55648+ you use the RBAC system, as it is still possible for an attacker to
55649+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55650+
55651+ If you are not using XFree86, you may be able to stop this additional
55652+ case by enabling the 'Disable privileged I/O' option. Though nothing
55653+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55654+ but only to video memory, which is the only writing we allow in this
55655+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55656+ not be allowed to mprotect it with PROT_WRITE later.
55657+ Enabling this feature will prevent the "cpupower" and "powertop" tools
55658+ from working.
55659+
55660+ It is highly recommended that you say Y here if you meet all the
55661+ conditions above.
55662+
55663+config GRKERNSEC_VM86
55664+ bool "Restrict VM86 mode"
55665+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
55666+ depends on X86_32
55667+
55668+ help
55669+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55670+ make use of a special execution mode on 32bit x86 processors called
55671+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55672+ video cards and will still work with this option enabled. The purpose
55673+ of the option is to prevent exploitation of emulation errors in
55674+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55675+ Nearly all users should be able to enable this option.
55676+
55677+config GRKERNSEC_IO
55678+ bool "Disable privileged I/O"
55679+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
55680+ depends on X86
55681+ select RTC_CLASS
55682+ select RTC_INTF_DEV
55683+ select RTC_DRV_CMOS
55684+
55685+ help
55686+ If you say Y here, all ioperm and iopl calls will return an error.
55687+ Ioperm and iopl can be used to modify the running kernel.
55688+ Unfortunately, some programs need this access to operate properly,
55689+ the most notable of which are XFree86 and hwclock. hwclock can be
55690+ remedied by having RTC support in the kernel, so real-time
55691+ clock support is enabled if this option is enabled, to ensure
55692+ that hwclock operates correctly. XFree86 still will not
55693+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55694+ IF YOU USE XFree86. If you use XFree86 and you still want to
55695+ protect your kernel against modification, use the RBAC system.
55696+
55697+config GRKERNSEC_JIT_HARDEN
55698+ bool "Harden BPF JIT against spray attacks"
55699+ default y if GRKERNSEC_CONFIG_AUTO
55700+ depends on BPF_JIT
55701+ help
55702+ If you say Y here, the native code generated by the kernel's Berkeley
55703+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
55704+ attacks that attempt to fit attacker-beneficial instructions in
55705+ 32bit immediate fields of JIT-generated native instructions. The
55706+ attacker will generally aim to cause an unintended instruction sequence
55707+ of JIT-generated native code to execute by jumping into the middle of
55708+ a generated instruction. This feature effectively randomizes the 32bit
55709+ immediate constants present in the generated code to thwart such attacks.
55710+
55711+ If you're using KERNEXEC, it's recommended that you enable this option
55712+ to supplement the hardening of the kernel.
55713+
55714+config GRKERNSEC_RAND_THREADSTACK
55715+ bool "Insert random gaps between thread stacks"
55716+ default y if GRKERNSEC_CONFIG_AUTO
55717+ depends on PAX_RANDMMAP && !PPC
55718+ help
55719+ If you say Y here, a random-sized gap will be enforced between allocated
55720+ thread stacks. Glibc's NPTL and other threading libraries that
55721+ pass MAP_STACK to the kernel for thread stack allocation are supported.
55722+ The implementation currently provides 8 bits of entropy for the gap.
55723+
55724+ Many distributions do not compile threaded remote services with the
55725+ -fstack-check argument to GCC, causing the variable-sized stack-based
55726+ allocator, alloca(), to not probe the stack on allocation. This
55727+ permits an unbounded alloca() to skip over any guard page and potentially
55728+ modify another thread's stack reliably. An enforced random gap
55729+ reduces the reliability of such an attack and increases the chance
55730+ that such a read/write to another thread's stack instead lands in
55731+ an unmapped area, causing a crash and triggering grsecurity's
55732+ anti-bruteforcing logic.
55733+
55734+config GRKERNSEC_PROC_MEMMAP
55735+ bool "Harden ASLR against information leaks and entropy reduction"
55736+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
55737+ depends on PAX_NOEXEC || PAX_ASLR
55738+ help
55739+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55740+ give no information about the addresses of its mappings if
55741+ PaX features that rely on random addresses are enabled on the task.
55742+ In addition to sanitizing this information and disabling other
55743+ dangerous sources of information, this option causes reads of sensitive
55744+ /proc/<pid> entries where the file descriptor was opened in a different
55745+ task than the one performing the read. Such attempts are logged.
55746+ This option also limits argv/env strings for suid/sgid binaries
55747+ to 512KB to prevent a complete exhaustion of the stack entropy provided
55748+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
55749+ binaries to prevent alternative mmap layouts from being abused.
55750+
55751+ If you use PaX it is essential that you say Y here as it closes up
55752+ several holes that make full ASLR useless locally.
55753+
55754+config GRKERNSEC_BRUTE
55755+ bool "Deter exploit bruteforcing"
55756+ default y if GRKERNSEC_CONFIG_AUTO
55757+ help
55758+ If you say Y here, attempts to bruteforce exploits against forking
55759+ daemons such as apache or sshd, as well as against suid/sgid binaries
55760+ will be deterred. When a child of a forking daemon is killed by PaX
55761+ or crashes due to an illegal instruction or other suspicious signal,
55762+ the parent process will be delayed 30 seconds upon every subsequent
55763+ fork until the administrator is able to assess the situation and
55764+ restart the daemon.
55765+ In the suid/sgid case, the attempt is logged, the user has all their
55766+ processes terminated, and they are prevented from executing any further
55767+ processes for 15 minutes.
55768+ It is recommended that you also enable signal logging in the auditing
55769+ section so that logs are generated when a process triggers a suspicious
55770+ signal.
55771+ If the sysctl option is enabled, a sysctl option with name
55772+ "deter_bruteforce" is created.
55773+
55774+
55775+config GRKERNSEC_MODHARDEN
55776+ bool "Harden module auto-loading"
55777+ default y if GRKERNSEC_CONFIG_AUTO
55778+ depends on MODULES
55779+ help
55780+ If you say Y here, module auto-loading in response to use of some
55781+ feature implemented by an unloaded module will be restricted to
55782+ root users. Enabling this option helps defend against attacks
55783+ by unprivileged users who abuse the auto-loading behavior to
55784+ cause a vulnerable module to load that is then exploited.
55785+
55786+ If this option prevents a legitimate use of auto-loading for a
55787+ non-root user, the administrator can execute modprobe manually
55788+ with the exact name of the module mentioned in the alert log.
55789+ Alternatively, the administrator can add the module to the list
55790+ of modules loaded at boot by modifying init scripts.
55791+
55792+ Modification of init scripts will most likely be needed on
55793+ Ubuntu servers with encrypted home directory support enabled,
55794+ as the first non-root user logging in will cause the ecb(aes),
55795+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55796+
55797+config GRKERNSEC_HIDESYM
55798+ bool "Hide kernel symbols"
55799+ default y if GRKERNSEC_CONFIG_AUTO
55800+ select PAX_USERCOPY_SLABS
55801+ help
55802+ If you say Y here, getting information on loaded modules, and
55803+ displaying all kernel symbols through a syscall will be restricted
55804+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55805+ /proc/kallsyms will be restricted to the root user. The RBAC
55806+ system can hide that entry even from root.
55807+
55808+ This option also prevents leaking of kernel addresses through
55809+ several /proc entries.
55810+
55811+ Note that this option is only effective provided the following
55812+ conditions are met:
55813+ 1) The kernel using grsecurity is not precompiled by some distribution
55814+ 2) You have also enabled GRKERNSEC_DMESG
55815+ 3) You are using the RBAC system and hiding other files such as your
55816+ kernel image and System.map. Alternatively, enabling this option
55817+ causes the permissions on /boot, /lib/modules, and the kernel
55818+ source directory to change at compile time to prevent
55819+ reading by non-root users.
55820+ If the above conditions are met, this option will aid in providing a
55821+ useful protection against local kernel exploitation of overflows
55822+ and arbitrary read/write vulnerabilities.
55823+
55824+config GRKERNSEC_KERN_LOCKOUT
55825+ bool "Active kernel exploit response"
55826+ default y if GRKERNSEC_CONFIG_AUTO
55827+ depends on X86 || ARM || PPC || SPARC
55828+ help
55829+ If you say Y here, when a PaX alert is triggered due to suspicious
55830+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55831+ or an OOPS occurs due to bad memory accesses, instead of just
55832+ terminating the offending process (and potentially allowing
55833+ a subsequent exploit from the same user), we will take one of two
55834+ actions:
55835+ If the user was root, we will panic the system
55836+ If the user was non-root, we will log the attempt, terminate
55837+ all processes owned by the user, then prevent them from creating
55838+ any new processes until the system is restarted
55839+ This deters repeated kernel exploitation/bruteforcing attempts
55840+ and is useful for later forensics.
55841+
55842+endmenu
55843+menu "Role Based Access Control Options"
55844+depends on GRKERNSEC
55845+
55846+config GRKERNSEC_RBAC_DEBUG
55847+ bool
55848+
55849+config GRKERNSEC_NO_RBAC
55850+ bool "Disable RBAC system"
55851+ help
55852+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55853+ preventing the RBAC system from being enabled. You should only say Y
55854+ here if you have no intention of using the RBAC system, so as to prevent
55855+ an attacker with root access from misusing the RBAC system to hide files
55856+ and processes when loadable module support and /dev/[k]mem have been
55857+ locked down.
55858+
55859+config GRKERNSEC_ACL_HIDEKERN
55860+ bool "Hide kernel processes"
55861+ help
55862+ If you say Y here, all kernel threads will be hidden to all
55863+ processes but those whose subject has the "view hidden processes"
55864+ flag.
55865+
55866+config GRKERNSEC_ACL_MAXTRIES
55867+ int "Maximum tries before password lockout"
55868+ default 3
55869+ help
55870+ This option enforces the maximum number of times a user can attempt
55871+ to authorize themselves with the grsecurity RBAC system before being
55872+ denied the ability to attempt authorization again for a specified time.
55873+ The lower the number, the harder it will be to brute-force a password.
55874+
55875+config GRKERNSEC_ACL_TIMEOUT
55876+ int "Time to wait after max password tries, in seconds"
55877+ default 30
55878+ help
55879+ This option specifies the time the user must wait after attempting to
55880+ authorize to the RBAC system with the maximum number of invalid
55881+ passwords. The higher the number, the harder it will be to brute-force
55882+ a password.
55883+
55884+endmenu
55885+menu "Filesystem Protections"
55886+depends on GRKERNSEC
55887+
55888+config GRKERNSEC_PROC
55889+ bool "Proc restrictions"
55890+ default y if GRKERNSEC_CONFIG_AUTO
55891+ help
55892+ If you say Y here, the permissions of the /proc filesystem
55893+ will be altered to enhance system security and privacy. You MUST
55894+ choose either a user only restriction or a user and group restriction.
55895+ Depending upon the option you choose, you can either restrict users to
55896+ see only the processes they themselves run, or choose a group that can
55897+ view all processes and files normally restricted to root if you choose
55898+ the "restrict to user only" option. NOTE: If you're running identd or
55899+ ntpd as a non-root user, you will have to run it as the group you
55900+ specify here.
55901+
55902+config GRKERNSEC_PROC_USER
55903+ bool "Restrict /proc to user only"
55904+ depends on GRKERNSEC_PROC
55905+ help
55906+ If you say Y here, non-root users will only be able to view their own
55907+ processes, and restricts them from viewing network-related information,
55908+ and viewing kernel symbol and module information.
55909+
55910+config GRKERNSEC_PROC_USERGROUP
55911+ bool "Allow special group"
55912+ default y if GRKERNSEC_CONFIG_AUTO
55913+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55914+ help
55915+ If you say Y here, you will be able to select a group that will be
55916+ able to view all processes and network-related information. If you've
55917+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55918+ remain hidden. This option is useful if you want to run identd as
55919+ a non-root user. The group you select may also be chosen at boot time
55920+ via "grsec_proc_gid=" on the kernel commandline.
55921+
55922+config GRKERNSEC_PROC_GID
55923+ int "GID for special group"
55924+ depends on GRKERNSEC_PROC_USERGROUP
55925+ default 1001
55926+
55927+config GRKERNSEC_PROC_ADD
55928+ bool "Additional restrictions"
55929+ default y if GRKERNSEC_CONFIG_AUTO
55930+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55931+ help
55932+ If you say Y here, additional restrictions will be placed on
55933+ /proc that keep normal users from viewing device information and
55934+ slabinfo information that could be useful for exploits.
55935+
55936+config GRKERNSEC_LINK
55937+ bool "Linking restrictions"
55938+ default y if GRKERNSEC_CONFIG_AUTO
55939+ help
55940+ If you say Y here, /tmp race exploits will be prevented, since users
55941+ will no longer be able to follow symlinks owned by other users in
55942+ world-writable +t directories (e.g. /tmp), unless the owner of the
55943+ symlink is the owner of the directory. users will also not be
55944+ able to hardlink to files they do not own. If the sysctl option is
55945+ enabled, a sysctl option with name "linking_restrictions" is created.
55946+
55947+config GRKERNSEC_SYMLINKOWN
55948+ bool "Kernel-enforced SymlinksIfOwnerMatch"
55949+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
55950+ help
55951+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
55952+ that prevents it from being used as a security feature. As Apache
55953+ verifies the symlink by performing a stat() against the target of
55954+ the symlink before it is followed, an attacker can setup a symlink
55955+ to point to a same-owned file, then replace the symlink with one
55956+ that targets another user's file just after Apache "validates" the
55957+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
55958+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
55959+ will be in place for the group you specify. If the sysctl option
55960+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
55961+ created.
55962+
55963+config GRKERNSEC_SYMLINKOWN_GID
55964+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
55965+ depends on GRKERNSEC_SYMLINKOWN
55966+ default 1006
55967+ help
55968+ Setting this GID determines what group kernel-enforced
55969+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
55970+ is enabled, a sysctl option with name "symlinkown_gid" is created.
55971+
55972+config GRKERNSEC_FIFO
55973+ bool "FIFO restrictions"
55974+ default y if GRKERNSEC_CONFIG_AUTO
55975+ help
55976+ If you say Y here, users will not be able to write to FIFOs they don't
55977+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55978+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
55979+ option is enabled, a sysctl option with name "fifo_restrictions" is
55980+ created.
55981+
55982+config GRKERNSEC_SYSFS_RESTRICT
55983+ bool "Sysfs/debugfs restriction"
55984+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
55985+ depends on SYSFS
55986+ help
55987+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55988+ any filesystem normally mounted under it (e.g. debugfs) will be
55989+ mostly accessible only by root. These filesystems generally provide access
55990+ to hardware and debug information that isn't appropriate for unprivileged
55991+ users of the system. Sysfs and debugfs have also become a large source
55992+ of new vulnerabilities, ranging from infoleaks to local compromise.
55993+ There has been very little oversight with an eye toward security involved
55994+ in adding new exporters of information to these filesystems, so their
55995+ use is discouraged.
55996+ For reasons of compatibility, a few directories have been whitelisted
55997+ for access by non-root users:
55998+ /sys/fs/selinux
55999+ /sys/fs/fuse
56000+ /sys/devices/system/cpu
56001+
56002+config GRKERNSEC_ROFS
56003+ bool "Runtime read-only mount protection"
56004+ help
56005+ If you say Y here, a sysctl option with name "romount_protect" will
56006+ be created. By setting this option to 1 at runtime, filesystems
56007+ will be protected in the following ways:
56008+ * No new writable mounts will be allowed
56009+ * Existing read-only mounts won't be able to be remounted read/write
56010+ * Write operations will be denied on all block devices
56011+ This option acts independently of grsec_lock: once it is set to 1,
56012+ it cannot be turned off. Therefore, please be mindful of the resulting
56013+ behavior if this option is enabled in an init script on a read-only
56014+ filesystem. This feature is mainly intended for secure embedded systems.
56015+
56016+config GRKERNSEC_DEVICE_SIDECHANNEL
56017+ bool "Eliminate stat/notify-based device sidechannels"
56018+ default y if GRKERNSEC_CONFIG_AUTO
56019+ help
56020+ If you say Y here, timing analyses on block or character
56021+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
56022+ will be thwarted for unprivileged users. If a process without
56023+ CAP_MKNOD stats such a device, the last access and last modify times
56024+ will match the device's create time. No access or modify events
56025+ will be triggered through inotify/dnotify/fanotify for such devices.
56026+ This feature will prevent attacks that may at a minimum
56027+ allow an attacker to determine the administrator's password length.
56028+
56029+config GRKERNSEC_CHROOT
56030+ bool "Chroot jail restrictions"
56031+ default y if GRKERNSEC_CONFIG_AUTO
56032+ help
56033+ If you say Y here, you will be able to choose several options that will
56034+ make breaking out of a chrooted jail much more difficult. If you
56035+ encounter no software incompatibilities with the following options, it
56036+ is recommended that you enable each one.
56037+
56038+config GRKERNSEC_CHROOT_MOUNT
56039+ bool "Deny mounts"
56040+ default y if GRKERNSEC_CONFIG_AUTO
56041+ depends on GRKERNSEC_CHROOT
56042+ help
56043+ If you say Y here, processes inside a chroot will not be able to
56044+ mount or remount filesystems. If the sysctl option is enabled, a
56045+ sysctl option with name "chroot_deny_mount" is created.
56046+
56047+config GRKERNSEC_CHROOT_DOUBLE
56048+ bool "Deny double-chroots"
56049+ default y if GRKERNSEC_CONFIG_AUTO
56050+ depends on GRKERNSEC_CHROOT
56051+ help
56052+ If you say Y here, processes inside a chroot will not be able to chroot
56053+ again outside the chroot. This is a widely used method of breaking
56054+ out of a chroot jail and should not be allowed. If the sysctl
56055+ option is enabled, a sysctl option with name
56056+ "chroot_deny_chroot" is created.
56057+
56058+config GRKERNSEC_CHROOT_PIVOT
56059+ bool "Deny pivot_root in chroot"
56060+ default y if GRKERNSEC_CONFIG_AUTO
56061+ depends on GRKERNSEC_CHROOT
56062+ help
56063+ If you say Y here, processes inside a chroot will not be able to use
56064+ a function called pivot_root() that was introduced in Linux 2.3.41. It
56065+ works similarly to chroot in that it changes the root filesystem. This
56066+ function could be misused in a chrooted process to attempt to break out
56067+ of the chroot, and therefore should not be allowed. If the sysctl
56068+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56069+ created.
56070+
56071+config GRKERNSEC_CHROOT_CHDIR
56072+ bool "Enforce chdir(\"/\") on all chroots"
56073+ default y if GRKERNSEC_CONFIG_AUTO
56074+ depends on GRKERNSEC_CHROOT
56075+ help
56076+ If you say Y here, the current working directory of all newly-chrooted
56077+ applications will be set to the root directory of the chroot.
56078+ The man page on chroot(2) states:
56079+ Note that this call does not change the current working
56080+ directory, so that `.' can be outside the tree rooted at
56081+ `/'. In particular, the super-user can escape from a
56082+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56083+
56084+ It is recommended that you say Y here, since it's not known to break
56085+ any software. If the sysctl option is enabled, a sysctl option with
56086+ name "chroot_enforce_chdir" is created.
56087+
56088+config GRKERNSEC_CHROOT_CHMOD
56089+ bool "Deny (f)chmod +s"
56090+ default y if GRKERNSEC_CONFIG_AUTO
56091+ depends on GRKERNSEC_CHROOT
56092+ help
56093+ If you say Y here, processes inside a chroot will not be able to chmod
56094+ or fchmod files to make them have suid or sgid bits. This protects
56095+ against another published method of breaking a chroot. If the sysctl
56096+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56097+ created.
56098+
56099+config GRKERNSEC_CHROOT_FCHDIR
56100+ bool "Deny fchdir out of chroot"
56101+ default y if GRKERNSEC_CONFIG_AUTO
56102+ depends on GRKERNSEC_CHROOT
56103+ help
56104+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56105+ to a file descriptor of the chrooting process that points to a directory
56106+ outside the filesystem will be stopped. If the sysctl option
56107+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56108+
56109+config GRKERNSEC_CHROOT_MKNOD
56110+ bool "Deny mknod"
56111+ default y if GRKERNSEC_CONFIG_AUTO
56112+ depends on GRKERNSEC_CHROOT
56113+ help
56114+ If you say Y here, processes inside a chroot will not be allowed to
56115+ mknod. The problem with using mknod inside a chroot is that it
56116+ would allow an attacker to create a device entry that is the same
56117+ as one on the physical root of your system, which could be
56118+ anything from the console device to a device for your hard drive (which
56119+ they could then use to wipe the drive or steal data). It is recommended
56120+ that you say Y here, unless you run into software incompatibilities.
56121+ If the sysctl option is enabled, a sysctl option with name
56122+ "chroot_deny_mknod" is created.
56123+
56124+config GRKERNSEC_CHROOT_SHMAT
56125+ bool "Deny shmat() out of chroot"
56126+ default y if GRKERNSEC_CONFIG_AUTO
56127+ depends on GRKERNSEC_CHROOT
56128+ help
56129+ If you say Y here, processes inside a chroot will not be able to attach
56130+ to shared memory segments that were created outside of the chroot jail.
56131+ It is recommended that you say Y here. If the sysctl option is enabled,
56132+ a sysctl option with name "chroot_deny_shmat" is created.
56133+
56134+config GRKERNSEC_CHROOT_UNIX
56135+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56136+ default y if GRKERNSEC_CONFIG_AUTO
56137+ depends on GRKERNSEC_CHROOT
56138+ help
56139+ If you say Y here, processes inside a chroot will not be able to
56140+ connect to abstract (meaning not belonging to a filesystem) Unix
56141+ domain sockets that were bound outside of a chroot. It is recommended
56142+ that you say Y here. If the sysctl option is enabled, a sysctl option
56143+ with name "chroot_deny_unix" is created.
56144+
56145+config GRKERNSEC_CHROOT_FINDTASK
56146+ bool "Protect outside processes"
56147+ default y if GRKERNSEC_CONFIG_AUTO
56148+ depends on GRKERNSEC_CHROOT
56149+ help
56150+ If you say Y here, processes inside a chroot will not be able to
56151+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56152+ getsid, or view any process outside of the chroot. If the sysctl
56153+ option is enabled, a sysctl option with name "chroot_findtask" is
56154+ created.
56155+
56156+config GRKERNSEC_CHROOT_NICE
56157+ bool "Restrict priority changes"
56158+ default y if GRKERNSEC_CONFIG_AUTO
56159+ depends on GRKERNSEC_CHROOT
56160+ help
56161+ If you say Y here, processes inside a chroot will not be able to raise
56162+ the priority of processes in the chroot, or alter the priority of
56163+ processes outside the chroot. This provides more security than simply
56164+ removing CAP_SYS_NICE from the process' capability set. If the
56165+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56166+ is created.
56167+
56168+config GRKERNSEC_CHROOT_SYSCTL
56169+ bool "Deny sysctl writes"
56170+ default y if GRKERNSEC_CONFIG_AUTO
56171+ depends on GRKERNSEC_CHROOT
56172+ help
56173+ If you say Y here, an attacker in a chroot will not be able to
56174+ write to sysctl entries, either by sysctl(2) or through a /proc
56175+ interface. It is strongly recommended that you say Y here. If the
56176+ sysctl option is enabled, a sysctl option with name
56177+ "chroot_deny_sysctl" is created.
56178+
56179+config GRKERNSEC_CHROOT_CAPS
56180+ bool "Capability restrictions"
56181+ default y if GRKERNSEC_CONFIG_AUTO
56182+ depends on GRKERNSEC_CHROOT
56183+ help
56184+ If you say Y here, the capabilities on all processes within a
56185+ chroot jail will be lowered to stop module insertion, raw i/o,
56186+ system and net admin tasks, rebooting the system, modifying immutable
56187+ files, modifying IPC owned by another, and changing the system time.
56188+ This is left an option because it can break some apps. Disable this
56189+ if your chrooted apps are having problems performing those kinds of
56190+ tasks. If the sysctl option is enabled, a sysctl option with
56191+ name "chroot_caps" is created.
56192+
56193+endmenu
56194+menu "Kernel Auditing"
56195+depends on GRKERNSEC
56196+
56197+config GRKERNSEC_AUDIT_GROUP
56198+ bool "Single group for auditing"
56199+ help
56200+ If you say Y here, the exec and chdir logging features will only operate
56201+ on a group you specify. This option is recommended if you only want to
56202+ watch certain users instead of having a large amount of logs from the
56203+ entire system. If the sysctl option is enabled, a sysctl option with
56204+ name "audit_group" is created.
56205+
56206+config GRKERNSEC_AUDIT_GID
56207+ int "GID for auditing"
56208+ depends on GRKERNSEC_AUDIT_GROUP
56209+ default 1007
56210+
56211+config GRKERNSEC_EXECLOG
56212+ bool "Exec logging"
56213+ help
56214+ If you say Y here, all execve() calls will be logged (since the
56215+ other exec*() calls are frontends to execve(), all execution
56216+ will be logged). Useful for shell-servers that like to keep track
56217+ of their users. If the sysctl option is enabled, a sysctl option with
56218+ name "exec_logging" is created.
56219+ WARNING: This option when enabled will produce a LOT of logs, especially
56220+ on an active system.
56221+
56222+config GRKERNSEC_RESLOG
56223+ bool "Resource logging"
56224+ default y if GRKERNSEC_CONFIG_AUTO
56225+ help
56226+ If you say Y here, all attempts to overstep resource limits will
56227+ be logged with the resource name, the requested size, and the current
56228+ limit. It is highly recommended that you say Y here. If the sysctl
56229+ option is enabled, a sysctl option with name "resource_logging" is
56230+ created. If the RBAC system is enabled, the sysctl value is ignored.
56231+
56232+config GRKERNSEC_CHROOT_EXECLOG
56233+ bool "Log execs within chroot"
56234+ help
56235+ If you say Y here, all executions inside a chroot jail will be logged
56236+ to syslog. This can cause a large amount of logs if certain
56237+ applications (eg. djb's daemontools) are installed on the system, and
56238+ is therefore left as an option. If the sysctl option is enabled, a
56239+ sysctl option with name "chroot_execlog" is created.
56240+
56241+config GRKERNSEC_AUDIT_PTRACE
56242+ bool "Ptrace logging"
56243+ help
56244+ If you say Y here, all attempts to attach to a process via ptrace
56245+ will be logged. If the sysctl option is enabled, a sysctl option
56246+ with name "audit_ptrace" is created.
56247+
56248+config GRKERNSEC_AUDIT_CHDIR
56249+ bool "Chdir logging"
56250+ help
56251+ If you say Y here, all chdir() calls will be logged. If the sysctl
56252+ option is enabled, a sysctl option with name "audit_chdir" is created.
56253+
56254+config GRKERNSEC_AUDIT_MOUNT
56255+ bool "(Un)Mount logging"
56256+ help
56257+ If you say Y here, all mounts and unmounts will be logged. If the
56258+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56259+ created.
56260+
56261+config GRKERNSEC_SIGNAL
56262+ bool "Signal logging"
56263+ default y if GRKERNSEC_CONFIG_AUTO
56264+ help
56265+ If you say Y here, certain important signals will be logged, such as
56266+ SIGSEGV, which will as a result inform you of when an error in a program
56267+ occurred, which in some cases could mean a possible exploit attempt.
56268+ If the sysctl option is enabled, a sysctl option with name
56269+ "signal_logging" is created.
56270+
56271+config GRKERNSEC_FORKFAIL
56272+ bool "Fork failure logging"
56273+ help
56274+ If you say Y here, all failed fork() attempts will be logged.
56275+ This could suggest a fork bomb, or someone attempting to overstep
56276+ their process limit. If the sysctl option is enabled, a sysctl option
56277+ with name "forkfail_logging" is created.
56278+
56279+config GRKERNSEC_TIME
56280+ bool "Time change logging"
56281+ default y if GRKERNSEC_CONFIG_AUTO
56282+ help
56283+ If you say Y here, any changes of the system clock will be logged.
56284+ If the sysctl option is enabled, a sysctl option with name
56285+ "timechange_logging" is created.
56286+
56287+config GRKERNSEC_PROC_IPADDR
56288+ bool "/proc/<pid>/ipaddr support"
56289+ default y if GRKERNSEC_CONFIG_AUTO
56290+ help
56291+ If you say Y here, a new entry will be added to each /proc/<pid>
56292+ directory that contains the IP address of the person using the task.
56293+ The IP is carried across local TCP and AF_UNIX stream sockets.
56294+ This information can be useful for IDS/IPSes to perform remote response
56295+ to a local attack. The entry is readable by only the owner of the
56296+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56297+ the RBAC system), and thus does not create privacy concerns.
56298+
56299+config GRKERNSEC_RWXMAP_LOG
56300+ bool 'Denied RWX mmap/mprotect logging'
56301+ default y if GRKERNSEC_CONFIG_AUTO
56302+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56303+ help
56304+ If you say Y here, calls to mmap() and mprotect() with explicit
56305+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56306+ denied by the PAX_MPROTECT feature. If the sysctl option is
56307+ enabled, a sysctl option with name "rwxmap_logging" is created.
56308+
56309+config GRKERNSEC_AUDIT_TEXTREL
56310+ bool 'ELF text relocations logging (READ HELP)'
56311+ depends on PAX_MPROTECT
56312+ help
56313+ If you say Y here, text relocations will be logged with the filename
56314+ of the offending library or binary. The purpose of the feature is
56315+ to help Linux distribution developers get rid of libraries and
56316+ binaries that need text relocations which hinder the future progress
56317+ of PaX. Only Linux distribution developers should say Y here, and
56318+ never on a production machine, as this option creates an information
56319+ leak that could aid an attacker in defeating the randomization of
56320+ a single memory region. If the sysctl option is enabled, a sysctl
56321+ option with name "audit_textrel" is created.
56322+
56323+endmenu
56324+
56325+menu "Executable Protections"
56326+depends on GRKERNSEC
56327+
56328+config GRKERNSEC_DMESG
56329+ bool "Dmesg(8) restriction"
56330+ default y if GRKERNSEC_CONFIG_AUTO
56331+ help
56332+ If you say Y here, non-root users will not be able to use dmesg(8)
56333+ to view the contents of the kernel's circular log buffer.
56334+ The kernel's log buffer often contains kernel addresses and other
56335+ identifying information useful to an attacker in fingerprinting a
56336+ system for a targeted exploit.
56337+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56338+ created.
56339+
56340+config GRKERNSEC_HARDEN_PTRACE
56341+ bool "Deter ptrace-based process snooping"
56342+ default y if GRKERNSEC_CONFIG_AUTO
56343+ help
56344+ If you say Y here, TTY sniffers and other malicious monitoring
56345+ programs implemented through ptrace will be defeated. If you
56346+ have been using the RBAC system, this option has already been
56347+ enabled for several years for all users, with the ability to make
56348+ fine-grained exceptions.
56349+
56350+ This option only affects the ability of non-root users to ptrace
56351+ processes that are not a descendant of the ptracing process.
56352+ This means that strace ./binary and gdb ./binary will still work,
56353+ but attaching to arbitrary processes will not. If the sysctl
56354+ option is enabled, a sysctl option with name "harden_ptrace" is
56355+ created.
56356+
56357+config GRKERNSEC_PTRACE_READEXEC
56358+ bool "Require read access to ptrace sensitive binaries"
56359+ default y if GRKERNSEC_CONFIG_AUTO
56360+ help
56361+ If you say Y here, unprivileged users will not be able to ptrace unreadable
56362+ binaries. This option is useful in environments that
56363+ remove the read bits (e.g. file mode 4711) from suid binaries to
56364+ prevent infoleaking of their contents. This option adds
56365+ consistency to the use of that file mode, as the binary could normally
56366+ be read out when run without privileges while ptracing.
56367+
56368+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
56369+ is created.
56370+
56371+config GRKERNSEC_SETXID
56372+ bool "Enforce consistent multithreaded privileges"
56373+ default y if GRKERNSEC_CONFIG_AUTO
56374+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
56375+ help
56376+ If you say Y here, a change from a root uid to a non-root uid
56377+ in a multithreaded application will cause the resulting uids,
56378+ gids, supplementary groups, and capabilities in that thread
56379+ to be propagated to the other threads of the process. In most
56380+ cases this is unnecessary, as glibc will emulate this behavior
56381+ on behalf of the application. Other libcs do not act in the
56382+ same way, allowing the other threads of the process to continue
56383+ running with root privileges. If the sysctl option is enabled,
56384+ a sysctl option with name "consistent_setxid" is created.
56385+
56386+config GRKERNSEC_TPE
56387+ bool "Trusted Path Execution (TPE)"
56388+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
56389+ help
56390+ If you say Y here, you will be able to choose a gid to add to the
56391+ supplementary groups of users you want to mark as "untrusted."
56392+ These users will not be able to execute any files that are not in
56393+ root-owned directories writable only by root. If the sysctl option
56394+ is enabled, a sysctl option with name "tpe" is created.
56395+
56396+config GRKERNSEC_TPE_ALL
56397+ bool "Partially restrict all non-root users"
56398+ depends on GRKERNSEC_TPE
56399+ help
56400+ If you say Y here, all non-root users will be covered under
56401+ a weaker TPE restriction. This is separate from, and in addition to,
56402+ the main TPE options that you have selected elsewhere. Thus, if a
56403+ "trusted" GID is chosen, this restriction applies to even that GID.
56404+ Under this restriction, all non-root users will only be allowed to
56405+ execute files in directories they own that are not group or
56406+ world-writable, or in directories owned by root and writable only by
56407+ root. If the sysctl option is enabled, a sysctl option with name
56408+ "tpe_restrict_all" is created.
56409+
56410+config GRKERNSEC_TPE_INVERT
56411+ bool "Invert GID option"
56412+ depends on GRKERNSEC_TPE
56413+ help
56414+ If you say Y here, the group you specify in the TPE configuration will
56415+ decide what group TPE restrictions will be *disabled* for. This
56416+ option is useful if you want TPE restrictions to be applied to most
56417+ users on the system. If the sysctl option is enabled, a sysctl option
56418+ with name "tpe_invert" is created. Unlike other sysctl options, this
56419+ entry will default to on for backward-compatibility.
56420+
56421+config GRKERNSEC_TPE_GID
56422+ int
56423+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
56424+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
56425+
56426+config GRKERNSEC_TPE_UNTRUSTED_GID
56427+ int "GID for TPE-untrusted users"
56428+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56429+ default 1005
56430+ help
56431+ Setting this GID determines what group TPE restrictions will be
56432+ *enabled* for. If the sysctl option is enabled, a sysctl option
56433+ with name "tpe_gid" is created.
56434+
56435+config GRKERNSEC_TPE_TRUSTED_GID
56436+ int "GID for TPE-trusted users"
56437+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56438+ default 1005
56439+ help
56440+ Setting this GID determines what group TPE restrictions will be
56441+ *disabled* for. If the sysctl option is enabled, a sysctl option
56442+ with name "tpe_gid" is created.
56443+
56444+endmenu
56445+menu "Network Protections"
56446+depends on GRKERNSEC
56447+
56448+config GRKERNSEC_RANDNET
56449+ bool "Larger entropy pools"
56450+ default y if GRKERNSEC_CONFIG_AUTO
56451+ help
56452+ If you say Y here, the entropy pools used for many features of Linux
56453+ and grsecurity will be doubled in size. Since several grsecurity
56454+ features use additional randomness, it is recommended that you say Y
56455+ here. Saying Y here has a similar effect as modifying
56456+ /proc/sys/kernel/random/poolsize.
56457+
56458+config GRKERNSEC_BLACKHOLE
56459+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56460+ default y if GRKERNSEC_CONFIG_AUTO
56461+ depends on NET
56462+ help
56463+ If you say Y here, neither TCP resets nor ICMP
56464+ destination-unreachable packets will be sent in response to packets
56465+ sent to ports for which no associated listening process exists.
56466+ This feature supports both IPV4 and IPV6 and exempts the
56467+ loopback interface from blackholing. Enabling this feature
56468+ makes a host more resilient to DoS attacks and reduces network
56469+ visibility against scanners.
56470+
56471+ The blackhole feature as-implemented is equivalent to the FreeBSD
56472+ blackhole feature, as it prevents RST responses to all packets, not
56473+ just SYNs. Under most application behavior this causes no
56474+ problems, but applications (like haproxy) may not close certain
56475+ connections in a way that cleanly terminates them on the remote
56476+ end, leaving the remote host in LAST_ACK state. Because of this
56477+ side-effect and to prevent intentional LAST_ACK DoSes, this
56478+ feature also adds automatic mitigation against such attacks.
56479+ The mitigation drastically reduces the amount of time a socket
56480+ can spend in LAST_ACK state. If you're using haproxy and not
56481+ all servers it connects to have this option enabled, consider
56482+ disabling this feature on the haproxy host.
56483+
56484+ If the sysctl option is enabled, two sysctl options with names
56485+ "ip_blackhole" and "lastack_retries" will be created.
56486+ While "ip_blackhole" takes the standard zero/non-zero on/off
56487+ toggle, "lastack_retries" uses the same kinds of values as
56488+ "tcp_retries1" and "tcp_retries2". The default value of 4
56489+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56490+ state.
56491+
56492+config GRKERNSEC_NO_SIMULT_CONNECT
56493+ bool "Disable TCP Simultaneous Connect"
56494+ default y if GRKERNSEC_CONFIG_AUTO
56495+ depends on NET
56496+ help
56497+ If you say Y here, a feature by Willy Tarreau will be enabled that
56498+ removes a weakness in Linux's strict implementation of TCP that
56499+ allows two clients to connect to each other without either entering
56500+ a listening state. The weakness allows an attacker to easily prevent
56501+ a client from connecting to a known server provided the source port
56502+ for the connection is guessed correctly.
56503+
56504+ As the weakness could be used to prevent an antivirus or IPS from
56505+ fetching updates, or prevent an SSL gateway from fetching a CRL,
56506+ it should be eliminated by enabling this option. Though Linux is
56507+ one of few operating systems supporting simultaneous connect, it
56508+ has no legitimate use in practice and is rarely supported by firewalls.
56509+
56510+config GRKERNSEC_SOCKET
56511+ bool "Socket restrictions"
56512+ depends on NET
56513+ help
56514+ If you say Y here, you will be able to choose from several options.
56515+ If you assign a GID on your system and add it to the supplementary
56516+ groups of users you want to restrict socket access to, this patch
56517+ will perform up to three things, based on the option(s) you choose.
56518+
56519+config GRKERNSEC_SOCKET_ALL
56520+ bool "Deny any sockets to group"
56521+ depends on GRKERNSEC_SOCKET
56522+ help
56523+ If you say Y here, you will be able to choose a GID whose users will
56524+ be unable to connect to other hosts from your machine or run server
56525+ applications from your machine. If the sysctl option is enabled, a
56526+ sysctl option with name "socket_all" is created.
56527+
56528+config GRKERNSEC_SOCKET_ALL_GID
56529+ int "GID to deny all sockets for"
56530+ depends on GRKERNSEC_SOCKET_ALL
56531+ default 1004
56532+ help
56533+ Here you can choose the GID to disable socket access for. Remember to
56534+ add the users you want socket access disabled for to the GID
56535+ specified here. If the sysctl option is enabled, a sysctl option
56536+ with name "socket_all_gid" is created.
56537+
56538+config GRKERNSEC_SOCKET_CLIENT
56539+ bool "Deny client sockets to group"
56540+ depends on GRKERNSEC_SOCKET
56541+ help
56542+ If you say Y here, you will be able to choose a GID whose users will
56543+ be unable to connect to other hosts from your machine, but will be
56544+ able to run servers. If this option is enabled, all users in the group
56545+ you specify will have to use passive mode when initiating ftp transfers
56546+ from the shell on your machine. If the sysctl option is enabled, a
56547+ sysctl option with name "socket_client" is created.
56548+
56549+config GRKERNSEC_SOCKET_CLIENT_GID
56550+ int "GID to deny client sockets for"
56551+ depends on GRKERNSEC_SOCKET_CLIENT
56552+ default 1003
56553+ help
56554+ Here you can choose the GID to disable client socket access for.
56555+ Remember to add the users you want client socket access disabled for to
56556+ the GID specified here. If the sysctl option is enabled, a sysctl
56557+ option with name "socket_client_gid" is created.
56558+
56559+config GRKERNSEC_SOCKET_SERVER
56560+ bool "Deny server sockets to group"
56561+ depends on GRKERNSEC_SOCKET
56562+ help
56563+ If you say Y here, you will be able to choose a GID whose users will
56564+ be unable to run server applications from your machine. If the sysctl
56565+ option is enabled, a sysctl option with name "socket_server" is created.
56566+
56567+config GRKERNSEC_SOCKET_SERVER_GID
56568+ int "GID to deny server sockets for"
56569+ depends on GRKERNSEC_SOCKET_SERVER
56570+ default 1002
56571+ help
56572+ Here you can choose the GID to disable server socket access for.
56573+ Remember to add the users you want server socket access disabled for to
56574+ the GID specified here. If the sysctl option is enabled, a sysctl
56575+ option with name "socket_server_gid" is created.
56576+
56577+endmenu
56578+menu "Sysctl Support"
56579+depends on GRKERNSEC && SYSCTL
56580+
56581+config GRKERNSEC_SYSCTL
56582+ bool "Sysctl support"
56583+ default y if GRKERNSEC_CONFIG_AUTO
56584+ help
56585+ If you say Y here, you will be able to change the options that
56586+ grsecurity runs with at bootup, without having to recompile your
56587+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56588+ to enable (1) or disable (0) various features. All the sysctl entries
56589+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56590+ All features enabled in the kernel configuration are disabled at boot
56591+ if you do not say Y to the "Turn on features by default" option.
56592+ All options should be set at startup, and the grsec_lock entry should
56593+ be set to a non-zero value after all the options are set.
56594+ *THIS IS EXTREMELY IMPORTANT*
56595+
56596+config GRKERNSEC_SYSCTL_DISTRO
56597+ bool "Extra sysctl support for distro makers (READ HELP)"
56598+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56599+ help
56600+ If you say Y here, additional sysctl options will be created
56601+ for features that affect processes running as root. Therefore,
56602+ it is critical when using this option that the grsec_lock entry be
56603+ enabled after boot. Only distros with prebuilt kernel packages
56604+ with this option enabled that can ensure grsec_lock is enabled
56605+ after boot should use this option.
56606+ *Failure to set grsec_lock after boot makes all grsec features
56607+ this option covers useless*
56608+
56609+ Currently this option creates the following sysctl entries:
56610+ "Disable Privileged I/O": "disable_priv_io"
56611+
56612+config GRKERNSEC_SYSCTL_ON
56613+ bool "Turn on features by default"
56614+ default y if GRKERNSEC_CONFIG_AUTO
56615+ depends on GRKERNSEC_SYSCTL
56616+ help
56617+ If you say Y here, instead of having all features enabled in the
56618+ kernel configuration disabled at boot time, the features will be
56619+ enabled at boot time. It is recommended you say Y here unless
56620+ there is some reason you would want all sysctl-tunable features to
56621+ be disabled by default. As mentioned elsewhere, it is important
56622+ to enable the grsec_lock entry once you have finished modifying
56623+ the sysctl entries.
56624+
56625+endmenu
56626+menu "Logging Options"
56627+depends on GRKERNSEC
56628+
56629+config GRKERNSEC_FLOODTIME
56630+ int "Seconds in between log messages (minimum)"
56631+ default 10
56632+ help
56633+ This option allows you to enforce the number of seconds between
56634+ grsecurity log messages. The default should be suitable for most
56635+ people, however, if you choose to change it, choose a value small enough
56636+ to allow informative logs to be produced, but large enough to
56637+ prevent flooding.
56638+
56639+config GRKERNSEC_FLOODBURST
56640+ int "Number of messages in a burst (maximum)"
56641+ default 6
56642+ help
56643+ This option allows you to choose the maximum number of messages allowed
56644+ within the flood time interval you chose in a separate option. The
56645+ default should be suitable for most people, however if you find that
56646+ many of your logs are being interpreted as flooding, you may want to
56647+ raise this value.
56648+
56649+endmenu
56650diff --git a/grsecurity/Makefile b/grsecurity/Makefile
56651new file mode 100644
56652index 0000000..1b9afa9
56653--- /dev/null
56654+++ b/grsecurity/Makefile
56655@@ -0,0 +1,38 @@
56656+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56657+# during 2001-2009 it has been completely redesigned by Brad Spengler
56658+# into an RBAC system
56659+#
56660+# All code in this directory and various hooks inserted throughout the kernel
56661+# are copyright Brad Spengler - Open Source Security, Inc., and released
56662+# under the GPL v2 or higher
56663+
56664+KBUILD_CFLAGS += -Werror
56665+
56666+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56667+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56668+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56669+
56670+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56671+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56672+ gracl_learn.o grsec_log.o
56673+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56674+
56675+ifdef CONFIG_NET
56676+obj-y += grsec_sock.o
56677+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56678+endif
56679+
56680+ifndef CONFIG_GRKERNSEC
56681+obj-y += grsec_disabled.o
56682+endif
56683+
56684+ifdef CONFIG_GRKERNSEC_HIDESYM
56685+extra-y := grsec_hidesym.o
56686+$(obj)/grsec_hidesym.o:
56687+ @-chmod -f 500 /boot
56688+ @-chmod -f 500 /lib/modules
56689+ @-chmod -f 500 /lib64/modules
56690+ @-chmod -f 500 /lib32/modules
56691+ @-chmod -f 700 .
56692+ @echo ' grsec: protected kernel image paths'
56693+endif
56694diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
56695new file mode 100644
56696index 0000000..0767b2e
56697--- /dev/null
56698+++ b/grsecurity/gracl.c
56699@@ -0,0 +1,4067 @@
56700+#include <linux/kernel.h>
56701+#include <linux/module.h>
56702+#include <linux/sched.h>
56703+#include <linux/mm.h>
56704+#include <linux/file.h>
56705+#include <linux/fs.h>
56706+#include <linux/namei.h>
56707+#include <linux/mount.h>
56708+#include <linux/tty.h>
56709+#include <linux/proc_fs.h>
56710+#include <linux/lglock.h>
56711+#include <linux/slab.h>
56712+#include <linux/vmalloc.h>
56713+#include <linux/types.h>
56714+#include <linux/sysctl.h>
56715+#include <linux/netdevice.h>
56716+#include <linux/ptrace.h>
56717+#include <linux/gracl.h>
56718+#include <linux/gralloc.h>
56719+#include <linux/security.h>
56720+#include <linux/grinternal.h>
56721+#include <linux/pid_namespace.h>
56722+#include <linux/stop_machine.h>
56723+#include <linux/fdtable.h>
56724+#include <linux/percpu.h>
56725+#include <linux/lglock.h>
56726+#include <linux/hugetlb.h>
56727+#include "../fs/mount.h"
56728+
56729+#include <asm/uaccess.h>
56730+#include <asm/errno.h>
56731+#include <asm/mman.h>
56732+
56733+extern struct lglock vfsmount_lock;
56734+
56735+static struct acl_role_db acl_role_set;
56736+static struct name_db name_set;
56737+static struct inodev_db inodev_set;
56738+
56739+/* for keeping track of userspace pointers used for subjects, so we
56740+ can share references in the kernel as well
56741+*/
56742+
56743+static struct path real_root;
56744+
56745+static struct acl_subj_map_db subj_map_set;
56746+
56747+static struct acl_role_label *default_role;
56748+
56749+static struct acl_role_label *role_list;
56750+
56751+static u16 acl_sp_role_value;
56752+
56753+extern char *gr_shared_page[4];
56754+static DEFINE_MUTEX(gr_dev_mutex);
56755+DEFINE_RWLOCK(gr_inode_lock);
56756+
56757+struct gr_arg *gr_usermode;
56758+
56759+static unsigned int gr_status __read_only = GR_STATUS_INIT;
56760+
56761+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56762+extern void gr_clear_learn_entries(void);
56763+
56764+unsigned char *gr_system_salt;
56765+unsigned char *gr_system_sum;
56766+
56767+static struct sprole_pw **acl_special_roles = NULL;
56768+static __u16 num_sprole_pws = 0;
56769+
56770+static struct acl_role_label *kernel_role = NULL;
56771+
56772+static unsigned int gr_auth_attempts = 0;
56773+static unsigned long gr_auth_expires = 0UL;
56774+
56775+#ifdef CONFIG_NET
56776+extern struct vfsmount *sock_mnt;
56777+#endif
56778+
56779+extern struct vfsmount *pipe_mnt;
56780+extern struct vfsmount *shm_mnt;
56781+
56782+#ifdef CONFIG_HUGETLBFS
56783+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
56784+#endif
56785+
56786+static struct acl_object_label *fakefs_obj_rw;
56787+static struct acl_object_label *fakefs_obj_rwx;
56788+
56789+extern int gr_init_uidset(void);
56790+extern void gr_free_uidset(void);
56791+extern void gr_remove_uid(uid_t uid);
56792+extern int gr_find_uid(uid_t uid);
56793+
56794+__inline__ int
56795+gr_acl_is_enabled(void)
56796+{
56797+ return (gr_status & GR_READY);
56798+}
56799+
56800+#ifdef CONFIG_BTRFS_FS
56801+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56802+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56803+#endif
56804+
56805+static inline dev_t __get_dev(const struct dentry *dentry)
56806+{
56807+#ifdef CONFIG_BTRFS_FS
56808+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56809+ return get_btrfs_dev_from_inode(dentry->d_inode);
56810+ else
56811+#endif
56812+ return dentry->d_inode->i_sb->s_dev;
56813+}
56814+
56815+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56816+{
56817+ return __get_dev(dentry);
56818+}
56819+
56820+static char gr_task_roletype_to_char(struct task_struct *task)
56821+{
56822+ switch (task->role->roletype &
56823+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56824+ GR_ROLE_SPECIAL)) {
56825+ case GR_ROLE_DEFAULT:
56826+ return 'D';
56827+ case GR_ROLE_USER:
56828+ return 'U';
56829+ case GR_ROLE_GROUP:
56830+ return 'G';
56831+ case GR_ROLE_SPECIAL:
56832+ return 'S';
56833+ }
56834+
56835+ return 'X';
56836+}
56837+
56838+char gr_roletype_to_char(void)
56839+{
56840+ return gr_task_roletype_to_char(current);
56841+}
56842+
56843+__inline__ int
56844+gr_acl_tpe_check(void)
56845+{
56846+ if (unlikely(!(gr_status & GR_READY)))
56847+ return 0;
56848+ if (current->role->roletype & GR_ROLE_TPE)
56849+ return 1;
56850+ else
56851+ return 0;
56852+}
56853+
56854+int
56855+gr_handle_rawio(const struct inode *inode)
56856+{
56857+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56858+ if (inode && S_ISBLK(inode->i_mode) &&
56859+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56860+ !capable(CAP_SYS_RAWIO))
56861+ return 1;
56862+#endif
56863+ return 0;
56864+}
56865+
56866+static int
56867+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56868+{
56869+ if (likely(lena != lenb))
56870+ return 0;
56871+
56872+ return !memcmp(a, b, lena);
56873+}
56874+
56875+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56876+{
56877+ *buflen -= namelen;
56878+ if (*buflen < 0)
56879+ return -ENAMETOOLONG;
56880+ *buffer -= namelen;
56881+ memcpy(*buffer, str, namelen);
56882+ return 0;
56883+}
56884+
56885+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
56886+{
56887+ return prepend(buffer, buflen, name->name, name->len);
56888+}
56889+
56890+static int prepend_path(const struct path *path, struct path *root,
56891+ char **buffer, int *buflen)
56892+{
56893+ struct dentry *dentry = path->dentry;
56894+ struct vfsmount *vfsmnt = path->mnt;
56895+ struct mount *mnt = real_mount(vfsmnt);
56896+ bool slash = false;
56897+ int error = 0;
56898+
56899+ while (dentry != root->dentry || vfsmnt != root->mnt) {
56900+ struct dentry * parent;
56901+
56902+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56903+ /* Global root? */
56904+ if (!mnt_has_parent(mnt)) {
56905+ goto out;
56906+ }
56907+ dentry = mnt->mnt_mountpoint;
56908+ mnt = mnt->mnt_parent;
56909+ vfsmnt = &mnt->mnt;
56910+ continue;
56911+ }
56912+ parent = dentry->d_parent;
56913+ prefetch(parent);
56914+ spin_lock(&dentry->d_lock);
56915+ error = prepend_name(buffer, buflen, &dentry->d_name);
56916+ spin_unlock(&dentry->d_lock);
56917+ if (!error)
56918+ error = prepend(buffer, buflen, "/", 1);
56919+ if (error)
56920+ break;
56921+
56922+ slash = true;
56923+ dentry = parent;
56924+ }
56925+
56926+out:
56927+ if (!error && !slash)
56928+ error = prepend(buffer, buflen, "/", 1);
56929+
56930+ return error;
56931+}
56932+
56933+/* this must be called with vfsmount_lock and rename_lock held */
56934+
56935+static char *__our_d_path(const struct path *path, struct path *root,
56936+ char *buf, int buflen)
56937+{
56938+ char *res = buf + buflen;
56939+ int error;
56940+
56941+ prepend(&res, &buflen, "\0", 1);
56942+ error = prepend_path(path, root, &res, &buflen);
56943+ if (error)
56944+ return ERR_PTR(error);
56945+
56946+ return res;
56947+}
56948+
56949+static char *
56950+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
56951+{
56952+ char *retval;
56953+
56954+ retval = __our_d_path(path, root, buf, buflen);
56955+ if (unlikely(IS_ERR(retval)))
56956+ retval = strcpy(buf, "<path too long>");
56957+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56958+ retval[1] = '\0';
56959+
56960+ return retval;
56961+}
56962+
56963+static char *
56964+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56965+ char *buf, int buflen)
56966+{
56967+ struct path path;
56968+ char *res;
56969+
56970+ path.dentry = (struct dentry *)dentry;
56971+ path.mnt = (struct vfsmount *)vfsmnt;
56972+
56973+ /* we can use real_root.dentry, real_root.mnt, because this is only called
56974+ by the RBAC system */
56975+ res = gen_full_path(&path, &real_root, buf, buflen);
56976+
56977+ return res;
56978+}
56979+
56980+static char *
56981+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56982+ char *buf, int buflen)
56983+{
56984+ char *res;
56985+ struct path path;
56986+ struct path root;
56987+ struct task_struct *reaper = init_pid_ns.child_reaper;
56988+
56989+ path.dentry = (struct dentry *)dentry;
56990+ path.mnt = (struct vfsmount *)vfsmnt;
56991+
56992+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
56993+ get_fs_root(reaper->fs, &root);
56994+
56995+ write_seqlock(&rename_lock);
56996+ br_read_lock(&vfsmount_lock);
56997+ res = gen_full_path(&path, &root, buf, buflen);
56998+ br_read_unlock(&vfsmount_lock);
56999+ write_sequnlock(&rename_lock);
57000+
57001+ path_put(&root);
57002+ return res;
57003+}
57004+
57005+static char *
57006+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
57007+{
57008+ char *ret;
57009+ write_seqlock(&rename_lock);
57010+ br_read_lock(&vfsmount_lock);
57011+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57012+ PAGE_SIZE);
57013+ br_read_unlock(&vfsmount_lock);
57014+ write_sequnlock(&rename_lock);
57015+ return ret;
57016+}
57017+
57018+static char *
57019+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
57020+{
57021+ char *ret;
57022+ char *buf;
57023+ int buflen;
57024+
57025+ write_seqlock(&rename_lock);
57026+ br_read_lock(&vfsmount_lock);
57027+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
57028+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
57029+ buflen = (int)(ret - buf);
57030+ if (buflen >= 5)
57031+ prepend(&ret, &buflen, "/proc", 5);
57032+ else
57033+ ret = strcpy(buf, "<path too long>");
57034+ br_read_unlock(&vfsmount_lock);
57035+ write_sequnlock(&rename_lock);
57036+ return ret;
57037+}
57038+
57039+char *
57040+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
57041+{
57042+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57043+ PAGE_SIZE);
57044+}
57045+
57046+char *
57047+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
57048+{
57049+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
57050+ PAGE_SIZE);
57051+}
57052+
57053+char *
57054+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
57055+{
57056+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
57057+ PAGE_SIZE);
57058+}
57059+
57060+char *
57061+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
57062+{
57063+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
57064+ PAGE_SIZE);
57065+}
57066+
57067+char *
57068+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
57069+{
57070+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
57071+ PAGE_SIZE);
57072+}
57073+
57074+__inline__ __u32
57075+to_gr_audit(const __u32 reqmode)
57076+{
57077+ /* masks off auditable permission flags, then shifts them to create
57078+ auditing flags, and adds the special case of append auditing if
57079+ we're requesting write */
57080+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
57081+}
57082+
57083+struct acl_subject_label *
57084+lookup_subject_map(const struct acl_subject_label *userp)
57085+{
57086+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
57087+ struct subject_map *match;
57088+
57089+ match = subj_map_set.s_hash[index];
57090+
57091+ while (match && match->user != userp)
57092+ match = match->next;
57093+
57094+ if (match != NULL)
57095+ return match->kernel;
57096+ else
57097+ return NULL;
57098+}
57099+
57100+static void
57101+insert_subj_map_entry(struct subject_map *subjmap)
57102+{
57103+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
57104+ struct subject_map **curr;
57105+
57106+ subjmap->prev = NULL;
57107+
57108+ curr = &subj_map_set.s_hash[index];
57109+ if (*curr != NULL)
57110+ (*curr)->prev = subjmap;
57111+
57112+ subjmap->next = *curr;
57113+ *curr = subjmap;
57114+
57115+ return;
57116+}
57117+
57118+static struct acl_role_label *
57119+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
57120+ const gid_t gid)
57121+{
57122+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
57123+ struct acl_role_label *match;
57124+ struct role_allowed_ip *ipp;
57125+ unsigned int x;
57126+ u32 curr_ip = task->signal->curr_ip;
57127+
57128+ task->signal->saved_ip = curr_ip;
57129+
57130+ match = acl_role_set.r_hash[index];
57131+
57132+ while (match) {
57133+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
57134+ for (x = 0; x < match->domain_child_num; x++) {
57135+ if (match->domain_children[x] == uid)
57136+ goto found;
57137+ }
57138+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
57139+ break;
57140+ match = match->next;
57141+ }
57142+found:
57143+ if (match == NULL) {
57144+ try_group:
57145+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
57146+ match = acl_role_set.r_hash[index];
57147+
57148+ while (match) {
57149+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
57150+ for (x = 0; x < match->domain_child_num; x++) {
57151+ if (match->domain_children[x] == gid)
57152+ goto found2;
57153+ }
57154+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
57155+ break;
57156+ match = match->next;
57157+ }
57158+found2:
57159+ if (match == NULL)
57160+ match = default_role;
57161+ if (match->allowed_ips == NULL)
57162+ return match;
57163+ else {
57164+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57165+ if (likely
57166+ ((ntohl(curr_ip) & ipp->netmask) ==
57167+ (ntohl(ipp->addr) & ipp->netmask)))
57168+ return match;
57169+ }
57170+ match = default_role;
57171+ }
57172+ } else if (match->allowed_ips == NULL) {
57173+ return match;
57174+ } else {
57175+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57176+ if (likely
57177+ ((ntohl(curr_ip) & ipp->netmask) ==
57178+ (ntohl(ipp->addr) & ipp->netmask)))
57179+ return match;
57180+ }
57181+ goto try_group;
57182+ }
57183+
57184+ return match;
57185+}
57186+
57187+struct acl_subject_label *
57188+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
57189+ const struct acl_role_label *role)
57190+{
57191+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
57192+ struct acl_subject_label *match;
57193+
57194+ match = role->subj_hash[index];
57195+
57196+ while (match && (match->inode != ino || match->device != dev ||
57197+ (match->mode & GR_DELETED))) {
57198+ match = match->next;
57199+ }
57200+
57201+ if (match && !(match->mode & GR_DELETED))
57202+ return match;
57203+ else
57204+ return NULL;
57205+}
57206+
57207+struct acl_subject_label *
57208+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
57209+ const struct acl_role_label *role)
57210+{
57211+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
57212+ struct acl_subject_label *match;
57213+
57214+ match = role->subj_hash[index];
57215+
57216+ while (match && (match->inode != ino || match->device != dev ||
57217+ !(match->mode & GR_DELETED))) {
57218+ match = match->next;
57219+ }
57220+
57221+ if (match && (match->mode & GR_DELETED))
57222+ return match;
57223+ else
57224+ return NULL;
57225+}
57226+
57227+static struct acl_object_label *
57228+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
57229+ const struct acl_subject_label *subj)
57230+{
57231+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
57232+ struct acl_object_label *match;
57233+
57234+ match = subj->obj_hash[index];
57235+
57236+ while (match && (match->inode != ino || match->device != dev ||
57237+ (match->mode & GR_DELETED))) {
57238+ match = match->next;
57239+ }
57240+
57241+ if (match && !(match->mode & GR_DELETED))
57242+ return match;
57243+ else
57244+ return NULL;
57245+}
57246+
57247+static struct acl_object_label *
57248+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
57249+ const struct acl_subject_label *subj)
57250+{
57251+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
57252+ struct acl_object_label *match;
57253+
57254+ match = subj->obj_hash[index];
57255+
57256+ while (match && (match->inode != ino || match->device != dev ||
57257+ !(match->mode & GR_DELETED))) {
57258+ match = match->next;
57259+ }
57260+
57261+ if (match && (match->mode & GR_DELETED))
57262+ return match;
57263+
57264+ match = subj->obj_hash[index];
57265+
57266+ while (match && (match->inode != ino || match->device != dev ||
57267+ (match->mode & GR_DELETED))) {
57268+ match = match->next;
57269+ }
57270+
57271+ if (match && !(match->mode & GR_DELETED))
57272+ return match;
57273+ else
57274+ return NULL;
57275+}
57276+
57277+static struct name_entry *
57278+lookup_name_entry(const char *name)
57279+{
57280+ unsigned int len = strlen(name);
57281+ unsigned int key = full_name_hash(name, len);
57282+ unsigned int index = key % name_set.n_size;
57283+ struct name_entry *match;
57284+
57285+ match = name_set.n_hash[index];
57286+
57287+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
57288+ match = match->next;
57289+
57290+ return match;
57291+}
57292+
57293+static struct name_entry *
57294+lookup_name_entry_create(const char *name)
57295+{
57296+ unsigned int len = strlen(name);
57297+ unsigned int key = full_name_hash(name, len);
57298+ unsigned int index = key % name_set.n_size;
57299+ struct name_entry *match;
57300+
57301+ match = name_set.n_hash[index];
57302+
57303+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57304+ !match->deleted))
57305+ match = match->next;
57306+
57307+ if (match && match->deleted)
57308+ return match;
57309+
57310+ match = name_set.n_hash[index];
57311+
57312+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57313+ match->deleted))
57314+ match = match->next;
57315+
57316+ if (match && !match->deleted)
57317+ return match;
57318+ else
57319+ return NULL;
57320+}
57321+
57322+static struct inodev_entry *
57323+lookup_inodev_entry(const ino_t ino, const dev_t dev)
57324+{
57325+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
57326+ struct inodev_entry *match;
57327+
57328+ match = inodev_set.i_hash[index];
57329+
57330+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
57331+ match = match->next;
57332+
57333+ return match;
57334+}
57335+
57336+static void
57337+insert_inodev_entry(struct inodev_entry *entry)
57338+{
57339+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
57340+ inodev_set.i_size);
57341+ struct inodev_entry **curr;
57342+
57343+ entry->prev = NULL;
57344+
57345+ curr = &inodev_set.i_hash[index];
57346+ if (*curr != NULL)
57347+ (*curr)->prev = entry;
57348+
57349+ entry->next = *curr;
57350+ *curr = entry;
57351+
57352+ return;
57353+}
57354+
57355+static void
57356+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
57357+{
57358+ unsigned int index =
57359+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
57360+ struct acl_role_label **curr;
57361+ struct acl_role_label *tmp, *tmp2;
57362+
57363+ curr = &acl_role_set.r_hash[index];
57364+
57365+ /* simple case, slot is empty, just set it to our role */
57366+ if (*curr == NULL) {
57367+ *curr = role;
57368+ } else {
57369+ /* example:
57370+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
57371+ 2 -> 3
57372+ */
57373+ /* first check to see if we can already be reached via this slot */
57374+ tmp = *curr;
57375+ while (tmp && tmp != role)
57376+ tmp = tmp->next;
57377+ if (tmp == role) {
57378+ /* we don't need to add ourselves to this slot's chain */
57379+ return;
57380+ }
57381+ /* we need to add ourselves to this chain, two cases */
57382+ if (role->next == NULL) {
57383+ /* simple case, append the current chain to our role */
57384+ role->next = *curr;
57385+ *curr = role;
57386+ } else {
57387+ /* 1 -> 2 -> 3 -> 4
57388+ 2 -> 3 -> 4
57389+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
57390+ */
57391+ /* trickier case: walk our role's chain until we find
57392+ the role for the start of the current slot's chain */
57393+ tmp = role;
57394+ tmp2 = *curr;
57395+ while (tmp->next && tmp->next != tmp2)
57396+ tmp = tmp->next;
57397+ if (tmp->next == tmp2) {
57398+ /* from example above, we found 3, so just
57399+ replace this slot's chain with ours */
57400+ *curr = role;
57401+ } else {
57402+ /* we didn't find a subset of our role's chain
57403+ in the current slot's chain, so append their
57404+ chain to ours, and set us as the first role in
57405+ the slot's chain
57406+
57407+ we could fold this case with the case above,
57408+ but making it explicit for clarity
57409+ */
57410+ tmp->next = tmp2;
57411+ *curr = role;
57412+ }
57413+ }
57414+ }
57415+
57416+ return;
57417+}
57418+
57419+static void
57420+insert_acl_role_label(struct acl_role_label *role)
57421+{
57422+ int i;
57423+
57424+ if (role_list == NULL) {
57425+ role_list = role;
57426+ role->prev = NULL;
57427+ } else {
57428+ role->prev = role_list;
57429+ role_list = role;
57430+ }
57431+
57432+ /* used for hash chains */
57433+ role->next = NULL;
57434+
57435+ if (role->roletype & GR_ROLE_DOMAIN) {
57436+ for (i = 0; i < role->domain_child_num; i++)
57437+ __insert_acl_role_label(role, role->domain_children[i]);
57438+ } else
57439+ __insert_acl_role_label(role, role->uidgid);
57440+}
57441+
57442+static int
57443+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
57444+{
57445+ struct name_entry **curr, *nentry;
57446+ struct inodev_entry *ientry;
57447+ unsigned int len = strlen(name);
57448+ unsigned int key = full_name_hash(name, len);
57449+ unsigned int index = key % name_set.n_size;
57450+
57451+ curr = &name_set.n_hash[index];
57452+
57453+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
57454+ curr = &((*curr)->next);
57455+
57456+ if (*curr != NULL)
57457+ return 1;
57458+
57459+ nentry = acl_alloc(sizeof (struct name_entry));
57460+ if (nentry == NULL)
57461+ return 0;
57462+ ientry = acl_alloc(sizeof (struct inodev_entry));
57463+ if (ientry == NULL)
57464+ return 0;
57465+ ientry->nentry = nentry;
57466+
57467+ nentry->key = key;
57468+ nentry->name = name;
57469+ nentry->inode = inode;
57470+ nentry->device = device;
57471+ nentry->len = len;
57472+ nentry->deleted = deleted;
57473+
57474+ nentry->prev = NULL;
57475+ curr = &name_set.n_hash[index];
57476+ if (*curr != NULL)
57477+ (*curr)->prev = nentry;
57478+ nentry->next = *curr;
57479+ *curr = nentry;
57480+
57481+ /* insert us into the table searchable by inode/dev */
57482+ insert_inodev_entry(ientry);
57483+
57484+ return 1;
57485+}
57486+
57487+static void
57488+insert_acl_obj_label(struct acl_object_label *obj,
57489+ struct acl_subject_label *subj)
57490+{
57491+ unsigned int index =
57492+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
57493+ struct acl_object_label **curr;
57494+
57495+
57496+ obj->prev = NULL;
57497+
57498+ curr = &subj->obj_hash[index];
57499+ if (*curr != NULL)
57500+ (*curr)->prev = obj;
57501+
57502+ obj->next = *curr;
57503+ *curr = obj;
57504+
57505+ return;
57506+}
57507+
57508+static void
57509+insert_acl_subj_label(struct acl_subject_label *obj,
57510+ struct acl_role_label *role)
57511+{
57512+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
57513+ struct acl_subject_label **curr;
57514+
57515+ obj->prev = NULL;
57516+
57517+ curr = &role->subj_hash[index];
57518+ if (*curr != NULL)
57519+ (*curr)->prev = obj;
57520+
57521+ obj->next = *curr;
57522+ *curr = obj;
57523+
57524+ return;
57525+}
57526+
57527+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
57528+
57529+static void *
57530+create_table(__u32 * len, int elementsize)
57531+{
57532+ unsigned int table_sizes[] = {
57533+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
57534+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
57535+ 4194301, 8388593, 16777213, 33554393, 67108859
57536+ };
57537+ void *newtable = NULL;
57538+ unsigned int pwr = 0;
57539+
57540+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
57541+ table_sizes[pwr] <= *len)
57542+ pwr++;
57543+
57544+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
57545+ return newtable;
57546+
57547+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
57548+ newtable =
57549+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
57550+ else
57551+ newtable = vmalloc(table_sizes[pwr] * elementsize);
57552+
57553+ *len = table_sizes[pwr];
57554+
57555+ return newtable;
57556+}
57557+
57558+static int
57559+init_variables(const struct gr_arg *arg)
57560+{
57561+ struct task_struct *reaper = init_pid_ns.child_reaper;
57562+ unsigned int stacksize;
57563+
57564+ subj_map_set.s_size = arg->role_db.num_subjects;
57565+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
57566+ name_set.n_size = arg->role_db.num_objects;
57567+ inodev_set.i_size = arg->role_db.num_objects;
57568+
57569+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
57570+ !name_set.n_size || !inodev_set.i_size)
57571+ return 1;
57572+
57573+ if (!gr_init_uidset())
57574+ return 1;
57575+
57576+ /* set up the stack that holds allocation info */
57577+
57578+ stacksize = arg->role_db.num_pointers + 5;
57579+
57580+ if (!acl_alloc_stack_init(stacksize))
57581+ return 1;
57582+
57583+ /* grab reference for the real root dentry and vfsmount */
57584+ get_fs_root(reaper->fs, &real_root);
57585+
57586+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57587+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
57588+#endif
57589+
57590+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
57591+ if (fakefs_obj_rw == NULL)
57592+ return 1;
57593+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
57594+
57595+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
57596+ if (fakefs_obj_rwx == NULL)
57597+ return 1;
57598+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
57599+
57600+ subj_map_set.s_hash =
57601+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
57602+ acl_role_set.r_hash =
57603+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
57604+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
57605+ inodev_set.i_hash =
57606+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
57607+
57608+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
57609+ !name_set.n_hash || !inodev_set.i_hash)
57610+ return 1;
57611+
57612+ memset(subj_map_set.s_hash, 0,
57613+ sizeof(struct subject_map *) * subj_map_set.s_size);
57614+ memset(acl_role_set.r_hash, 0,
57615+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
57616+ memset(name_set.n_hash, 0,
57617+ sizeof (struct name_entry *) * name_set.n_size);
57618+ memset(inodev_set.i_hash, 0,
57619+ sizeof (struct inodev_entry *) * inodev_set.i_size);
57620+
57621+ return 0;
57622+}
57623+
57624+/* free information not needed after startup
57625+ currently contains user->kernel pointer mappings for subjects
57626+*/
57627+
57628+static void
57629+free_init_variables(void)
57630+{
57631+ __u32 i;
57632+
57633+ if (subj_map_set.s_hash) {
57634+ for (i = 0; i < subj_map_set.s_size; i++) {
57635+ if (subj_map_set.s_hash[i]) {
57636+ kfree(subj_map_set.s_hash[i]);
57637+ subj_map_set.s_hash[i] = NULL;
57638+ }
57639+ }
57640+
57641+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
57642+ PAGE_SIZE)
57643+ kfree(subj_map_set.s_hash);
57644+ else
57645+ vfree(subj_map_set.s_hash);
57646+ }
57647+
57648+ return;
57649+}
57650+
57651+static void
57652+free_variables(void)
57653+{
57654+ struct acl_subject_label *s;
57655+ struct acl_role_label *r;
57656+ struct task_struct *task, *task2;
57657+ unsigned int x;
57658+
57659+ gr_clear_learn_entries();
57660+
57661+ read_lock(&tasklist_lock);
57662+ do_each_thread(task2, task) {
57663+ task->acl_sp_role = 0;
57664+ task->acl_role_id = 0;
57665+ task->acl = NULL;
57666+ task->role = NULL;
57667+ } while_each_thread(task2, task);
57668+ read_unlock(&tasklist_lock);
57669+
57670+ /* release the reference to the real root dentry and vfsmount */
57671+ path_put(&real_root);
57672+ memset(&real_root, 0, sizeof(real_root));
57673+
57674+ /* free all object hash tables */
57675+
57676+ FOR_EACH_ROLE_START(r)
57677+ if (r->subj_hash == NULL)
57678+ goto next_role;
57679+ FOR_EACH_SUBJECT_START(r, s, x)
57680+ if (s->obj_hash == NULL)
57681+ break;
57682+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57683+ kfree(s->obj_hash);
57684+ else
57685+ vfree(s->obj_hash);
57686+ FOR_EACH_SUBJECT_END(s, x)
57687+ FOR_EACH_NESTED_SUBJECT_START(r, s)
57688+ if (s->obj_hash == NULL)
57689+ break;
57690+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57691+ kfree(s->obj_hash);
57692+ else
57693+ vfree(s->obj_hash);
57694+ FOR_EACH_NESTED_SUBJECT_END(s)
57695+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
57696+ kfree(r->subj_hash);
57697+ else
57698+ vfree(r->subj_hash);
57699+ r->subj_hash = NULL;
57700+next_role:
57701+ FOR_EACH_ROLE_END(r)
57702+
57703+ acl_free_all();
57704+
57705+ if (acl_role_set.r_hash) {
57706+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
57707+ PAGE_SIZE)
57708+ kfree(acl_role_set.r_hash);
57709+ else
57710+ vfree(acl_role_set.r_hash);
57711+ }
57712+ if (name_set.n_hash) {
57713+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
57714+ PAGE_SIZE)
57715+ kfree(name_set.n_hash);
57716+ else
57717+ vfree(name_set.n_hash);
57718+ }
57719+
57720+ if (inodev_set.i_hash) {
57721+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
57722+ PAGE_SIZE)
57723+ kfree(inodev_set.i_hash);
57724+ else
57725+ vfree(inodev_set.i_hash);
57726+ }
57727+
57728+ gr_free_uidset();
57729+
57730+ memset(&name_set, 0, sizeof (struct name_db));
57731+ memset(&inodev_set, 0, sizeof (struct inodev_db));
57732+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
57733+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
57734+
57735+ default_role = NULL;
57736+ kernel_role = NULL;
57737+ role_list = NULL;
57738+
57739+ return;
57740+}
57741+
57742+static __u32
57743+count_user_objs(struct acl_object_label *userp)
57744+{
57745+ struct acl_object_label o_tmp;
57746+ __u32 num = 0;
57747+
57748+ while (userp) {
57749+ if (copy_from_user(&o_tmp, userp,
57750+ sizeof (struct acl_object_label)))
57751+ break;
57752+
57753+ userp = o_tmp.prev;
57754+ num++;
57755+ }
57756+
57757+ return num;
57758+}
57759+
57760+static struct acl_subject_label *
57761+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
57762+
57763+static int
57764+copy_user_glob(struct acl_object_label *obj)
57765+{
57766+ struct acl_object_label *g_tmp, **guser;
57767+ unsigned int len;
57768+ char *tmp;
57769+
57770+ if (obj->globbed == NULL)
57771+ return 0;
57772+
57773+ guser = &obj->globbed;
57774+ while (*guser) {
57775+ g_tmp = (struct acl_object_label *)
57776+ acl_alloc(sizeof (struct acl_object_label));
57777+ if (g_tmp == NULL)
57778+ return -ENOMEM;
57779+
57780+ if (copy_from_user(g_tmp, *guser,
57781+ sizeof (struct acl_object_label)))
57782+ return -EFAULT;
57783+
57784+ len = strnlen_user(g_tmp->filename, PATH_MAX);
57785+
57786+ if (!len || len >= PATH_MAX)
57787+ return -EINVAL;
57788+
57789+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57790+ return -ENOMEM;
57791+
57792+ if (copy_from_user(tmp, g_tmp->filename, len))
57793+ return -EFAULT;
57794+ tmp[len-1] = '\0';
57795+ g_tmp->filename = tmp;
57796+
57797+ *guser = g_tmp;
57798+ guser = &(g_tmp->next);
57799+ }
57800+
57801+ return 0;
57802+}
57803+
57804+static int
57805+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57806+ struct acl_role_label *role)
57807+{
57808+ struct acl_object_label *o_tmp;
57809+ unsigned int len;
57810+ int ret;
57811+ char *tmp;
57812+
57813+ while (userp) {
57814+ if ((o_tmp = (struct acl_object_label *)
57815+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
57816+ return -ENOMEM;
57817+
57818+ if (copy_from_user(o_tmp, userp,
57819+ sizeof (struct acl_object_label)))
57820+ return -EFAULT;
57821+
57822+ userp = o_tmp->prev;
57823+
57824+ len = strnlen_user(o_tmp->filename, PATH_MAX);
57825+
57826+ if (!len || len >= PATH_MAX)
57827+ return -EINVAL;
57828+
57829+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57830+ return -ENOMEM;
57831+
57832+ if (copy_from_user(tmp, o_tmp->filename, len))
57833+ return -EFAULT;
57834+ tmp[len-1] = '\0';
57835+ o_tmp->filename = tmp;
57836+
57837+ insert_acl_obj_label(o_tmp, subj);
57838+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57839+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57840+ return -ENOMEM;
57841+
57842+ ret = copy_user_glob(o_tmp);
57843+ if (ret)
57844+ return ret;
57845+
57846+ if (o_tmp->nested) {
57847+ int already_copied;
57848+
57849+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
57850+ if (IS_ERR(o_tmp->nested))
57851+ return PTR_ERR(o_tmp->nested);
57852+
57853+ /* insert into nested subject list if we haven't copied this one yet
57854+ to prevent duplicate entries */
57855+ if (!already_copied) {
57856+ o_tmp->nested->next = role->hash->first;
57857+ role->hash->first = o_tmp->nested;
57858+ }
57859+ }
57860+ }
57861+
57862+ return 0;
57863+}
57864+
57865+static __u32
57866+count_user_subjs(struct acl_subject_label *userp)
57867+{
57868+ struct acl_subject_label s_tmp;
57869+ __u32 num = 0;
57870+
57871+ while (userp) {
57872+ if (copy_from_user(&s_tmp, userp,
57873+ sizeof (struct acl_subject_label)))
57874+ break;
57875+
57876+ userp = s_tmp.prev;
57877+ }
57878+
57879+ return num;
57880+}
57881+
57882+static int
57883+copy_user_allowedips(struct acl_role_label *rolep)
57884+{
57885+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57886+
57887+ ruserip = rolep->allowed_ips;
57888+
57889+ while (ruserip) {
57890+ rlast = rtmp;
57891+
57892+ if ((rtmp = (struct role_allowed_ip *)
57893+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57894+ return -ENOMEM;
57895+
57896+ if (copy_from_user(rtmp, ruserip,
57897+ sizeof (struct role_allowed_ip)))
57898+ return -EFAULT;
57899+
57900+ ruserip = rtmp->prev;
57901+
57902+ if (!rlast) {
57903+ rtmp->prev = NULL;
57904+ rolep->allowed_ips = rtmp;
57905+ } else {
57906+ rlast->next = rtmp;
57907+ rtmp->prev = rlast;
57908+ }
57909+
57910+ if (!ruserip)
57911+ rtmp->next = NULL;
57912+ }
57913+
57914+ return 0;
57915+}
57916+
57917+static int
57918+copy_user_transitions(struct acl_role_label *rolep)
57919+{
57920+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
57921+
57922+ unsigned int len;
57923+ char *tmp;
57924+
57925+ rusertp = rolep->transitions;
57926+
57927+ while (rusertp) {
57928+ rlast = rtmp;
57929+
57930+ if ((rtmp = (struct role_transition *)
57931+ acl_alloc(sizeof (struct role_transition))) == NULL)
57932+ return -ENOMEM;
57933+
57934+ if (copy_from_user(rtmp, rusertp,
57935+ sizeof (struct role_transition)))
57936+ return -EFAULT;
57937+
57938+ rusertp = rtmp->prev;
57939+
57940+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57941+
57942+ if (!len || len >= GR_SPROLE_LEN)
57943+ return -EINVAL;
57944+
57945+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57946+ return -ENOMEM;
57947+
57948+ if (copy_from_user(tmp, rtmp->rolename, len))
57949+ return -EFAULT;
57950+ tmp[len-1] = '\0';
57951+ rtmp->rolename = tmp;
57952+
57953+ if (!rlast) {
57954+ rtmp->prev = NULL;
57955+ rolep->transitions = rtmp;
57956+ } else {
57957+ rlast->next = rtmp;
57958+ rtmp->prev = rlast;
57959+ }
57960+
57961+ if (!rusertp)
57962+ rtmp->next = NULL;
57963+ }
57964+
57965+ return 0;
57966+}
57967+
57968+static struct acl_subject_label *
57969+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
57970+{
57971+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57972+ unsigned int len;
57973+ char *tmp;
57974+ __u32 num_objs;
57975+ struct acl_ip_label **i_tmp, *i_utmp2;
57976+ struct gr_hash_struct ghash;
57977+ struct subject_map *subjmap;
57978+ unsigned int i_num;
57979+ int err;
57980+
57981+ if (already_copied != NULL)
57982+ *already_copied = 0;
57983+
57984+ s_tmp = lookup_subject_map(userp);
57985+
57986+ /* we've already copied this subject into the kernel, just return
57987+ the reference to it, and don't copy it over again
57988+ */
57989+ if (s_tmp) {
57990+ if (already_copied != NULL)
57991+ *already_copied = 1;
57992+ return(s_tmp);
57993+ }
57994+
57995+ if ((s_tmp = (struct acl_subject_label *)
57996+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57997+ return ERR_PTR(-ENOMEM);
57998+
57999+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
58000+ if (subjmap == NULL)
58001+ return ERR_PTR(-ENOMEM);
58002+
58003+ subjmap->user = userp;
58004+ subjmap->kernel = s_tmp;
58005+ insert_subj_map_entry(subjmap);
58006+
58007+ if (copy_from_user(s_tmp, userp,
58008+ sizeof (struct acl_subject_label)))
58009+ return ERR_PTR(-EFAULT);
58010+
58011+ len = strnlen_user(s_tmp->filename, PATH_MAX);
58012+
58013+ if (!len || len >= PATH_MAX)
58014+ return ERR_PTR(-EINVAL);
58015+
58016+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58017+ return ERR_PTR(-ENOMEM);
58018+
58019+ if (copy_from_user(tmp, s_tmp->filename, len))
58020+ return ERR_PTR(-EFAULT);
58021+ tmp[len-1] = '\0';
58022+ s_tmp->filename = tmp;
58023+
58024+ if (!strcmp(s_tmp->filename, "/"))
58025+ role->root_label = s_tmp;
58026+
58027+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
58028+ return ERR_PTR(-EFAULT);
58029+
58030+ /* copy user and group transition tables */
58031+
58032+ if (s_tmp->user_trans_num) {
58033+ uid_t *uidlist;
58034+
58035+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
58036+ if (uidlist == NULL)
58037+ return ERR_PTR(-ENOMEM);
58038+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
58039+ return ERR_PTR(-EFAULT);
58040+
58041+ s_tmp->user_transitions = uidlist;
58042+ }
58043+
58044+ if (s_tmp->group_trans_num) {
58045+ gid_t *gidlist;
58046+
58047+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
58048+ if (gidlist == NULL)
58049+ return ERR_PTR(-ENOMEM);
58050+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
58051+ return ERR_PTR(-EFAULT);
58052+
58053+ s_tmp->group_transitions = gidlist;
58054+ }
58055+
58056+ /* set up object hash table */
58057+ num_objs = count_user_objs(ghash.first);
58058+
58059+ s_tmp->obj_hash_size = num_objs;
58060+ s_tmp->obj_hash =
58061+ (struct acl_object_label **)
58062+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
58063+
58064+ if (!s_tmp->obj_hash)
58065+ return ERR_PTR(-ENOMEM);
58066+
58067+ memset(s_tmp->obj_hash, 0,
58068+ s_tmp->obj_hash_size *
58069+ sizeof (struct acl_object_label *));
58070+
58071+ /* add in objects */
58072+ err = copy_user_objs(ghash.first, s_tmp, role);
58073+
58074+ if (err)
58075+ return ERR_PTR(err);
58076+
58077+ /* set pointer for parent subject */
58078+ if (s_tmp->parent_subject) {
58079+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
58080+
58081+ if (IS_ERR(s_tmp2))
58082+ return s_tmp2;
58083+
58084+ s_tmp->parent_subject = s_tmp2;
58085+ }
58086+
58087+ /* add in ip acls */
58088+
58089+ if (!s_tmp->ip_num) {
58090+ s_tmp->ips = NULL;
58091+ goto insert;
58092+ }
58093+
58094+ i_tmp =
58095+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
58096+ sizeof (struct acl_ip_label *));
58097+
58098+ if (!i_tmp)
58099+ return ERR_PTR(-ENOMEM);
58100+
58101+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
58102+ *(i_tmp + i_num) =
58103+ (struct acl_ip_label *)
58104+ acl_alloc(sizeof (struct acl_ip_label));
58105+ if (!*(i_tmp + i_num))
58106+ return ERR_PTR(-ENOMEM);
58107+
58108+ if (copy_from_user
58109+ (&i_utmp2, s_tmp->ips + i_num,
58110+ sizeof (struct acl_ip_label *)))
58111+ return ERR_PTR(-EFAULT);
58112+
58113+ if (copy_from_user
58114+ (*(i_tmp + i_num), i_utmp2,
58115+ sizeof (struct acl_ip_label)))
58116+ return ERR_PTR(-EFAULT);
58117+
58118+ if ((*(i_tmp + i_num))->iface == NULL)
58119+ continue;
58120+
58121+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
58122+ if (!len || len >= IFNAMSIZ)
58123+ return ERR_PTR(-EINVAL);
58124+ tmp = acl_alloc(len);
58125+ if (tmp == NULL)
58126+ return ERR_PTR(-ENOMEM);
58127+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
58128+ return ERR_PTR(-EFAULT);
58129+ (*(i_tmp + i_num))->iface = tmp;
58130+ }
58131+
58132+ s_tmp->ips = i_tmp;
58133+
58134+insert:
58135+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
58136+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
58137+ return ERR_PTR(-ENOMEM);
58138+
58139+ return s_tmp;
58140+}
58141+
58142+static int
58143+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
58144+{
58145+ struct acl_subject_label s_pre;
58146+ struct acl_subject_label * ret;
58147+ int err;
58148+
58149+ while (userp) {
58150+ if (copy_from_user(&s_pre, userp,
58151+ sizeof (struct acl_subject_label)))
58152+ return -EFAULT;
58153+
58154+ ret = do_copy_user_subj(userp, role, NULL);
58155+
58156+ err = PTR_ERR(ret);
58157+ if (IS_ERR(ret))
58158+ return err;
58159+
58160+ insert_acl_subj_label(ret, role);
58161+
58162+ userp = s_pre.prev;
58163+ }
58164+
58165+ return 0;
58166+}
58167+
58168+static int
58169+copy_user_acl(struct gr_arg *arg)
58170+{
58171+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
58172+ struct acl_subject_label *subj_list;
58173+ struct sprole_pw *sptmp;
58174+ struct gr_hash_struct *ghash;
58175+ uid_t *domainlist;
58176+ unsigned int r_num;
58177+ unsigned int len;
58178+ char *tmp;
58179+ int err = 0;
58180+ __u16 i;
58181+ __u32 num_subjs;
58182+
58183+ /* we need a default and kernel role */
58184+ if (arg->role_db.num_roles < 2)
58185+ return -EINVAL;
58186+
58187+ /* copy special role authentication info from userspace */
58188+
58189+ num_sprole_pws = arg->num_sprole_pws;
58190+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
58191+
58192+ if (!acl_special_roles && num_sprole_pws)
58193+ return -ENOMEM;
58194+
58195+ for (i = 0; i < num_sprole_pws; i++) {
58196+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
58197+ if (!sptmp)
58198+ return -ENOMEM;
58199+ if (copy_from_user(sptmp, arg->sprole_pws + i,
58200+ sizeof (struct sprole_pw)))
58201+ return -EFAULT;
58202+
58203+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
58204+
58205+ if (!len || len >= GR_SPROLE_LEN)
58206+ return -EINVAL;
58207+
58208+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58209+ return -ENOMEM;
58210+
58211+ if (copy_from_user(tmp, sptmp->rolename, len))
58212+ return -EFAULT;
58213+
58214+ tmp[len-1] = '\0';
58215+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58216+ printk(KERN_ALERT "Copying special role %s\n", tmp);
58217+#endif
58218+ sptmp->rolename = tmp;
58219+ acl_special_roles[i] = sptmp;
58220+ }
58221+
58222+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
58223+
58224+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
58225+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
58226+
58227+ if (!r_tmp)
58228+ return -ENOMEM;
58229+
58230+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
58231+ sizeof (struct acl_role_label *)))
58232+ return -EFAULT;
58233+
58234+ if (copy_from_user(r_tmp, r_utmp2,
58235+ sizeof (struct acl_role_label)))
58236+ return -EFAULT;
58237+
58238+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
58239+
58240+ if (!len || len >= PATH_MAX)
58241+ return -EINVAL;
58242+
58243+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58244+ return -ENOMEM;
58245+
58246+ if (copy_from_user(tmp, r_tmp->rolename, len))
58247+ return -EFAULT;
58248+
58249+ tmp[len-1] = '\0';
58250+ r_tmp->rolename = tmp;
58251+
58252+ if (!strcmp(r_tmp->rolename, "default")
58253+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
58254+ default_role = r_tmp;
58255+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
58256+ kernel_role = r_tmp;
58257+ }
58258+
58259+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
58260+ return -ENOMEM;
58261+
58262+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
58263+ return -EFAULT;
58264+
58265+ r_tmp->hash = ghash;
58266+
58267+ num_subjs = count_user_subjs(r_tmp->hash->first);
58268+
58269+ r_tmp->subj_hash_size = num_subjs;
58270+ r_tmp->subj_hash =
58271+ (struct acl_subject_label **)
58272+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
58273+
58274+ if (!r_tmp->subj_hash)
58275+ return -ENOMEM;
58276+
58277+ err = copy_user_allowedips(r_tmp);
58278+ if (err)
58279+ return err;
58280+
58281+ /* copy domain info */
58282+ if (r_tmp->domain_children != NULL) {
58283+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
58284+ if (domainlist == NULL)
58285+ return -ENOMEM;
58286+
58287+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
58288+ return -EFAULT;
58289+
58290+ r_tmp->domain_children = domainlist;
58291+ }
58292+
58293+ err = copy_user_transitions(r_tmp);
58294+ if (err)
58295+ return err;
58296+
58297+ memset(r_tmp->subj_hash, 0,
58298+ r_tmp->subj_hash_size *
58299+ sizeof (struct acl_subject_label *));
58300+
58301+ /* acquire the list of subjects, then NULL out
58302+ the list prior to parsing the subjects for this role,
58303+ as during this parsing the list is replaced with a list
58304+ of *nested* subjects for the role
58305+ */
58306+ subj_list = r_tmp->hash->first;
58307+
58308+ /* set nested subject list to null */
58309+ r_tmp->hash->first = NULL;
58310+
58311+ err = copy_user_subjs(subj_list, r_tmp);
58312+
58313+ if (err)
58314+ return err;
58315+
58316+ insert_acl_role_label(r_tmp);
58317+ }
58318+
58319+ if (default_role == NULL || kernel_role == NULL)
58320+ return -EINVAL;
58321+
58322+ return err;
58323+}
58324+
58325+static int
58326+gracl_init(struct gr_arg *args)
58327+{
58328+ int error = 0;
58329+
58330+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
58331+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
58332+
58333+ if (init_variables(args)) {
58334+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
58335+ error = -ENOMEM;
58336+ free_variables();
58337+ goto out;
58338+ }
58339+
58340+ error = copy_user_acl(args);
58341+ free_init_variables();
58342+ if (error) {
58343+ free_variables();
58344+ goto out;
58345+ }
58346+
58347+ if ((error = gr_set_acls(0))) {
58348+ free_variables();
58349+ goto out;
58350+ }
58351+
58352+ pax_open_kernel();
58353+ gr_status |= GR_READY;
58354+ pax_close_kernel();
58355+
58356+ out:
58357+ return error;
58358+}
58359+
58360+/* derived from glibc fnmatch() 0: match, 1: no match*/
58361+
58362+static int
58363+glob_match(const char *p, const char *n)
58364+{
58365+ char c;
58366+
58367+ while ((c = *p++) != '\0') {
58368+ switch (c) {
58369+ case '?':
58370+ if (*n == '\0')
58371+ return 1;
58372+ else if (*n == '/')
58373+ return 1;
58374+ break;
58375+ case '\\':
58376+ if (*n != c)
58377+ return 1;
58378+ break;
58379+ case '*':
58380+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
58381+ if (*n == '/')
58382+ return 1;
58383+ else if (c == '?') {
58384+ if (*n == '\0')
58385+ return 1;
58386+ else
58387+ ++n;
58388+ }
58389+ }
58390+ if (c == '\0') {
58391+ return 0;
58392+ } else {
58393+ const char *endp;
58394+
58395+ if ((endp = strchr(n, '/')) == NULL)
58396+ endp = n + strlen(n);
58397+
58398+ if (c == '[') {
58399+ for (--p; n < endp; ++n)
58400+ if (!glob_match(p, n))
58401+ return 0;
58402+ } else if (c == '/') {
58403+ while (*n != '\0' && *n != '/')
58404+ ++n;
58405+ if (*n == '/' && !glob_match(p, n + 1))
58406+ return 0;
58407+ } else {
58408+ for (--p; n < endp; ++n)
58409+ if (*n == c && !glob_match(p, n))
58410+ return 0;
58411+ }
58412+
58413+ return 1;
58414+ }
58415+ case '[':
58416+ {
58417+ int not;
58418+ char cold;
58419+
58420+ if (*n == '\0' || *n == '/')
58421+ return 1;
58422+
58423+ not = (*p == '!' || *p == '^');
58424+ if (not)
58425+ ++p;
58426+
58427+ c = *p++;
58428+ for (;;) {
58429+ unsigned char fn = (unsigned char)*n;
58430+
58431+ if (c == '\0')
58432+ return 1;
58433+ else {
58434+ if (c == fn)
58435+ goto matched;
58436+ cold = c;
58437+ c = *p++;
58438+
58439+ if (c == '-' && *p != ']') {
58440+ unsigned char cend = *p++;
58441+
58442+ if (cend == '\0')
58443+ return 1;
58444+
58445+ if (cold <= fn && fn <= cend)
58446+ goto matched;
58447+
58448+ c = *p++;
58449+ }
58450+ }
58451+
58452+ if (c == ']')
58453+ break;
58454+ }
58455+ if (!not)
58456+ return 1;
58457+ break;
58458+ matched:
58459+ while (c != ']') {
58460+ if (c == '\0')
58461+ return 1;
58462+
58463+ c = *p++;
58464+ }
58465+ if (not)
58466+ return 1;
58467+ }
58468+ break;
58469+ default:
58470+ if (c != *n)
58471+ return 1;
58472+ }
58473+
58474+ ++n;
58475+ }
58476+
58477+ if (*n == '\0')
58478+ return 0;
58479+
58480+ if (*n == '/')
58481+ return 0;
58482+
58483+ return 1;
58484+}
58485+
58486+static struct acl_object_label *
58487+chk_glob_label(struct acl_object_label *globbed,
58488+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
58489+{
58490+ struct acl_object_label *tmp;
58491+
58492+ if (*path == NULL)
58493+ *path = gr_to_filename_nolock(dentry, mnt);
58494+
58495+ tmp = globbed;
58496+
58497+ while (tmp) {
58498+ if (!glob_match(tmp->filename, *path))
58499+ return tmp;
58500+ tmp = tmp->next;
58501+ }
58502+
58503+ return NULL;
58504+}
58505+
58506+static struct acl_object_label *
58507+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58508+ const ino_t curr_ino, const dev_t curr_dev,
58509+ const struct acl_subject_label *subj, char **path, const int checkglob)
58510+{
58511+ struct acl_subject_label *tmpsubj;
58512+ struct acl_object_label *retval;
58513+ struct acl_object_label *retval2;
58514+
58515+ tmpsubj = (struct acl_subject_label *) subj;
58516+ read_lock(&gr_inode_lock);
58517+ do {
58518+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
58519+ if (retval) {
58520+ if (checkglob && retval->globbed) {
58521+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
58522+ if (retval2)
58523+ retval = retval2;
58524+ }
58525+ break;
58526+ }
58527+ } while ((tmpsubj = tmpsubj->parent_subject));
58528+ read_unlock(&gr_inode_lock);
58529+
58530+ return retval;
58531+}
58532+
58533+static __inline__ struct acl_object_label *
58534+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58535+ struct dentry *curr_dentry,
58536+ const struct acl_subject_label *subj, char **path, const int checkglob)
58537+{
58538+ int newglob = checkglob;
58539+ ino_t inode;
58540+ dev_t device;
58541+
58542+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
58543+ as we don't want a / * rule to match instead of the / object
58544+ don't do this for create lookups that call this function though, since they're looking up
58545+ on the parent and thus need globbing checks on all paths
58546+ */
58547+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
58548+ newglob = GR_NO_GLOB;
58549+
58550+ spin_lock(&curr_dentry->d_lock);
58551+ inode = curr_dentry->d_inode->i_ino;
58552+ device = __get_dev(curr_dentry);
58553+ spin_unlock(&curr_dentry->d_lock);
58554+
58555+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
58556+}
58557+
58558+#ifdef CONFIG_HUGETLBFS
58559+static inline bool
58560+is_hugetlbfs_mnt(const struct vfsmount *mnt)
58561+{
58562+ int i;
58563+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
58564+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
58565+ return true;
58566+ }
58567+
58568+ return false;
58569+}
58570+#endif
58571+
58572+static struct acl_object_label *
58573+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58574+ const struct acl_subject_label *subj, char *path, const int checkglob)
58575+{
58576+ struct dentry *dentry = (struct dentry *) l_dentry;
58577+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58578+ struct mount *real_mnt = real_mount(mnt);
58579+ struct acl_object_label *retval;
58580+ struct dentry *parent;
58581+
58582+ write_seqlock(&rename_lock);
58583+ br_read_lock(&vfsmount_lock);
58584+
58585+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
58586+#ifdef CONFIG_NET
58587+ mnt == sock_mnt ||
58588+#endif
58589+#ifdef CONFIG_HUGETLBFS
58590+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
58591+#endif
58592+ /* ignore Eric Biederman */
58593+ IS_PRIVATE(l_dentry->d_inode))) {
58594+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58595+ goto out;
58596+ }
58597+
58598+ for (;;) {
58599+ if (dentry == real_root.dentry && mnt == real_root.mnt)
58600+ break;
58601+
58602+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58603+ if (!mnt_has_parent(real_mnt))
58604+ break;
58605+
58606+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58607+ if (retval != NULL)
58608+ goto out;
58609+
58610+ dentry = real_mnt->mnt_mountpoint;
58611+ real_mnt = real_mnt->mnt_parent;
58612+ mnt = &real_mnt->mnt;
58613+ continue;
58614+ }
58615+
58616+ parent = dentry->d_parent;
58617+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58618+ if (retval != NULL)
58619+ goto out;
58620+
58621+ dentry = parent;
58622+ }
58623+
58624+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58625+
58626+ /* real_root is pinned so we don't have to hold a reference */
58627+ if (retval == NULL)
58628+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
58629+out:
58630+ br_read_unlock(&vfsmount_lock);
58631+ write_sequnlock(&rename_lock);
58632+
58633+ BUG_ON(retval == NULL);
58634+
58635+ return retval;
58636+}
58637+
58638+static __inline__ struct acl_object_label *
58639+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58640+ const struct acl_subject_label *subj)
58641+{
58642+ char *path = NULL;
58643+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
58644+}
58645+
58646+static __inline__ struct acl_object_label *
58647+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58648+ const struct acl_subject_label *subj)
58649+{
58650+ char *path = NULL;
58651+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
58652+}
58653+
58654+static __inline__ struct acl_object_label *
58655+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58656+ const struct acl_subject_label *subj, char *path)
58657+{
58658+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
58659+}
58660+
58661+static struct acl_subject_label *
58662+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58663+ const struct acl_role_label *role)
58664+{
58665+ struct dentry *dentry = (struct dentry *) l_dentry;
58666+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58667+ struct mount *real_mnt = real_mount(mnt);
58668+ struct acl_subject_label *retval;
58669+ struct dentry *parent;
58670+
58671+ write_seqlock(&rename_lock);
58672+ br_read_lock(&vfsmount_lock);
58673+
58674+ for (;;) {
58675+ if (dentry == real_root.dentry && mnt == real_root.mnt)
58676+ break;
58677+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58678+ if (!mnt_has_parent(real_mnt))
58679+ break;
58680+
58681+ spin_lock(&dentry->d_lock);
58682+ read_lock(&gr_inode_lock);
58683+ retval =
58684+ lookup_acl_subj_label(dentry->d_inode->i_ino,
58685+ __get_dev(dentry), role);
58686+ read_unlock(&gr_inode_lock);
58687+ spin_unlock(&dentry->d_lock);
58688+ if (retval != NULL)
58689+ goto out;
58690+
58691+ dentry = real_mnt->mnt_mountpoint;
58692+ real_mnt = real_mnt->mnt_parent;
58693+ mnt = &real_mnt->mnt;
58694+ continue;
58695+ }
58696+
58697+ spin_lock(&dentry->d_lock);
58698+ read_lock(&gr_inode_lock);
58699+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58700+ __get_dev(dentry), role);
58701+ read_unlock(&gr_inode_lock);
58702+ parent = dentry->d_parent;
58703+ spin_unlock(&dentry->d_lock);
58704+
58705+ if (retval != NULL)
58706+ goto out;
58707+
58708+ dentry = parent;
58709+ }
58710+
58711+ spin_lock(&dentry->d_lock);
58712+ read_lock(&gr_inode_lock);
58713+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58714+ __get_dev(dentry), role);
58715+ read_unlock(&gr_inode_lock);
58716+ spin_unlock(&dentry->d_lock);
58717+
58718+ if (unlikely(retval == NULL)) {
58719+ /* real_root is pinned, we don't need to hold a reference */
58720+ read_lock(&gr_inode_lock);
58721+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
58722+ __get_dev(real_root.dentry), role);
58723+ read_unlock(&gr_inode_lock);
58724+ }
58725+out:
58726+ br_read_unlock(&vfsmount_lock);
58727+ write_sequnlock(&rename_lock);
58728+
58729+ BUG_ON(retval == NULL);
58730+
58731+ return retval;
58732+}
58733+
58734+static void
58735+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
58736+{
58737+ struct task_struct *task = current;
58738+ const struct cred *cred = current_cred();
58739+
58740+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58741+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58742+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58743+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58744+
58745+ return;
58746+}
58747+
58748+static void
58749+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
58750+{
58751+ struct task_struct *task = current;
58752+ const struct cred *cred = current_cred();
58753+
58754+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58755+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58756+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58757+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
58758+
58759+ return;
58760+}
58761+
58762+static void
58763+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
58764+{
58765+ struct task_struct *task = current;
58766+ const struct cred *cred = current_cred();
58767+
58768+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58769+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58770+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58771+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
58772+
58773+ return;
58774+}
58775+
58776+__u32
58777+gr_search_file(const struct dentry * dentry, const __u32 mode,
58778+ const struct vfsmount * mnt)
58779+{
58780+ __u32 retval = mode;
58781+ struct acl_subject_label *curracl;
58782+ struct acl_object_label *currobj;
58783+
58784+ if (unlikely(!(gr_status & GR_READY)))
58785+ return (mode & ~GR_AUDITS);
58786+
58787+ curracl = current->acl;
58788+
58789+ currobj = chk_obj_label(dentry, mnt, curracl);
58790+ retval = currobj->mode & mode;
58791+
58792+ /* if we're opening a specified transfer file for writing
58793+ (e.g. /dev/initctl), then transfer our role to init
58794+ */
58795+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58796+ current->role->roletype & GR_ROLE_PERSIST)) {
58797+ struct task_struct *task = init_pid_ns.child_reaper;
58798+
58799+ if (task->role != current->role) {
58800+ task->acl_sp_role = 0;
58801+ task->acl_role_id = current->acl_role_id;
58802+ task->role = current->role;
58803+ rcu_read_lock();
58804+ read_lock(&grsec_exec_file_lock);
58805+ gr_apply_subject_to_task(task);
58806+ read_unlock(&grsec_exec_file_lock);
58807+ rcu_read_unlock();
58808+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58809+ }
58810+ }
58811+
58812+ if (unlikely
58813+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58814+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58815+ __u32 new_mode = mode;
58816+
58817+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58818+
58819+ retval = new_mode;
58820+
58821+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58822+ new_mode |= GR_INHERIT;
58823+
58824+ if (!(mode & GR_NOLEARN))
58825+ gr_log_learn(dentry, mnt, new_mode);
58826+ }
58827+
58828+ return retval;
58829+}
58830+
58831+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58832+ const struct dentry *parent,
58833+ const struct vfsmount *mnt)
58834+{
58835+ struct name_entry *match;
58836+ struct acl_object_label *matchpo;
58837+ struct acl_subject_label *curracl;
58838+ char *path;
58839+
58840+ if (unlikely(!(gr_status & GR_READY)))
58841+ return NULL;
58842+
58843+ preempt_disable();
58844+ path = gr_to_filename_rbac(new_dentry, mnt);
58845+ match = lookup_name_entry_create(path);
58846+
58847+ curracl = current->acl;
58848+
58849+ if (match) {
58850+ read_lock(&gr_inode_lock);
58851+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58852+ read_unlock(&gr_inode_lock);
58853+
58854+ if (matchpo) {
58855+ preempt_enable();
58856+ return matchpo;
58857+ }
58858+ }
58859+
58860+ // lookup parent
58861+
58862+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58863+
58864+ preempt_enable();
58865+ return matchpo;
58866+}
58867+
58868+__u32
58869+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58870+ const struct vfsmount * mnt, const __u32 mode)
58871+{
58872+ struct acl_object_label *matchpo;
58873+ __u32 retval;
58874+
58875+ if (unlikely(!(gr_status & GR_READY)))
58876+ return (mode & ~GR_AUDITS);
58877+
58878+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
58879+
58880+ retval = matchpo->mode & mode;
58881+
58882+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58883+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58884+ __u32 new_mode = mode;
58885+
58886+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58887+
58888+ gr_log_learn(new_dentry, mnt, new_mode);
58889+ return new_mode;
58890+ }
58891+
58892+ return retval;
58893+}
58894+
58895+__u32
58896+gr_check_link(const struct dentry * new_dentry,
58897+ const struct dentry * parent_dentry,
58898+ const struct vfsmount * parent_mnt,
58899+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58900+{
58901+ struct acl_object_label *obj;
58902+ __u32 oldmode, newmode;
58903+ __u32 needmode;
58904+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58905+ GR_DELETE | GR_INHERIT;
58906+
58907+ if (unlikely(!(gr_status & GR_READY)))
58908+ return (GR_CREATE | GR_LINK);
58909+
58910+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58911+ oldmode = obj->mode;
58912+
58913+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58914+ newmode = obj->mode;
58915+
58916+ needmode = newmode & checkmodes;
58917+
58918+ // old name for hardlink must have at least the permissions of the new name
58919+ if ((oldmode & needmode) != needmode)
58920+ goto bad;
58921+
58922+ // if old name had restrictions/auditing, make sure the new name does as well
58923+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58924+
58925+ // don't allow hardlinking of suid/sgid/fcapped files without permission
58926+ if (is_privileged_binary(old_dentry))
58927+ needmode |= GR_SETID;
58928+
58929+ if ((newmode & needmode) != needmode)
58930+ goto bad;
58931+
58932+ // enforce minimum permissions
58933+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58934+ return newmode;
58935+bad:
58936+ needmode = oldmode;
58937+ if (is_privileged_binary(old_dentry))
58938+ needmode |= GR_SETID;
58939+
58940+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58941+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58942+ return (GR_CREATE | GR_LINK);
58943+ } else if (newmode & GR_SUPPRESS)
58944+ return GR_SUPPRESS;
58945+ else
58946+ return 0;
58947+}
58948+
58949+int
58950+gr_check_hidden_task(const struct task_struct *task)
58951+{
58952+ if (unlikely(!(gr_status & GR_READY)))
58953+ return 0;
58954+
58955+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58956+ return 1;
58957+
58958+ return 0;
58959+}
58960+
58961+int
58962+gr_check_protected_task(const struct task_struct *task)
58963+{
58964+ if (unlikely(!(gr_status & GR_READY) || !task))
58965+ return 0;
58966+
58967+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58968+ task->acl != current->acl)
58969+ return 1;
58970+
58971+ return 0;
58972+}
58973+
58974+int
58975+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58976+{
58977+ struct task_struct *p;
58978+ int ret = 0;
58979+
58980+ if (unlikely(!(gr_status & GR_READY) || !pid))
58981+ return ret;
58982+
58983+ read_lock(&tasklist_lock);
58984+ do_each_pid_task(pid, type, p) {
58985+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58986+ p->acl != current->acl) {
58987+ ret = 1;
58988+ goto out;
58989+ }
58990+ } while_each_pid_task(pid, type, p);
58991+out:
58992+ read_unlock(&tasklist_lock);
58993+
58994+ return ret;
58995+}
58996+
58997+void
58998+gr_copy_label(struct task_struct *tsk)
58999+{
59000+ tsk->signal->used_accept = 0;
59001+ tsk->acl_sp_role = 0;
59002+ tsk->acl_role_id = current->acl_role_id;
59003+ tsk->acl = current->acl;
59004+ tsk->role = current->role;
59005+ tsk->signal->curr_ip = current->signal->curr_ip;
59006+ tsk->signal->saved_ip = current->signal->saved_ip;
59007+ if (current->exec_file)
59008+ get_file(current->exec_file);
59009+ tsk->exec_file = current->exec_file;
59010+ tsk->is_writable = current->is_writable;
59011+ if (unlikely(current->signal->used_accept)) {
59012+ current->signal->curr_ip = 0;
59013+ current->signal->saved_ip = 0;
59014+ }
59015+
59016+ return;
59017+}
59018+
59019+static void
59020+gr_set_proc_res(struct task_struct *task)
59021+{
59022+ struct acl_subject_label *proc;
59023+ unsigned short i;
59024+
59025+ proc = task->acl;
59026+
59027+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
59028+ return;
59029+
59030+ for (i = 0; i < RLIM_NLIMITS; i++) {
59031+ if (!(proc->resmask & (1 << i)))
59032+ continue;
59033+
59034+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
59035+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
59036+ }
59037+
59038+ return;
59039+}
59040+
59041+extern int __gr_process_user_ban(struct user_struct *user);
59042+
59043+int
59044+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
59045+{
59046+ unsigned int i;
59047+ __u16 num;
59048+ uid_t *uidlist;
59049+ uid_t curuid;
59050+ int realok = 0;
59051+ int effectiveok = 0;
59052+ int fsok = 0;
59053+ uid_t globalreal, globaleffective, globalfs;
59054+
59055+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59056+ struct user_struct *user;
59057+
59058+ if (!uid_valid(real))
59059+ goto skipit;
59060+
59061+ /* find user based on global namespace */
59062+
59063+ globalreal = GR_GLOBAL_UID(real);
59064+
59065+ user = find_user(make_kuid(&init_user_ns, globalreal));
59066+ if (user == NULL)
59067+ goto skipit;
59068+
59069+ if (__gr_process_user_ban(user)) {
59070+ /* for find_user */
59071+ free_uid(user);
59072+ return 1;
59073+ }
59074+
59075+ /* for find_user */
59076+ free_uid(user);
59077+
59078+skipit:
59079+#endif
59080+
59081+ if (unlikely(!(gr_status & GR_READY)))
59082+ return 0;
59083+
59084+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59085+ gr_log_learn_uid_change(real, effective, fs);
59086+
59087+ num = current->acl->user_trans_num;
59088+ uidlist = current->acl->user_transitions;
59089+
59090+ if (uidlist == NULL)
59091+ return 0;
59092+
59093+ if (!uid_valid(real)) {
59094+ realok = 1;
59095+ globalreal = (uid_t)-1;
59096+ } else {
59097+ globalreal = GR_GLOBAL_UID(real);
59098+ }
59099+ if (!uid_valid(effective)) {
59100+ effectiveok = 1;
59101+ globaleffective = (uid_t)-1;
59102+ } else {
59103+ globaleffective = GR_GLOBAL_UID(effective);
59104+ }
59105+ if (!uid_valid(fs)) {
59106+ fsok = 1;
59107+ globalfs = (uid_t)-1;
59108+ } else {
59109+ globalfs = GR_GLOBAL_UID(fs);
59110+ }
59111+
59112+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
59113+ for (i = 0; i < num; i++) {
59114+ curuid = uidlist[i];
59115+ if (globalreal == curuid)
59116+ realok = 1;
59117+ if (globaleffective == curuid)
59118+ effectiveok = 1;
59119+ if (globalfs == curuid)
59120+ fsok = 1;
59121+ }
59122+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
59123+ for (i = 0; i < num; i++) {
59124+ curuid = uidlist[i];
59125+ if (globalreal == curuid)
59126+ break;
59127+ if (globaleffective == curuid)
59128+ break;
59129+ if (globalfs == curuid)
59130+ break;
59131+ }
59132+ /* not in deny list */
59133+ if (i == num) {
59134+ realok = 1;
59135+ effectiveok = 1;
59136+ fsok = 1;
59137+ }
59138+ }
59139+
59140+ if (realok && effectiveok && fsok)
59141+ return 0;
59142+ else {
59143+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
59144+ return 1;
59145+ }
59146+}
59147+
59148+int
59149+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
59150+{
59151+ unsigned int i;
59152+ __u16 num;
59153+ gid_t *gidlist;
59154+ gid_t curgid;
59155+ int realok = 0;
59156+ int effectiveok = 0;
59157+ int fsok = 0;
59158+ gid_t globalreal, globaleffective, globalfs;
59159+
59160+ if (unlikely(!(gr_status & GR_READY)))
59161+ return 0;
59162+
59163+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59164+ gr_log_learn_gid_change(real, effective, fs);
59165+
59166+ num = current->acl->group_trans_num;
59167+ gidlist = current->acl->group_transitions;
59168+
59169+ if (gidlist == NULL)
59170+ return 0;
59171+
59172+ if (!gid_valid(real)) {
59173+ realok = 1;
59174+ globalreal = (gid_t)-1;
59175+ } else {
59176+ globalreal = GR_GLOBAL_GID(real);
59177+ }
59178+ if (!gid_valid(effective)) {
59179+ effectiveok = 1;
59180+ globaleffective = (gid_t)-1;
59181+ } else {
59182+ globaleffective = GR_GLOBAL_GID(effective);
59183+ }
59184+ if (!gid_valid(fs)) {
59185+ fsok = 1;
59186+ globalfs = (gid_t)-1;
59187+ } else {
59188+ globalfs = GR_GLOBAL_GID(fs);
59189+ }
59190+
59191+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
59192+ for (i = 0; i < num; i++) {
59193+ curgid = gidlist[i];
59194+ if (globalreal == curgid)
59195+ realok = 1;
59196+ if (globaleffective == curgid)
59197+ effectiveok = 1;
59198+ if (globalfs == curgid)
59199+ fsok = 1;
59200+ }
59201+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
59202+ for (i = 0; i < num; i++) {
59203+ curgid = gidlist[i];
59204+ if (globalreal == curgid)
59205+ break;
59206+ if (globaleffective == curgid)
59207+ break;
59208+ if (globalfs == curgid)
59209+ break;
59210+ }
59211+ /* not in deny list */
59212+ if (i == num) {
59213+ realok = 1;
59214+ effectiveok = 1;
59215+ fsok = 1;
59216+ }
59217+ }
59218+
59219+ if (realok && effectiveok && fsok)
59220+ return 0;
59221+ else {
59222+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
59223+ return 1;
59224+ }
59225+}
59226+
59227+extern int gr_acl_is_capable(const int cap);
59228+
59229+void
59230+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
59231+{
59232+ struct acl_role_label *role = task->role;
59233+ struct acl_subject_label *subj = NULL;
59234+ struct acl_object_label *obj;
59235+ struct file *filp;
59236+ uid_t uid;
59237+ gid_t gid;
59238+
59239+ if (unlikely(!(gr_status & GR_READY)))
59240+ return;
59241+
59242+ uid = GR_GLOBAL_UID(kuid);
59243+ gid = GR_GLOBAL_GID(kgid);
59244+
59245+ filp = task->exec_file;
59246+
59247+ /* kernel process, we'll give them the kernel role */
59248+ if (unlikely(!filp)) {
59249+ task->role = kernel_role;
59250+ task->acl = kernel_role->root_label;
59251+ return;
59252+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
59253+ role = lookup_acl_role_label(task, uid, gid);
59254+
59255+ /* don't change the role if we're not a privileged process */
59256+ if (role && task->role != role &&
59257+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
59258+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
59259+ return;
59260+
59261+ /* perform subject lookup in possibly new role
59262+ we can use this result below in the case where role == task->role
59263+ */
59264+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
59265+
59266+ /* if we changed uid/gid, but result in the same role
59267+ and are using inheritance, don't lose the inherited subject
59268+ if current subject is other than what normal lookup
59269+ would result in, we arrived via inheritance, don't
59270+ lose subject
59271+ */
59272+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
59273+ (subj == task->acl)))
59274+ task->acl = subj;
59275+
59276+ task->role = role;
59277+
59278+ task->is_writable = 0;
59279+
59280+ /* ignore additional mmap checks for processes that are writable
59281+ by the default ACL */
59282+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59283+ if (unlikely(obj->mode & GR_WRITE))
59284+ task->is_writable = 1;
59285+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59286+ if (unlikely(obj->mode & GR_WRITE))
59287+ task->is_writable = 1;
59288+
59289+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59290+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
59291+#endif
59292+
59293+ gr_set_proc_res(task);
59294+
59295+ return;
59296+}
59297+
59298+int
59299+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59300+ const int unsafe_flags)
59301+{
59302+ struct task_struct *task = current;
59303+ struct acl_subject_label *newacl;
59304+ struct acl_object_label *obj;
59305+ __u32 retmode;
59306+
59307+ if (unlikely(!(gr_status & GR_READY)))
59308+ return 0;
59309+
59310+ newacl = chk_subj_label(dentry, mnt, task->role);
59311+
59312+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
59313+ did an exec
59314+ */
59315+ rcu_read_lock();
59316+ read_lock(&tasklist_lock);
59317+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
59318+ (task->parent->acl->mode & GR_POVERRIDE))) {
59319+ read_unlock(&tasklist_lock);
59320+ rcu_read_unlock();
59321+ goto skip_check;
59322+ }
59323+ read_unlock(&tasklist_lock);
59324+ rcu_read_unlock();
59325+
59326+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
59327+ !(task->role->roletype & GR_ROLE_GOD) &&
59328+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
59329+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59330+ if (unsafe_flags & LSM_UNSAFE_SHARE)
59331+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
59332+ else
59333+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
59334+ return -EACCES;
59335+ }
59336+
59337+skip_check:
59338+
59339+ obj = chk_obj_label(dentry, mnt, task->acl);
59340+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
59341+
59342+ if (!(task->acl->mode & GR_INHERITLEARN) &&
59343+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
59344+ if (obj->nested)
59345+ task->acl = obj->nested;
59346+ else
59347+ task->acl = newacl;
59348+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
59349+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
59350+
59351+ task->is_writable = 0;
59352+
59353+ /* ignore additional mmap checks for processes that are writable
59354+ by the default ACL */
59355+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
59356+ if (unlikely(obj->mode & GR_WRITE))
59357+ task->is_writable = 1;
59358+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
59359+ if (unlikely(obj->mode & GR_WRITE))
59360+ task->is_writable = 1;
59361+
59362+ gr_set_proc_res(task);
59363+
59364+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59365+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
59366+#endif
59367+ return 0;
59368+}
59369+
59370+/* always called with valid inodev ptr */
59371+static void
59372+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
59373+{
59374+ struct acl_object_label *matchpo;
59375+ struct acl_subject_label *matchps;
59376+ struct acl_subject_label *subj;
59377+ struct acl_role_label *role;
59378+ unsigned int x;
59379+
59380+ FOR_EACH_ROLE_START(role)
59381+ FOR_EACH_SUBJECT_START(role, subj, x)
59382+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59383+ matchpo->mode |= GR_DELETED;
59384+ FOR_EACH_SUBJECT_END(subj,x)
59385+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
59386+ /* nested subjects aren't in the role's subj_hash table */
59387+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59388+ matchpo->mode |= GR_DELETED;
59389+ FOR_EACH_NESTED_SUBJECT_END(subj)
59390+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
59391+ matchps->mode |= GR_DELETED;
59392+ FOR_EACH_ROLE_END(role)
59393+
59394+ inodev->nentry->deleted = 1;
59395+
59396+ return;
59397+}
59398+
59399+void
59400+gr_handle_delete(const ino_t ino, const dev_t dev)
59401+{
59402+ struct inodev_entry *inodev;
59403+
59404+ if (unlikely(!(gr_status & GR_READY)))
59405+ return;
59406+
59407+ write_lock(&gr_inode_lock);
59408+ inodev = lookup_inodev_entry(ino, dev);
59409+ if (inodev != NULL)
59410+ do_handle_delete(inodev, ino, dev);
59411+ write_unlock(&gr_inode_lock);
59412+
59413+ return;
59414+}
59415+
59416+static void
59417+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
59418+ const ino_t newinode, const dev_t newdevice,
59419+ struct acl_subject_label *subj)
59420+{
59421+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
59422+ struct acl_object_label *match;
59423+
59424+ match = subj->obj_hash[index];
59425+
59426+ while (match && (match->inode != oldinode ||
59427+ match->device != olddevice ||
59428+ !(match->mode & GR_DELETED)))
59429+ match = match->next;
59430+
59431+ if (match && (match->inode == oldinode)
59432+ && (match->device == olddevice)
59433+ && (match->mode & GR_DELETED)) {
59434+ if (match->prev == NULL) {
59435+ subj->obj_hash[index] = match->next;
59436+ if (match->next != NULL)
59437+ match->next->prev = NULL;
59438+ } else {
59439+ match->prev->next = match->next;
59440+ if (match->next != NULL)
59441+ match->next->prev = match->prev;
59442+ }
59443+ match->prev = NULL;
59444+ match->next = NULL;
59445+ match->inode = newinode;
59446+ match->device = newdevice;
59447+ match->mode &= ~GR_DELETED;
59448+
59449+ insert_acl_obj_label(match, subj);
59450+ }
59451+
59452+ return;
59453+}
59454+
59455+static void
59456+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
59457+ const ino_t newinode, const dev_t newdevice,
59458+ struct acl_role_label *role)
59459+{
59460+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
59461+ struct acl_subject_label *match;
59462+
59463+ match = role->subj_hash[index];
59464+
59465+ while (match && (match->inode != oldinode ||
59466+ match->device != olddevice ||
59467+ !(match->mode & GR_DELETED)))
59468+ match = match->next;
59469+
59470+ if (match && (match->inode == oldinode)
59471+ && (match->device == olddevice)
59472+ && (match->mode & GR_DELETED)) {
59473+ if (match->prev == NULL) {
59474+ role->subj_hash[index] = match->next;
59475+ if (match->next != NULL)
59476+ match->next->prev = NULL;
59477+ } else {
59478+ match->prev->next = match->next;
59479+ if (match->next != NULL)
59480+ match->next->prev = match->prev;
59481+ }
59482+ match->prev = NULL;
59483+ match->next = NULL;
59484+ match->inode = newinode;
59485+ match->device = newdevice;
59486+ match->mode &= ~GR_DELETED;
59487+
59488+ insert_acl_subj_label(match, role);
59489+ }
59490+
59491+ return;
59492+}
59493+
59494+static void
59495+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
59496+ const ino_t newinode, const dev_t newdevice)
59497+{
59498+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
59499+ struct inodev_entry *match;
59500+
59501+ match = inodev_set.i_hash[index];
59502+
59503+ while (match && (match->nentry->inode != oldinode ||
59504+ match->nentry->device != olddevice || !match->nentry->deleted))
59505+ match = match->next;
59506+
59507+ if (match && (match->nentry->inode == oldinode)
59508+ && (match->nentry->device == olddevice) &&
59509+ match->nentry->deleted) {
59510+ if (match->prev == NULL) {
59511+ inodev_set.i_hash[index] = match->next;
59512+ if (match->next != NULL)
59513+ match->next->prev = NULL;
59514+ } else {
59515+ match->prev->next = match->next;
59516+ if (match->next != NULL)
59517+ match->next->prev = match->prev;
59518+ }
59519+ match->prev = NULL;
59520+ match->next = NULL;
59521+ match->nentry->inode = newinode;
59522+ match->nentry->device = newdevice;
59523+ match->nentry->deleted = 0;
59524+
59525+ insert_inodev_entry(match);
59526+ }
59527+
59528+ return;
59529+}
59530+
59531+static void
59532+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
59533+{
59534+ struct acl_subject_label *subj;
59535+ struct acl_role_label *role;
59536+ unsigned int x;
59537+
59538+ FOR_EACH_ROLE_START(role)
59539+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
59540+
59541+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
59542+ if ((subj->inode == ino) && (subj->device == dev)) {
59543+ subj->inode = ino;
59544+ subj->device = dev;
59545+ }
59546+ /* nested subjects aren't in the role's subj_hash table */
59547+ update_acl_obj_label(matchn->inode, matchn->device,
59548+ ino, dev, subj);
59549+ FOR_EACH_NESTED_SUBJECT_END(subj)
59550+ FOR_EACH_SUBJECT_START(role, subj, x)
59551+ update_acl_obj_label(matchn->inode, matchn->device,
59552+ ino, dev, subj);
59553+ FOR_EACH_SUBJECT_END(subj,x)
59554+ FOR_EACH_ROLE_END(role)
59555+
59556+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
59557+
59558+ return;
59559+}
59560+
59561+static void
59562+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
59563+ const struct vfsmount *mnt)
59564+{
59565+ ino_t ino = dentry->d_inode->i_ino;
59566+ dev_t dev = __get_dev(dentry);
59567+
59568+ __do_handle_create(matchn, ino, dev);
59569+
59570+ return;
59571+}
59572+
59573+void
59574+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59575+{
59576+ struct name_entry *matchn;
59577+
59578+ if (unlikely(!(gr_status & GR_READY)))
59579+ return;
59580+
59581+ preempt_disable();
59582+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
59583+
59584+ if (unlikely((unsigned long)matchn)) {
59585+ write_lock(&gr_inode_lock);
59586+ do_handle_create(matchn, dentry, mnt);
59587+ write_unlock(&gr_inode_lock);
59588+ }
59589+ preempt_enable();
59590+
59591+ return;
59592+}
59593+
59594+void
59595+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59596+{
59597+ struct name_entry *matchn;
59598+
59599+ if (unlikely(!(gr_status & GR_READY)))
59600+ return;
59601+
59602+ preempt_disable();
59603+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
59604+
59605+ if (unlikely((unsigned long)matchn)) {
59606+ write_lock(&gr_inode_lock);
59607+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
59608+ write_unlock(&gr_inode_lock);
59609+ }
59610+ preempt_enable();
59611+
59612+ return;
59613+}
59614+
59615+void
59616+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59617+ struct dentry *old_dentry,
59618+ struct dentry *new_dentry,
59619+ struct vfsmount *mnt, const __u8 replace)
59620+{
59621+ struct name_entry *matchn;
59622+ struct inodev_entry *inodev;
59623+ struct inode *inode = new_dentry->d_inode;
59624+ ino_t old_ino = old_dentry->d_inode->i_ino;
59625+ dev_t old_dev = __get_dev(old_dentry);
59626+
59627+ /* vfs_rename swaps the name and parent link for old_dentry and
59628+ new_dentry
59629+ at this point, old_dentry has the new name, parent link, and inode
59630+ for the renamed file
59631+ if a file is being replaced by a rename, new_dentry has the inode
59632+ and name for the replaced file
59633+ */
59634+
59635+ if (unlikely(!(gr_status & GR_READY)))
59636+ return;
59637+
59638+ preempt_disable();
59639+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
59640+
59641+ /* we wouldn't have to check d_inode if it weren't for
59642+ NFS silly-renaming
59643+ */
59644+
59645+ write_lock(&gr_inode_lock);
59646+ if (unlikely(replace && inode)) {
59647+ ino_t new_ino = inode->i_ino;
59648+ dev_t new_dev = __get_dev(new_dentry);
59649+
59650+ inodev = lookup_inodev_entry(new_ino, new_dev);
59651+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
59652+ do_handle_delete(inodev, new_ino, new_dev);
59653+ }
59654+
59655+ inodev = lookup_inodev_entry(old_ino, old_dev);
59656+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
59657+ do_handle_delete(inodev, old_ino, old_dev);
59658+
59659+ if (unlikely((unsigned long)matchn))
59660+ do_handle_create(matchn, old_dentry, mnt);
59661+
59662+ write_unlock(&gr_inode_lock);
59663+ preempt_enable();
59664+
59665+ return;
59666+}
59667+
59668+static int
59669+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
59670+ unsigned char **sum)
59671+{
59672+ struct acl_role_label *r;
59673+ struct role_allowed_ip *ipp;
59674+ struct role_transition *trans;
59675+ unsigned int i;
59676+ int found = 0;
59677+ u32 curr_ip = current->signal->curr_ip;
59678+
59679+ current->signal->saved_ip = curr_ip;
59680+
59681+ /* check transition table */
59682+
59683+ for (trans = current->role->transitions; trans; trans = trans->next) {
59684+ if (!strcmp(rolename, trans->rolename)) {
59685+ found = 1;
59686+ break;
59687+ }
59688+ }
59689+
59690+ if (!found)
59691+ return 0;
59692+
59693+ /* handle special roles that do not require authentication
59694+ and check ip */
59695+
59696+ FOR_EACH_ROLE_START(r)
59697+ if (!strcmp(rolename, r->rolename) &&
59698+ (r->roletype & GR_ROLE_SPECIAL)) {
59699+ found = 0;
59700+ if (r->allowed_ips != NULL) {
59701+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
59702+ if ((ntohl(curr_ip) & ipp->netmask) ==
59703+ (ntohl(ipp->addr) & ipp->netmask))
59704+ found = 1;
59705+ }
59706+ } else
59707+ found = 2;
59708+ if (!found)
59709+ return 0;
59710+
59711+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
59712+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
59713+ *salt = NULL;
59714+ *sum = NULL;
59715+ return 1;
59716+ }
59717+ }
59718+ FOR_EACH_ROLE_END(r)
59719+
59720+ for (i = 0; i < num_sprole_pws; i++) {
59721+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
59722+ *salt = acl_special_roles[i]->salt;
59723+ *sum = acl_special_roles[i]->sum;
59724+ return 1;
59725+ }
59726+ }
59727+
59728+ return 0;
59729+}
59730+
59731+static void
59732+assign_special_role(char *rolename)
59733+{
59734+ struct acl_object_label *obj;
59735+ struct acl_role_label *r;
59736+ struct acl_role_label *assigned = NULL;
59737+ struct task_struct *tsk;
59738+ struct file *filp;
59739+
59740+ FOR_EACH_ROLE_START(r)
59741+ if (!strcmp(rolename, r->rolename) &&
59742+ (r->roletype & GR_ROLE_SPECIAL)) {
59743+ assigned = r;
59744+ break;
59745+ }
59746+ FOR_EACH_ROLE_END(r)
59747+
59748+ if (!assigned)
59749+ return;
59750+
59751+ read_lock(&tasklist_lock);
59752+ read_lock(&grsec_exec_file_lock);
59753+
59754+ tsk = current->real_parent;
59755+ if (tsk == NULL)
59756+ goto out_unlock;
59757+
59758+ filp = tsk->exec_file;
59759+ if (filp == NULL)
59760+ goto out_unlock;
59761+
59762+ tsk->is_writable = 0;
59763+
59764+ tsk->acl_sp_role = 1;
59765+ tsk->acl_role_id = ++acl_sp_role_value;
59766+ tsk->role = assigned;
59767+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
59768+
59769+ /* ignore additional mmap checks for processes that are writable
59770+ by the default ACL */
59771+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59772+ if (unlikely(obj->mode & GR_WRITE))
59773+ tsk->is_writable = 1;
59774+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
59775+ if (unlikely(obj->mode & GR_WRITE))
59776+ tsk->is_writable = 1;
59777+
59778+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59779+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
59780+#endif
59781+
59782+out_unlock:
59783+ read_unlock(&grsec_exec_file_lock);
59784+ read_unlock(&tasklist_lock);
59785+ return;
59786+}
59787+
59788+int gr_check_secure_terminal(struct task_struct *task)
59789+{
59790+ struct task_struct *p, *p2, *p3;
59791+ struct files_struct *files;
59792+ struct fdtable *fdt;
59793+ struct file *our_file = NULL, *file;
59794+ int i;
59795+
59796+ if (task->signal->tty == NULL)
59797+ return 1;
59798+
59799+ files = get_files_struct(task);
59800+ if (files != NULL) {
59801+ rcu_read_lock();
59802+ fdt = files_fdtable(files);
59803+ for (i=0; i < fdt->max_fds; i++) {
59804+ file = fcheck_files(files, i);
59805+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
59806+ get_file(file);
59807+ our_file = file;
59808+ }
59809+ }
59810+ rcu_read_unlock();
59811+ put_files_struct(files);
59812+ }
59813+
59814+ if (our_file == NULL)
59815+ return 1;
59816+
59817+ read_lock(&tasklist_lock);
59818+ do_each_thread(p2, p) {
59819+ files = get_files_struct(p);
59820+ if (files == NULL ||
59821+ (p->signal && p->signal->tty == task->signal->tty)) {
59822+ if (files != NULL)
59823+ put_files_struct(files);
59824+ continue;
59825+ }
59826+ rcu_read_lock();
59827+ fdt = files_fdtable(files);
59828+ for (i=0; i < fdt->max_fds; i++) {
59829+ file = fcheck_files(files, i);
59830+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59831+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59832+ p3 = task;
59833+ while (task_pid_nr(p3) > 0) {
59834+ if (p3 == p)
59835+ break;
59836+ p3 = p3->real_parent;
59837+ }
59838+ if (p3 == p)
59839+ break;
59840+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59841+ gr_handle_alertkill(p);
59842+ rcu_read_unlock();
59843+ put_files_struct(files);
59844+ read_unlock(&tasklist_lock);
59845+ fput(our_file);
59846+ return 0;
59847+ }
59848+ }
59849+ rcu_read_unlock();
59850+ put_files_struct(files);
59851+ } while_each_thread(p2, p);
59852+ read_unlock(&tasklist_lock);
59853+
59854+ fput(our_file);
59855+ return 1;
59856+}
59857+
59858+static int gr_rbac_disable(void *unused)
59859+{
59860+ pax_open_kernel();
59861+ gr_status &= ~GR_READY;
59862+ pax_close_kernel();
59863+
59864+ return 0;
59865+}
59866+
59867+ssize_t
59868+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59869+{
59870+ struct gr_arg_wrapper uwrap;
59871+ unsigned char *sprole_salt = NULL;
59872+ unsigned char *sprole_sum = NULL;
59873+ int error = sizeof (struct gr_arg_wrapper);
59874+ int error2 = 0;
59875+
59876+ mutex_lock(&gr_dev_mutex);
59877+
59878+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59879+ error = -EPERM;
59880+ goto out;
59881+ }
59882+
59883+ if (count != sizeof (struct gr_arg_wrapper)) {
59884+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59885+ error = -EINVAL;
59886+ goto out;
59887+ }
59888+
59889+
59890+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59891+ gr_auth_expires = 0;
59892+ gr_auth_attempts = 0;
59893+ }
59894+
59895+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59896+ error = -EFAULT;
59897+ goto out;
59898+ }
59899+
59900+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59901+ error = -EINVAL;
59902+ goto out;
59903+ }
59904+
59905+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59906+ error = -EFAULT;
59907+ goto out;
59908+ }
59909+
59910+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59911+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59912+ time_after(gr_auth_expires, get_seconds())) {
59913+ error = -EBUSY;
59914+ goto out;
59915+ }
59916+
59917+ /* if non-root trying to do anything other than use a special role,
59918+ do not attempt authentication, do not count towards authentication
59919+ locking
59920+ */
59921+
59922+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59923+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59924+ gr_is_global_nonroot(current_uid())) {
59925+ error = -EPERM;
59926+ goto out;
59927+ }
59928+
59929+ /* ensure pw and special role name are null terminated */
59930+
59931+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59932+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59933+
59934+ /* Okay.
59935+ * We have our enough of the argument structure..(we have yet
59936+ * to copy_from_user the tables themselves) . Copy the tables
59937+ * only if we need them, i.e. for loading operations. */
59938+
59939+ switch (gr_usermode->mode) {
59940+ case GR_STATUS:
59941+ if (gr_status & GR_READY) {
59942+ error = 1;
59943+ if (!gr_check_secure_terminal(current))
59944+ error = 3;
59945+ } else
59946+ error = 2;
59947+ goto out;
59948+ case GR_SHUTDOWN:
59949+ if ((gr_status & GR_READY)
59950+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59951+ stop_machine(gr_rbac_disable, NULL, NULL);
59952+ free_variables();
59953+ memset(gr_usermode, 0, sizeof (struct gr_arg));
59954+ memset(gr_system_salt, 0, GR_SALT_LEN);
59955+ memset(gr_system_sum, 0, GR_SHA_LEN);
59956+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59957+ } else if (gr_status & GR_READY) {
59958+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59959+ error = -EPERM;
59960+ } else {
59961+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59962+ error = -EAGAIN;
59963+ }
59964+ break;
59965+ case GR_ENABLE:
59966+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59967+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59968+ else {
59969+ if (gr_status & GR_READY)
59970+ error = -EAGAIN;
59971+ else
59972+ error = error2;
59973+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59974+ }
59975+ break;
59976+ case GR_RELOAD:
59977+ if (!(gr_status & GR_READY)) {
59978+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59979+ error = -EAGAIN;
59980+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59981+ stop_machine(gr_rbac_disable, NULL, NULL);
59982+ free_variables();
59983+ error2 = gracl_init(gr_usermode);
59984+ if (!error2)
59985+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59986+ else {
59987+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59988+ error = error2;
59989+ }
59990+ } else {
59991+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59992+ error = -EPERM;
59993+ }
59994+ break;
59995+ case GR_SEGVMOD:
59996+ if (unlikely(!(gr_status & GR_READY))) {
59997+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59998+ error = -EAGAIN;
59999+ break;
60000+ }
60001+
60002+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60003+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
60004+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
60005+ struct acl_subject_label *segvacl;
60006+ segvacl =
60007+ lookup_acl_subj_label(gr_usermode->segv_inode,
60008+ gr_usermode->segv_device,
60009+ current->role);
60010+ if (segvacl) {
60011+ segvacl->crashes = 0;
60012+ segvacl->expires = 0;
60013+ }
60014+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
60015+ gr_remove_uid(gr_usermode->segv_uid);
60016+ }
60017+ } else {
60018+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
60019+ error = -EPERM;
60020+ }
60021+ break;
60022+ case GR_SPROLE:
60023+ case GR_SPROLEPAM:
60024+ if (unlikely(!(gr_status & GR_READY))) {
60025+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
60026+ error = -EAGAIN;
60027+ break;
60028+ }
60029+
60030+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
60031+ current->role->expires = 0;
60032+ current->role->auth_attempts = 0;
60033+ }
60034+
60035+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
60036+ time_after(current->role->expires, get_seconds())) {
60037+ error = -EBUSY;
60038+ goto out;
60039+ }
60040+
60041+ if (lookup_special_role_auth
60042+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
60043+ && ((!sprole_salt && !sprole_sum)
60044+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
60045+ char *p = "";
60046+ assign_special_role(gr_usermode->sp_role);
60047+ read_lock(&tasklist_lock);
60048+ if (current->real_parent)
60049+ p = current->real_parent->role->rolename;
60050+ read_unlock(&tasklist_lock);
60051+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
60052+ p, acl_sp_role_value);
60053+ } else {
60054+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
60055+ error = -EPERM;
60056+ if(!(current->role->auth_attempts++))
60057+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
60058+
60059+ goto out;
60060+ }
60061+ break;
60062+ case GR_UNSPROLE:
60063+ if (unlikely(!(gr_status & GR_READY))) {
60064+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
60065+ error = -EAGAIN;
60066+ break;
60067+ }
60068+
60069+ if (current->role->roletype & GR_ROLE_SPECIAL) {
60070+ char *p = "";
60071+ int i = 0;
60072+
60073+ read_lock(&tasklist_lock);
60074+ if (current->real_parent) {
60075+ p = current->real_parent->role->rolename;
60076+ i = current->real_parent->acl_role_id;
60077+ }
60078+ read_unlock(&tasklist_lock);
60079+
60080+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
60081+ gr_set_acls(1);
60082+ } else {
60083+ error = -EPERM;
60084+ goto out;
60085+ }
60086+ break;
60087+ default:
60088+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
60089+ error = -EINVAL;
60090+ break;
60091+ }
60092+
60093+ if (error != -EPERM)
60094+ goto out;
60095+
60096+ if(!(gr_auth_attempts++))
60097+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
60098+
60099+ out:
60100+ mutex_unlock(&gr_dev_mutex);
60101+ return error;
60102+}
60103+
60104+/* must be called with
60105+ rcu_read_lock();
60106+ read_lock(&tasklist_lock);
60107+ read_lock(&grsec_exec_file_lock);
60108+*/
60109+int gr_apply_subject_to_task(struct task_struct *task)
60110+{
60111+ struct acl_object_label *obj;
60112+ char *tmpname;
60113+ struct acl_subject_label *tmpsubj;
60114+ struct file *filp;
60115+ struct name_entry *nmatch;
60116+
60117+ filp = task->exec_file;
60118+ if (filp == NULL)
60119+ return 0;
60120+
60121+ /* the following is to apply the correct subject
60122+ on binaries running when the RBAC system
60123+ is enabled, when the binaries have been
60124+ replaced or deleted since their execution
60125+ -----
60126+ when the RBAC system starts, the inode/dev
60127+ from exec_file will be one the RBAC system
60128+ is unaware of. It only knows the inode/dev
60129+ of the present file on disk, or the absence
60130+ of it.
60131+ */
60132+ preempt_disable();
60133+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
60134+
60135+ nmatch = lookup_name_entry(tmpname);
60136+ preempt_enable();
60137+ tmpsubj = NULL;
60138+ if (nmatch) {
60139+ if (nmatch->deleted)
60140+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
60141+ else
60142+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
60143+ if (tmpsubj != NULL)
60144+ task->acl = tmpsubj;
60145+ }
60146+ if (tmpsubj == NULL)
60147+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
60148+ task->role);
60149+ if (task->acl) {
60150+ task->is_writable = 0;
60151+ /* ignore additional mmap checks for processes that are writable
60152+ by the default ACL */
60153+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60154+ if (unlikely(obj->mode & GR_WRITE))
60155+ task->is_writable = 1;
60156+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60157+ if (unlikely(obj->mode & GR_WRITE))
60158+ task->is_writable = 1;
60159+
60160+ gr_set_proc_res(task);
60161+
60162+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60163+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60164+#endif
60165+ } else {
60166+ return 1;
60167+ }
60168+
60169+ return 0;
60170+}
60171+
60172+int
60173+gr_set_acls(const int type)
60174+{
60175+ struct task_struct *task, *task2;
60176+ struct acl_role_label *role = current->role;
60177+ __u16 acl_role_id = current->acl_role_id;
60178+ const struct cred *cred;
60179+ int ret;
60180+
60181+ rcu_read_lock();
60182+ read_lock(&tasklist_lock);
60183+ read_lock(&grsec_exec_file_lock);
60184+ do_each_thread(task2, task) {
60185+ /* check to see if we're called from the exit handler,
60186+ if so, only replace ACLs that have inherited the admin
60187+ ACL */
60188+
60189+ if (type && (task->role != role ||
60190+ task->acl_role_id != acl_role_id))
60191+ continue;
60192+
60193+ task->acl_role_id = 0;
60194+ task->acl_sp_role = 0;
60195+
60196+ if (task->exec_file) {
60197+ cred = __task_cred(task);
60198+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
60199+ ret = gr_apply_subject_to_task(task);
60200+ if (ret) {
60201+ read_unlock(&grsec_exec_file_lock);
60202+ read_unlock(&tasklist_lock);
60203+ rcu_read_unlock();
60204+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
60205+ return ret;
60206+ }
60207+ } else {
60208+ // it's a kernel process
60209+ task->role = kernel_role;
60210+ task->acl = kernel_role->root_label;
60211+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
60212+ task->acl->mode &= ~GR_PROCFIND;
60213+#endif
60214+ }
60215+ } while_each_thread(task2, task);
60216+ read_unlock(&grsec_exec_file_lock);
60217+ read_unlock(&tasklist_lock);
60218+ rcu_read_unlock();
60219+
60220+ return 0;
60221+}
60222+
60223+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
60224+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
60225+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
60226+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
60227+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
60228+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
60229+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
60230+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
60231+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
60232+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
60233+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
60234+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
60235+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
60236+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
60237+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
60238+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
60239+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
60240+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
60241+};
60242+
60243+void
60244+gr_learn_resource(const struct task_struct *task,
60245+ const int res, const unsigned long wanted, const int gt)
60246+{
60247+ struct acl_subject_label *acl;
60248+ const struct cred *cred;
60249+
60250+ if (unlikely((gr_status & GR_READY) &&
60251+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
60252+ goto skip_reslog;
60253+
60254+ gr_log_resource(task, res, wanted, gt);
60255+skip_reslog:
60256+
60257+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
60258+ return;
60259+
60260+ acl = task->acl;
60261+
60262+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
60263+ !(acl->resmask & (1 << (unsigned short) res))))
60264+ return;
60265+
60266+ if (wanted >= acl->res[res].rlim_cur) {
60267+ unsigned long res_add;
60268+
60269+ res_add = wanted + res_learn_bumps[res];
60270+
60271+ acl->res[res].rlim_cur = res_add;
60272+
60273+ if (wanted > acl->res[res].rlim_max)
60274+ acl->res[res].rlim_max = res_add;
60275+
60276+ /* only log the subject filename, since resource logging is supported for
60277+ single-subject learning only */
60278+ rcu_read_lock();
60279+ cred = __task_cred(task);
60280+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60281+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
60282+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
60283+ "", (unsigned long) res, &task->signal->saved_ip);
60284+ rcu_read_unlock();
60285+ }
60286+
60287+ return;
60288+}
60289+EXPORT_SYMBOL(gr_learn_resource);
60290+#endif
60291+
60292+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
60293+void
60294+pax_set_initial_flags(struct linux_binprm *bprm)
60295+{
60296+ struct task_struct *task = current;
60297+ struct acl_subject_label *proc;
60298+ unsigned long flags;
60299+
60300+ if (unlikely(!(gr_status & GR_READY)))
60301+ return;
60302+
60303+ flags = pax_get_flags(task);
60304+
60305+ proc = task->acl;
60306+
60307+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
60308+ flags &= ~MF_PAX_PAGEEXEC;
60309+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
60310+ flags &= ~MF_PAX_SEGMEXEC;
60311+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
60312+ flags &= ~MF_PAX_RANDMMAP;
60313+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
60314+ flags &= ~MF_PAX_EMUTRAMP;
60315+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
60316+ flags &= ~MF_PAX_MPROTECT;
60317+
60318+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
60319+ flags |= MF_PAX_PAGEEXEC;
60320+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
60321+ flags |= MF_PAX_SEGMEXEC;
60322+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
60323+ flags |= MF_PAX_RANDMMAP;
60324+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
60325+ flags |= MF_PAX_EMUTRAMP;
60326+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
60327+ flags |= MF_PAX_MPROTECT;
60328+
60329+ pax_set_flags(task, flags);
60330+
60331+ return;
60332+}
60333+#endif
60334+
60335+int
60336+gr_handle_proc_ptrace(struct task_struct *task)
60337+{
60338+ struct file *filp;
60339+ struct task_struct *tmp = task;
60340+ struct task_struct *curtemp = current;
60341+ __u32 retmode;
60342+
60343+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60344+ if (unlikely(!(gr_status & GR_READY)))
60345+ return 0;
60346+#endif
60347+
60348+ read_lock(&tasklist_lock);
60349+ read_lock(&grsec_exec_file_lock);
60350+ filp = task->exec_file;
60351+
60352+ while (task_pid_nr(tmp) > 0) {
60353+ if (tmp == curtemp)
60354+ break;
60355+ tmp = tmp->real_parent;
60356+ }
60357+
60358+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
60359+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
60360+ read_unlock(&grsec_exec_file_lock);
60361+ read_unlock(&tasklist_lock);
60362+ return 1;
60363+ }
60364+
60365+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60366+ if (!(gr_status & GR_READY)) {
60367+ read_unlock(&grsec_exec_file_lock);
60368+ read_unlock(&tasklist_lock);
60369+ return 0;
60370+ }
60371+#endif
60372+
60373+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
60374+ read_unlock(&grsec_exec_file_lock);
60375+ read_unlock(&tasklist_lock);
60376+
60377+ if (retmode & GR_NOPTRACE)
60378+ return 1;
60379+
60380+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
60381+ && (current->acl != task->acl || (current->acl != current->role->root_label
60382+ && task_pid_nr(current) != task_pid_nr(task))))
60383+ return 1;
60384+
60385+ return 0;
60386+}
60387+
60388+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
60389+{
60390+ if (unlikely(!(gr_status & GR_READY)))
60391+ return;
60392+
60393+ if (!(current->role->roletype & GR_ROLE_GOD))
60394+ return;
60395+
60396+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
60397+ p->role->rolename, gr_task_roletype_to_char(p),
60398+ p->acl->filename);
60399+}
60400+
60401+int
60402+gr_handle_ptrace(struct task_struct *task, const long request)
60403+{
60404+ struct task_struct *tmp = task;
60405+ struct task_struct *curtemp = current;
60406+ __u32 retmode;
60407+
60408+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60409+ if (unlikely(!(gr_status & GR_READY)))
60410+ return 0;
60411+#endif
60412+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
60413+ read_lock(&tasklist_lock);
60414+ while (task_pid_nr(tmp) > 0) {
60415+ if (tmp == curtemp)
60416+ break;
60417+ tmp = tmp->real_parent;
60418+ }
60419+
60420+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
60421+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
60422+ read_unlock(&tasklist_lock);
60423+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60424+ return 1;
60425+ }
60426+ read_unlock(&tasklist_lock);
60427+ }
60428+
60429+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60430+ if (!(gr_status & GR_READY))
60431+ return 0;
60432+#endif
60433+
60434+ read_lock(&grsec_exec_file_lock);
60435+ if (unlikely(!task->exec_file)) {
60436+ read_unlock(&grsec_exec_file_lock);
60437+ return 0;
60438+ }
60439+
60440+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
60441+ read_unlock(&grsec_exec_file_lock);
60442+
60443+ if (retmode & GR_NOPTRACE) {
60444+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60445+ return 1;
60446+ }
60447+
60448+ if (retmode & GR_PTRACERD) {
60449+ switch (request) {
60450+ case PTRACE_SEIZE:
60451+ case PTRACE_POKETEXT:
60452+ case PTRACE_POKEDATA:
60453+ case PTRACE_POKEUSR:
60454+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
60455+ case PTRACE_SETREGS:
60456+ case PTRACE_SETFPREGS:
60457+#endif
60458+#ifdef CONFIG_X86
60459+ case PTRACE_SETFPXREGS:
60460+#endif
60461+#ifdef CONFIG_ALTIVEC
60462+ case PTRACE_SETVRREGS:
60463+#endif
60464+ return 1;
60465+ default:
60466+ return 0;
60467+ }
60468+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
60469+ !(current->role->roletype & GR_ROLE_GOD) &&
60470+ (current->acl != task->acl)) {
60471+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60472+ return 1;
60473+ }
60474+
60475+ return 0;
60476+}
60477+
60478+static int is_writable_mmap(const struct file *filp)
60479+{
60480+ struct task_struct *task = current;
60481+ struct acl_object_label *obj, *obj2;
60482+
60483+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
60484+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
60485+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60486+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
60487+ task->role->root_label);
60488+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
60489+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
60490+ return 1;
60491+ }
60492+ }
60493+ return 0;
60494+}
60495+
60496+int
60497+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
60498+{
60499+ __u32 mode;
60500+
60501+ if (unlikely(!file || !(prot & PROT_EXEC)))
60502+ return 1;
60503+
60504+ if (is_writable_mmap(file))
60505+ return 0;
60506+
60507+ mode =
60508+ gr_search_file(file->f_path.dentry,
60509+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60510+ file->f_path.mnt);
60511+
60512+ if (!gr_tpe_allow(file))
60513+ return 0;
60514+
60515+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60516+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60517+ return 0;
60518+ } else if (unlikely(!(mode & GR_EXEC))) {
60519+ return 0;
60520+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60521+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60522+ return 1;
60523+ }
60524+
60525+ return 1;
60526+}
60527+
60528+int
60529+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
60530+{
60531+ __u32 mode;
60532+
60533+ if (unlikely(!file || !(prot & PROT_EXEC)))
60534+ return 1;
60535+
60536+ if (is_writable_mmap(file))
60537+ return 0;
60538+
60539+ mode =
60540+ gr_search_file(file->f_path.dentry,
60541+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60542+ file->f_path.mnt);
60543+
60544+ if (!gr_tpe_allow(file))
60545+ return 0;
60546+
60547+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60548+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60549+ return 0;
60550+ } else if (unlikely(!(mode & GR_EXEC))) {
60551+ return 0;
60552+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60553+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60554+ return 1;
60555+ }
60556+
60557+ return 1;
60558+}
60559+
60560+void
60561+gr_acl_handle_psacct(struct task_struct *task, const long code)
60562+{
60563+ unsigned long runtime;
60564+ unsigned long cputime;
60565+ unsigned int wday, cday;
60566+ __u8 whr, chr;
60567+ __u8 wmin, cmin;
60568+ __u8 wsec, csec;
60569+ struct timespec timeval;
60570+
60571+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
60572+ !(task->acl->mode & GR_PROCACCT)))
60573+ return;
60574+
60575+ do_posix_clock_monotonic_gettime(&timeval);
60576+ runtime = timeval.tv_sec - task->start_time.tv_sec;
60577+ wday = runtime / (3600 * 24);
60578+ runtime -= wday * (3600 * 24);
60579+ whr = runtime / 3600;
60580+ runtime -= whr * 3600;
60581+ wmin = runtime / 60;
60582+ runtime -= wmin * 60;
60583+ wsec = runtime;
60584+
60585+ cputime = (task->utime + task->stime) / HZ;
60586+ cday = cputime / (3600 * 24);
60587+ cputime -= cday * (3600 * 24);
60588+ chr = cputime / 3600;
60589+ cputime -= chr * 3600;
60590+ cmin = cputime / 60;
60591+ cputime -= cmin * 60;
60592+ csec = cputime;
60593+
60594+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
60595+
60596+ return;
60597+}
60598+
60599+void gr_set_kernel_label(struct task_struct *task)
60600+{
60601+ if (gr_status & GR_READY) {
60602+ task->role = kernel_role;
60603+ task->acl = kernel_role->root_label;
60604+ }
60605+ return;
60606+}
60607+
60608+#ifdef CONFIG_TASKSTATS
60609+int gr_is_taskstats_denied(int pid)
60610+{
60611+ struct task_struct *task;
60612+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60613+ const struct cred *cred;
60614+#endif
60615+ int ret = 0;
60616+
60617+ /* restrict taskstats viewing to un-chrooted root users
60618+ who have the 'view' subject flag if the RBAC system is enabled
60619+ */
60620+
60621+ rcu_read_lock();
60622+ read_lock(&tasklist_lock);
60623+ task = find_task_by_vpid(pid);
60624+ if (task) {
60625+#ifdef CONFIG_GRKERNSEC_CHROOT
60626+ if (proc_is_chrooted(task))
60627+ ret = -EACCES;
60628+#endif
60629+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60630+ cred = __task_cred(task);
60631+#ifdef CONFIG_GRKERNSEC_PROC_USER
60632+ if (gr_is_global_nonroot(cred->uid))
60633+ ret = -EACCES;
60634+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60635+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
60636+ ret = -EACCES;
60637+#endif
60638+#endif
60639+ if (gr_status & GR_READY) {
60640+ if (!(task->acl->mode & GR_VIEW))
60641+ ret = -EACCES;
60642+ }
60643+ } else
60644+ ret = -ENOENT;
60645+
60646+ read_unlock(&tasklist_lock);
60647+ rcu_read_unlock();
60648+
60649+ return ret;
60650+}
60651+#endif
60652+
60653+/* AUXV entries are filled via a descendant of search_binary_handler
60654+ after we've already applied the subject for the target
60655+*/
60656+int gr_acl_enable_at_secure(void)
60657+{
60658+ if (unlikely(!(gr_status & GR_READY)))
60659+ return 0;
60660+
60661+ if (current->acl->mode & GR_ATSECURE)
60662+ return 1;
60663+
60664+ return 0;
60665+}
60666+
60667+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60668+{
60669+ struct task_struct *task = current;
60670+ struct dentry *dentry = file->f_path.dentry;
60671+ struct vfsmount *mnt = file->f_path.mnt;
60672+ struct acl_object_label *obj, *tmp;
60673+ struct acl_subject_label *subj;
60674+ unsigned int bufsize;
60675+ int is_not_root;
60676+ char *path;
60677+ dev_t dev = __get_dev(dentry);
60678+
60679+ if (unlikely(!(gr_status & GR_READY)))
60680+ return 1;
60681+
60682+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60683+ return 1;
60684+
60685+ /* ignore Eric Biederman */
60686+ if (IS_PRIVATE(dentry->d_inode))
60687+ return 1;
60688+
60689+ subj = task->acl;
60690+ read_lock(&gr_inode_lock);
60691+ do {
60692+ obj = lookup_acl_obj_label(ino, dev, subj);
60693+ if (obj != NULL) {
60694+ read_unlock(&gr_inode_lock);
60695+ return (obj->mode & GR_FIND) ? 1 : 0;
60696+ }
60697+ } while ((subj = subj->parent_subject));
60698+ read_unlock(&gr_inode_lock);
60699+
60700+ /* this is purely an optimization since we're looking for an object
60701+ for the directory we're doing a readdir on
60702+ if it's possible for any globbed object to match the entry we're
60703+ filling into the directory, then the object we find here will be
60704+ an anchor point with attached globbed objects
60705+ */
60706+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60707+ if (obj->globbed == NULL)
60708+ return (obj->mode & GR_FIND) ? 1 : 0;
60709+
60710+ is_not_root = ((obj->filename[0] == '/') &&
60711+ (obj->filename[1] == '\0')) ? 0 : 1;
60712+ bufsize = PAGE_SIZE - namelen - is_not_root;
60713+
60714+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
60715+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60716+ return 1;
60717+
60718+ preempt_disable();
60719+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60720+ bufsize);
60721+
60722+ bufsize = strlen(path);
60723+
60724+ /* if base is "/", don't append an additional slash */
60725+ if (is_not_root)
60726+ *(path + bufsize) = '/';
60727+ memcpy(path + bufsize + is_not_root, name, namelen);
60728+ *(path + bufsize + namelen + is_not_root) = '\0';
60729+
60730+ tmp = obj->globbed;
60731+ while (tmp) {
60732+ if (!glob_match(tmp->filename, path)) {
60733+ preempt_enable();
60734+ return (tmp->mode & GR_FIND) ? 1 : 0;
60735+ }
60736+ tmp = tmp->next;
60737+ }
60738+ preempt_enable();
60739+ return (obj->mode & GR_FIND) ? 1 : 0;
60740+}
60741+
60742+void gr_put_exec_file(struct task_struct *task)
60743+{
60744+ struct file *filp;
60745+
60746+ write_lock(&grsec_exec_file_lock);
60747+ filp = task->exec_file;
60748+ task->exec_file = NULL;
60749+ write_unlock(&grsec_exec_file_lock);
60750+
60751+ if (filp)
60752+ fput(filp);
60753+
60754+ return;
60755+}
60756+
60757+
60758+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60759+EXPORT_SYMBOL(gr_acl_is_enabled);
60760+#endif
60761+EXPORT_SYMBOL(gr_set_kernel_label);
60762+#ifdef CONFIG_SECURITY
60763+EXPORT_SYMBOL(gr_check_user_change);
60764+EXPORT_SYMBOL(gr_check_group_change);
60765+#endif
60766+
60767diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60768new file mode 100644
60769index 0000000..34fefda
60770--- /dev/null
60771+++ b/grsecurity/gracl_alloc.c
60772@@ -0,0 +1,105 @@
60773+#include <linux/kernel.h>
60774+#include <linux/mm.h>
60775+#include <linux/slab.h>
60776+#include <linux/vmalloc.h>
60777+#include <linux/gracl.h>
60778+#include <linux/grsecurity.h>
60779+
60780+static unsigned long alloc_stack_next = 1;
60781+static unsigned long alloc_stack_size = 1;
60782+static void **alloc_stack;
60783+
60784+static __inline__ int
60785+alloc_pop(void)
60786+{
60787+ if (alloc_stack_next == 1)
60788+ return 0;
60789+
60790+ kfree(alloc_stack[alloc_stack_next - 2]);
60791+
60792+ alloc_stack_next--;
60793+
60794+ return 1;
60795+}
60796+
60797+static __inline__ int
60798+alloc_push(void *buf)
60799+{
60800+ if (alloc_stack_next >= alloc_stack_size)
60801+ return 1;
60802+
60803+ alloc_stack[alloc_stack_next - 1] = buf;
60804+
60805+ alloc_stack_next++;
60806+
60807+ return 0;
60808+}
60809+
60810+void *
60811+acl_alloc(unsigned long len)
60812+{
60813+ void *ret = NULL;
60814+
60815+ if (!len || len > PAGE_SIZE)
60816+ goto out;
60817+
60818+ ret = kmalloc(len, GFP_KERNEL);
60819+
60820+ if (ret) {
60821+ if (alloc_push(ret)) {
60822+ kfree(ret);
60823+ ret = NULL;
60824+ }
60825+ }
60826+
60827+out:
60828+ return ret;
60829+}
60830+
60831+void *
60832+acl_alloc_num(unsigned long num, unsigned long len)
60833+{
60834+ if (!len || (num > (PAGE_SIZE / len)))
60835+ return NULL;
60836+
60837+ return acl_alloc(num * len);
60838+}
60839+
60840+void
60841+acl_free_all(void)
60842+{
60843+ if (gr_acl_is_enabled() || !alloc_stack)
60844+ return;
60845+
60846+ while (alloc_pop()) ;
60847+
60848+ if (alloc_stack) {
60849+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60850+ kfree(alloc_stack);
60851+ else
60852+ vfree(alloc_stack);
60853+ }
60854+
60855+ alloc_stack = NULL;
60856+ alloc_stack_size = 1;
60857+ alloc_stack_next = 1;
60858+
60859+ return;
60860+}
60861+
60862+int
60863+acl_alloc_stack_init(unsigned long size)
60864+{
60865+ if ((size * sizeof (void *)) <= PAGE_SIZE)
60866+ alloc_stack =
60867+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60868+ else
60869+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
60870+
60871+ alloc_stack_size = size;
60872+
60873+ if (!alloc_stack)
60874+ return 0;
60875+ else
60876+ return 1;
60877+}
60878diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60879new file mode 100644
60880index 0000000..bdd51ea
60881--- /dev/null
60882+++ b/grsecurity/gracl_cap.c
60883@@ -0,0 +1,110 @@
60884+#include <linux/kernel.h>
60885+#include <linux/module.h>
60886+#include <linux/sched.h>
60887+#include <linux/gracl.h>
60888+#include <linux/grsecurity.h>
60889+#include <linux/grinternal.h>
60890+
60891+extern const char *captab_log[];
60892+extern int captab_log_entries;
60893+
60894+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
60895+{
60896+ struct acl_subject_label *curracl;
60897+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60898+ kernel_cap_t cap_audit = __cap_empty_set;
60899+
60900+ if (!gr_acl_is_enabled())
60901+ return 1;
60902+
60903+ curracl = task->acl;
60904+
60905+ cap_drop = curracl->cap_lower;
60906+ cap_mask = curracl->cap_mask;
60907+ cap_audit = curracl->cap_invert_audit;
60908+
60909+ while ((curracl = curracl->parent_subject)) {
60910+ /* if the cap isn't specified in the current computed mask but is specified in the
60911+ current level subject, and is lowered in the current level subject, then add
60912+ it to the set of dropped capabilities
60913+ otherwise, add the current level subject's mask to the current computed mask
60914+ */
60915+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60916+ cap_raise(cap_mask, cap);
60917+ if (cap_raised(curracl->cap_lower, cap))
60918+ cap_raise(cap_drop, cap);
60919+ if (cap_raised(curracl->cap_invert_audit, cap))
60920+ cap_raise(cap_audit, cap);
60921+ }
60922+ }
60923+
60924+ if (!cap_raised(cap_drop, cap)) {
60925+ if (cap_raised(cap_audit, cap))
60926+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60927+ return 1;
60928+ }
60929+
60930+ curracl = task->acl;
60931+
60932+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60933+ && cap_raised(cred->cap_effective, cap)) {
60934+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60935+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
60936+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
60937+ gr_to_filename(task->exec_file->f_path.dentry,
60938+ task->exec_file->f_path.mnt) : curracl->filename,
60939+ curracl->filename, 0UL,
60940+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60941+ return 1;
60942+ }
60943+
60944+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60945+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60946+
60947+ return 0;
60948+}
60949+
60950+int
60951+gr_acl_is_capable(const int cap)
60952+{
60953+ return gr_task_acl_is_capable(current, current_cred(), cap);
60954+}
60955+
60956+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
60957+{
60958+ struct acl_subject_label *curracl;
60959+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60960+
60961+ if (!gr_acl_is_enabled())
60962+ return 1;
60963+
60964+ curracl = task->acl;
60965+
60966+ cap_drop = curracl->cap_lower;
60967+ cap_mask = curracl->cap_mask;
60968+
60969+ while ((curracl = curracl->parent_subject)) {
60970+ /* if the cap isn't specified in the current computed mask but is specified in the
60971+ current level subject, and is lowered in the current level subject, then add
60972+ it to the set of dropped capabilities
60973+ otherwise, add the current level subject's mask to the current computed mask
60974+ */
60975+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60976+ cap_raise(cap_mask, cap);
60977+ if (cap_raised(curracl->cap_lower, cap))
60978+ cap_raise(cap_drop, cap);
60979+ }
60980+ }
60981+
60982+ if (!cap_raised(cap_drop, cap))
60983+ return 1;
60984+
60985+ return 0;
60986+}
60987+
60988+int
60989+gr_acl_is_capable_nolog(const int cap)
60990+{
60991+ return gr_task_acl_is_capable_nolog(current, cap);
60992+}
60993+
60994diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60995new file mode 100644
60996index 0000000..a340c17
60997--- /dev/null
60998+++ b/grsecurity/gracl_fs.c
60999@@ -0,0 +1,431 @@
61000+#include <linux/kernel.h>
61001+#include <linux/sched.h>
61002+#include <linux/types.h>
61003+#include <linux/fs.h>
61004+#include <linux/file.h>
61005+#include <linux/stat.h>
61006+#include <linux/grsecurity.h>
61007+#include <linux/grinternal.h>
61008+#include <linux/gracl.h>
61009+
61010+umode_t
61011+gr_acl_umask(void)
61012+{
61013+ if (unlikely(!gr_acl_is_enabled()))
61014+ return 0;
61015+
61016+ return current->role->umask;
61017+}
61018+
61019+__u32
61020+gr_acl_handle_hidden_file(const struct dentry * dentry,
61021+ const struct vfsmount * mnt)
61022+{
61023+ __u32 mode;
61024+
61025+ if (unlikely(!dentry->d_inode))
61026+ return GR_FIND;
61027+
61028+ mode =
61029+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
61030+
61031+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
61032+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61033+ return mode;
61034+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
61035+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61036+ return 0;
61037+ } else if (unlikely(!(mode & GR_FIND)))
61038+ return 0;
61039+
61040+ return GR_FIND;
61041+}
61042+
61043+__u32
61044+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
61045+ int acc_mode)
61046+{
61047+ __u32 reqmode = GR_FIND;
61048+ __u32 mode;
61049+
61050+ if (unlikely(!dentry->d_inode))
61051+ return reqmode;
61052+
61053+ if (acc_mode & MAY_APPEND)
61054+ reqmode |= GR_APPEND;
61055+ else if (acc_mode & MAY_WRITE)
61056+ reqmode |= GR_WRITE;
61057+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
61058+ reqmode |= GR_READ;
61059+
61060+ mode =
61061+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61062+ mnt);
61063+
61064+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61065+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61066+ reqmode & GR_READ ? " reading" : "",
61067+ reqmode & GR_WRITE ? " writing" : reqmode &
61068+ GR_APPEND ? " appending" : "");
61069+ return reqmode;
61070+ } else
61071+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61072+ {
61073+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61074+ reqmode & GR_READ ? " reading" : "",
61075+ reqmode & GR_WRITE ? " writing" : reqmode &
61076+ GR_APPEND ? " appending" : "");
61077+ return 0;
61078+ } else if (unlikely((mode & reqmode) != reqmode))
61079+ return 0;
61080+
61081+ return reqmode;
61082+}
61083+
61084+__u32
61085+gr_acl_handle_creat(const struct dentry * dentry,
61086+ const struct dentry * p_dentry,
61087+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
61088+ const int imode)
61089+{
61090+ __u32 reqmode = GR_WRITE | GR_CREATE;
61091+ __u32 mode;
61092+
61093+ if (acc_mode & MAY_APPEND)
61094+ reqmode |= GR_APPEND;
61095+ // if a directory was required or the directory already exists, then
61096+ // don't count this open as a read
61097+ if ((acc_mode & MAY_READ) &&
61098+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
61099+ reqmode |= GR_READ;
61100+ if ((open_flags & O_CREAT) &&
61101+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
61102+ reqmode |= GR_SETID;
61103+
61104+ mode =
61105+ gr_check_create(dentry, p_dentry, p_mnt,
61106+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61107+
61108+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61109+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61110+ reqmode & GR_READ ? " reading" : "",
61111+ reqmode & GR_WRITE ? " writing" : reqmode &
61112+ GR_APPEND ? " appending" : "");
61113+ return reqmode;
61114+ } else
61115+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61116+ {
61117+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61118+ reqmode & GR_READ ? " reading" : "",
61119+ reqmode & GR_WRITE ? " writing" : reqmode &
61120+ GR_APPEND ? " appending" : "");
61121+ return 0;
61122+ } else if (unlikely((mode & reqmode) != reqmode))
61123+ return 0;
61124+
61125+ return reqmode;
61126+}
61127+
61128+__u32
61129+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
61130+ const int fmode)
61131+{
61132+ __u32 mode, reqmode = GR_FIND;
61133+
61134+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
61135+ reqmode |= GR_EXEC;
61136+ if (fmode & S_IWOTH)
61137+ reqmode |= GR_WRITE;
61138+ if (fmode & S_IROTH)
61139+ reqmode |= GR_READ;
61140+
61141+ mode =
61142+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61143+ mnt);
61144+
61145+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61146+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61147+ reqmode & GR_READ ? " reading" : "",
61148+ reqmode & GR_WRITE ? " writing" : "",
61149+ reqmode & GR_EXEC ? " executing" : "");
61150+ return reqmode;
61151+ } else
61152+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61153+ {
61154+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61155+ reqmode & GR_READ ? " reading" : "",
61156+ reqmode & GR_WRITE ? " writing" : "",
61157+ reqmode & GR_EXEC ? " executing" : "");
61158+ return 0;
61159+ } else if (unlikely((mode & reqmode) != reqmode))
61160+ return 0;
61161+
61162+ return reqmode;
61163+}
61164+
61165+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
61166+{
61167+ __u32 mode;
61168+
61169+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
61170+
61171+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61172+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
61173+ return mode;
61174+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61175+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
61176+ return 0;
61177+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
61178+ return 0;
61179+
61180+ return (reqmode);
61181+}
61182+
61183+__u32
61184+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61185+{
61186+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
61187+}
61188+
61189+__u32
61190+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
61191+{
61192+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
61193+}
61194+
61195+__u32
61196+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
61197+{
61198+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
61199+}
61200+
61201+__u32
61202+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
61203+{
61204+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
61205+}
61206+
61207+__u32
61208+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
61209+ umode_t *modeptr)
61210+{
61211+ umode_t mode;
61212+
61213+ *modeptr &= ~gr_acl_umask();
61214+ mode = *modeptr;
61215+
61216+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
61217+ return 1;
61218+
61219+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
61220+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
61221+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
61222+ GR_CHMOD_ACL_MSG);
61223+ } else {
61224+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
61225+ }
61226+}
61227+
61228+__u32
61229+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
61230+{
61231+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
61232+}
61233+
61234+__u32
61235+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
61236+{
61237+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
61238+}
61239+
61240+__u32
61241+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
61242+{
61243+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
61244+}
61245+
61246+__u32
61247+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
61248+{
61249+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
61250+ GR_UNIXCONNECT_ACL_MSG);
61251+}
61252+
61253+/* hardlinks require at minimum create and link permission,
61254+ any additional privilege required is based on the
61255+ privilege of the file being linked to
61256+*/
61257+__u32
61258+gr_acl_handle_link(const struct dentry * new_dentry,
61259+ const struct dentry * parent_dentry,
61260+ const struct vfsmount * parent_mnt,
61261+ const struct dentry * old_dentry,
61262+ const struct vfsmount * old_mnt, const struct filename *to)
61263+{
61264+ __u32 mode;
61265+ __u32 needmode = GR_CREATE | GR_LINK;
61266+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61267+
61268+ mode =
61269+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61270+ old_mnt);
61271+
61272+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61273+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
61274+ return mode;
61275+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61276+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
61277+ return 0;
61278+ } else if (unlikely((mode & needmode) != needmode))
61279+ return 0;
61280+
61281+ return 1;
61282+}
61283+
61284+__u32
61285+gr_acl_handle_symlink(const struct dentry * new_dentry,
61286+ const struct dentry * parent_dentry,
61287+ const struct vfsmount * parent_mnt, const struct filename *from)
61288+{
61289+ __u32 needmode = GR_WRITE | GR_CREATE;
61290+ __u32 mode;
61291+
61292+ mode =
61293+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
61294+ GR_CREATE | GR_AUDIT_CREATE |
61295+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
61296+
61297+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
61298+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
61299+ return mode;
61300+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61301+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
61302+ return 0;
61303+ } else if (unlikely((mode & needmode) != needmode))
61304+ return 0;
61305+
61306+ return (GR_WRITE | GR_CREATE);
61307+}
61308+
61309+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
61310+{
61311+ __u32 mode;
61312+
61313+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61314+
61315+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61316+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
61317+ return mode;
61318+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61319+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
61320+ return 0;
61321+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
61322+ return 0;
61323+
61324+ return (reqmode);
61325+}
61326+
61327+__u32
61328+gr_acl_handle_mknod(const struct dentry * new_dentry,
61329+ const struct dentry * parent_dentry,
61330+ const struct vfsmount * parent_mnt,
61331+ const int mode)
61332+{
61333+ __u32 reqmode = GR_WRITE | GR_CREATE;
61334+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
61335+ reqmode |= GR_SETID;
61336+
61337+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61338+ reqmode, GR_MKNOD_ACL_MSG);
61339+}
61340+
61341+__u32
61342+gr_acl_handle_mkdir(const struct dentry *new_dentry,
61343+ const struct dentry *parent_dentry,
61344+ const struct vfsmount *parent_mnt)
61345+{
61346+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61347+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
61348+}
61349+
61350+#define RENAME_CHECK_SUCCESS(old, new) \
61351+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
61352+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
61353+
61354+int
61355+gr_acl_handle_rename(struct dentry *new_dentry,
61356+ struct dentry *parent_dentry,
61357+ const struct vfsmount *parent_mnt,
61358+ struct dentry *old_dentry,
61359+ struct inode *old_parent_inode,
61360+ struct vfsmount *old_mnt, const struct filename *newname)
61361+{
61362+ __u32 comp1, comp2;
61363+ int error = 0;
61364+
61365+ if (unlikely(!gr_acl_is_enabled()))
61366+ return 0;
61367+
61368+ if (!new_dentry->d_inode) {
61369+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
61370+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
61371+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
61372+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
61373+ GR_DELETE | GR_AUDIT_DELETE |
61374+ GR_AUDIT_READ | GR_AUDIT_WRITE |
61375+ GR_SUPPRESS, old_mnt);
61376+ } else {
61377+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
61378+ GR_CREATE | GR_DELETE |
61379+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
61380+ GR_AUDIT_READ | GR_AUDIT_WRITE |
61381+ GR_SUPPRESS, parent_mnt);
61382+ comp2 =
61383+ gr_search_file(old_dentry,
61384+ GR_READ | GR_WRITE | GR_AUDIT_READ |
61385+ GR_DELETE | GR_AUDIT_DELETE |
61386+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
61387+ }
61388+
61389+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
61390+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
61391+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
61392+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
61393+ && !(comp2 & GR_SUPPRESS)) {
61394+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
61395+ error = -EACCES;
61396+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
61397+ error = -EACCES;
61398+
61399+ return error;
61400+}
61401+
61402+void
61403+gr_acl_handle_exit(void)
61404+{
61405+ u16 id;
61406+ char *rolename;
61407+
61408+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
61409+ !(current->role->roletype & GR_ROLE_PERSIST))) {
61410+ id = current->acl_role_id;
61411+ rolename = current->role->rolename;
61412+ gr_set_acls(1);
61413+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
61414+ }
61415+
61416+ gr_put_exec_file(current);
61417+ return;
61418+}
61419+
61420+int
61421+gr_acl_handle_procpidmem(const struct task_struct *task)
61422+{
61423+ if (unlikely(!gr_acl_is_enabled()))
61424+ return 0;
61425+
61426+ if (task != current && task->acl->mode & GR_PROTPROCFD)
61427+ return -EACCES;
61428+
61429+ return 0;
61430+}
61431diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
61432new file mode 100644
61433index 0000000..4699807
61434--- /dev/null
61435+++ b/grsecurity/gracl_ip.c
61436@@ -0,0 +1,384 @@
61437+#include <linux/kernel.h>
61438+#include <asm/uaccess.h>
61439+#include <asm/errno.h>
61440+#include <net/sock.h>
61441+#include <linux/file.h>
61442+#include <linux/fs.h>
61443+#include <linux/net.h>
61444+#include <linux/in.h>
61445+#include <linux/skbuff.h>
61446+#include <linux/ip.h>
61447+#include <linux/udp.h>
61448+#include <linux/types.h>
61449+#include <linux/sched.h>
61450+#include <linux/netdevice.h>
61451+#include <linux/inetdevice.h>
61452+#include <linux/gracl.h>
61453+#include <linux/grsecurity.h>
61454+#include <linux/grinternal.h>
61455+
61456+#define GR_BIND 0x01
61457+#define GR_CONNECT 0x02
61458+#define GR_INVERT 0x04
61459+#define GR_BINDOVERRIDE 0x08
61460+#define GR_CONNECTOVERRIDE 0x10
61461+#define GR_SOCK_FAMILY 0x20
61462+
61463+static const char * gr_protocols[IPPROTO_MAX] = {
61464+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
61465+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
61466+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
61467+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
61468+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
61469+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
61470+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
61471+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
61472+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
61473+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
61474+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
61475+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
61476+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
61477+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
61478+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
61479+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
61480+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
61481+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
61482+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
61483+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
61484+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
61485+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
61486+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
61487+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
61488+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
61489+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
61490+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
61491+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
61492+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
61493+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
61494+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
61495+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
61496+ };
61497+
61498+static const char * gr_socktypes[SOCK_MAX] = {
61499+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
61500+ "unknown:7", "unknown:8", "unknown:9", "packet"
61501+ };
61502+
61503+static const char * gr_sockfamilies[AF_MAX+1] = {
61504+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
61505+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
61506+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
61507+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
61508+ };
61509+
61510+const char *
61511+gr_proto_to_name(unsigned char proto)
61512+{
61513+ return gr_protocols[proto];
61514+}
61515+
61516+const char *
61517+gr_socktype_to_name(unsigned char type)
61518+{
61519+ return gr_socktypes[type];
61520+}
61521+
61522+const char *
61523+gr_sockfamily_to_name(unsigned char family)
61524+{
61525+ return gr_sockfamilies[family];
61526+}
61527+
61528+int
61529+gr_search_socket(const int domain, const int type, const int protocol)
61530+{
61531+ struct acl_subject_label *curr;
61532+ const struct cred *cred = current_cred();
61533+
61534+ if (unlikely(!gr_acl_is_enabled()))
61535+ goto exit;
61536+
61537+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
61538+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
61539+ goto exit; // let the kernel handle it
61540+
61541+ curr = current->acl;
61542+
61543+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
61544+ /* the family is allowed, if this is PF_INET allow it only if
61545+ the extra sock type/protocol checks pass */
61546+ if (domain == PF_INET)
61547+ goto inet_check;
61548+ goto exit;
61549+ } else {
61550+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61551+ __u32 fakeip = 0;
61552+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61553+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
61554+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
61555+ gr_to_filename(current->exec_file->f_path.dentry,
61556+ current->exec_file->f_path.mnt) :
61557+ curr->filename, curr->filename,
61558+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
61559+ &current->signal->saved_ip);
61560+ goto exit;
61561+ }
61562+ goto exit_fail;
61563+ }
61564+
61565+inet_check:
61566+ /* the rest of this checking is for IPv4 only */
61567+ if (!curr->ips)
61568+ goto exit;
61569+
61570+ if ((curr->ip_type & (1 << type)) &&
61571+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
61572+ goto exit;
61573+
61574+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61575+ /* we don't place acls on raw sockets , and sometimes
61576+ dgram/ip sockets are opened for ioctl and not
61577+ bind/connect, so we'll fake a bind learn log */
61578+ if (type == SOCK_RAW || type == SOCK_PACKET) {
61579+ __u32 fakeip = 0;
61580+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61581+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
61582+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
61583+ gr_to_filename(current->exec_file->f_path.dentry,
61584+ current->exec_file->f_path.mnt) :
61585+ curr->filename, curr->filename,
61586+ &fakeip, 0, type,
61587+ protocol, GR_CONNECT, &current->signal->saved_ip);
61588+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
61589+ __u32 fakeip = 0;
61590+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61591+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
61592+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
61593+ gr_to_filename(current->exec_file->f_path.dentry,
61594+ current->exec_file->f_path.mnt) :
61595+ curr->filename, curr->filename,
61596+ &fakeip, 0, type,
61597+ protocol, GR_BIND, &current->signal->saved_ip);
61598+ }
61599+ /* we'll log when they use connect or bind */
61600+ goto exit;
61601+ }
61602+
61603+exit_fail:
61604+ if (domain == PF_INET)
61605+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
61606+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
61607+ else
61608+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
61609+ gr_socktype_to_name(type), protocol);
61610+
61611+ return 0;
61612+exit:
61613+ return 1;
61614+}
61615+
61616+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
61617+{
61618+ if ((ip->mode & mode) &&
61619+ (ip_port >= ip->low) &&
61620+ (ip_port <= ip->high) &&
61621+ ((ntohl(ip_addr) & our_netmask) ==
61622+ (ntohl(our_addr) & our_netmask))
61623+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
61624+ && (ip->type & (1 << type))) {
61625+ if (ip->mode & GR_INVERT)
61626+ return 2; // specifically denied
61627+ else
61628+ return 1; // allowed
61629+ }
61630+
61631+ return 0; // not specifically allowed, may continue parsing
61632+}
61633+
61634+static int
61635+gr_search_connectbind(const int full_mode, struct sock *sk,
61636+ struct sockaddr_in *addr, const int type)
61637+{
61638+ char iface[IFNAMSIZ] = {0};
61639+ struct acl_subject_label *curr;
61640+ struct acl_ip_label *ip;
61641+ struct inet_sock *isk;
61642+ struct net_device *dev;
61643+ struct in_device *idev;
61644+ unsigned long i;
61645+ int ret;
61646+ int mode = full_mode & (GR_BIND | GR_CONNECT);
61647+ __u32 ip_addr = 0;
61648+ __u32 our_addr;
61649+ __u32 our_netmask;
61650+ char *p;
61651+ __u16 ip_port = 0;
61652+ const struct cred *cred = current_cred();
61653+
61654+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61655+ return 0;
61656+
61657+ curr = current->acl;
61658+ isk = inet_sk(sk);
61659+
61660+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
61661+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61662+ addr->sin_addr.s_addr = curr->inaddr_any_override;
61663+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61664+ struct sockaddr_in saddr;
61665+ int err;
61666+
61667+ saddr.sin_family = AF_INET;
61668+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
61669+ saddr.sin_port = isk->inet_sport;
61670+
61671+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61672+ if (err)
61673+ return err;
61674+
61675+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61676+ if (err)
61677+ return err;
61678+ }
61679+
61680+ if (!curr->ips)
61681+ return 0;
61682+
61683+ ip_addr = addr->sin_addr.s_addr;
61684+ ip_port = ntohs(addr->sin_port);
61685+
61686+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61687+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61688+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
61689+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
61690+ gr_to_filename(current->exec_file->f_path.dentry,
61691+ current->exec_file->f_path.mnt) :
61692+ curr->filename, curr->filename,
61693+ &ip_addr, ip_port, type,
61694+ sk->sk_protocol, mode, &current->signal->saved_ip);
61695+ return 0;
61696+ }
61697+
61698+ for (i = 0; i < curr->ip_num; i++) {
61699+ ip = *(curr->ips + i);
61700+ if (ip->iface != NULL) {
61701+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
61702+ p = strchr(iface, ':');
61703+ if (p != NULL)
61704+ *p = '\0';
61705+ dev = dev_get_by_name(sock_net(sk), iface);
61706+ if (dev == NULL)
61707+ continue;
61708+ idev = in_dev_get(dev);
61709+ if (idev == NULL) {
61710+ dev_put(dev);
61711+ continue;
61712+ }
61713+ rcu_read_lock();
61714+ for_ifa(idev) {
61715+ if (!strcmp(ip->iface, ifa->ifa_label)) {
61716+ our_addr = ifa->ifa_address;
61717+ our_netmask = 0xffffffff;
61718+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61719+ if (ret == 1) {
61720+ rcu_read_unlock();
61721+ in_dev_put(idev);
61722+ dev_put(dev);
61723+ return 0;
61724+ } else if (ret == 2) {
61725+ rcu_read_unlock();
61726+ in_dev_put(idev);
61727+ dev_put(dev);
61728+ goto denied;
61729+ }
61730+ }
61731+ } endfor_ifa(idev);
61732+ rcu_read_unlock();
61733+ in_dev_put(idev);
61734+ dev_put(dev);
61735+ } else {
61736+ our_addr = ip->addr;
61737+ our_netmask = ip->netmask;
61738+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61739+ if (ret == 1)
61740+ return 0;
61741+ else if (ret == 2)
61742+ goto denied;
61743+ }
61744+ }
61745+
61746+denied:
61747+ if (mode == GR_BIND)
61748+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61749+ else if (mode == GR_CONNECT)
61750+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61751+
61752+ return -EACCES;
61753+}
61754+
61755+int
61756+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61757+{
61758+ /* always allow disconnection of dgram sockets with connect */
61759+ if (addr->sin_family == AF_UNSPEC)
61760+ return 0;
61761+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61762+}
61763+
61764+int
61765+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61766+{
61767+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61768+}
61769+
61770+int gr_search_listen(struct socket *sock)
61771+{
61772+ struct sock *sk = sock->sk;
61773+ struct sockaddr_in addr;
61774+
61775+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
61776+ addr.sin_port = inet_sk(sk)->inet_sport;
61777+
61778+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61779+}
61780+
61781+int gr_search_accept(struct socket *sock)
61782+{
61783+ struct sock *sk = sock->sk;
61784+ struct sockaddr_in addr;
61785+
61786+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
61787+ addr.sin_port = inet_sk(sk)->inet_sport;
61788+
61789+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61790+}
61791+
61792+int
61793+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61794+{
61795+ if (addr)
61796+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61797+ else {
61798+ struct sockaddr_in sin;
61799+ const struct inet_sock *inet = inet_sk(sk);
61800+
61801+ sin.sin_addr.s_addr = inet->inet_daddr;
61802+ sin.sin_port = inet->inet_dport;
61803+
61804+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61805+ }
61806+}
61807+
61808+int
61809+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61810+{
61811+ struct sockaddr_in sin;
61812+
61813+ if (unlikely(skb->len < sizeof (struct udphdr)))
61814+ return 0; // skip this packet
61815+
61816+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61817+ sin.sin_port = udp_hdr(skb)->source;
61818+
61819+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61820+}
61821diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61822new file mode 100644
61823index 0000000..25f54ef
61824--- /dev/null
61825+++ b/grsecurity/gracl_learn.c
61826@@ -0,0 +1,207 @@
61827+#include <linux/kernel.h>
61828+#include <linux/mm.h>
61829+#include <linux/sched.h>
61830+#include <linux/poll.h>
61831+#include <linux/string.h>
61832+#include <linux/file.h>
61833+#include <linux/types.h>
61834+#include <linux/vmalloc.h>
61835+#include <linux/grinternal.h>
61836+
61837+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61838+ size_t count, loff_t *ppos);
61839+extern int gr_acl_is_enabled(void);
61840+
61841+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61842+static int gr_learn_attached;
61843+
61844+/* use a 512k buffer */
61845+#define LEARN_BUFFER_SIZE (512 * 1024)
61846+
61847+static DEFINE_SPINLOCK(gr_learn_lock);
61848+static DEFINE_MUTEX(gr_learn_user_mutex);
61849+
61850+/* we need to maintain two buffers, so that the kernel context of grlearn
61851+ uses a semaphore around the userspace copying, and the other kernel contexts
61852+ use a spinlock when copying into the buffer, since they cannot sleep
61853+*/
61854+static char *learn_buffer;
61855+static char *learn_buffer_user;
61856+static int learn_buffer_len;
61857+static int learn_buffer_user_len;
61858+
61859+static ssize_t
61860+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61861+{
61862+ DECLARE_WAITQUEUE(wait, current);
61863+ ssize_t retval = 0;
61864+
61865+ add_wait_queue(&learn_wait, &wait);
61866+ set_current_state(TASK_INTERRUPTIBLE);
61867+ do {
61868+ mutex_lock(&gr_learn_user_mutex);
61869+ spin_lock(&gr_learn_lock);
61870+ if (learn_buffer_len)
61871+ break;
61872+ spin_unlock(&gr_learn_lock);
61873+ mutex_unlock(&gr_learn_user_mutex);
61874+ if (file->f_flags & O_NONBLOCK) {
61875+ retval = -EAGAIN;
61876+ goto out;
61877+ }
61878+ if (signal_pending(current)) {
61879+ retval = -ERESTARTSYS;
61880+ goto out;
61881+ }
61882+
61883+ schedule();
61884+ } while (1);
61885+
61886+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61887+ learn_buffer_user_len = learn_buffer_len;
61888+ retval = learn_buffer_len;
61889+ learn_buffer_len = 0;
61890+
61891+ spin_unlock(&gr_learn_lock);
61892+
61893+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61894+ retval = -EFAULT;
61895+
61896+ mutex_unlock(&gr_learn_user_mutex);
61897+out:
61898+ set_current_state(TASK_RUNNING);
61899+ remove_wait_queue(&learn_wait, &wait);
61900+ return retval;
61901+}
61902+
61903+static unsigned int
61904+poll_learn(struct file * file, poll_table * wait)
61905+{
61906+ poll_wait(file, &learn_wait, wait);
61907+
61908+ if (learn_buffer_len)
61909+ return (POLLIN | POLLRDNORM);
61910+
61911+ return 0;
61912+}
61913+
61914+void
61915+gr_clear_learn_entries(void)
61916+{
61917+ char *tmp;
61918+
61919+ mutex_lock(&gr_learn_user_mutex);
61920+ spin_lock(&gr_learn_lock);
61921+ tmp = learn_buffer;
61922+ learn_buffer = NULL;
61923+ spin_unlock(&gr_learn_lock);
61924+ if (tmp)
61925+ vfree(tmp);
61926+ if (learn_buffer_user != NULL) {
61927+ vfree(learn_buffer_user);
61928+ learn_buffer_user = NULL;
61929+ }
61930+ learn_buffer_len = 0;
61931+ mutex_unlock(&gr_learn_user_mutex);
61932+
61933+ return;
61934+}
61935+
61936+void
61937+gr_add_learn_entry(const char *fmt, ...)
61938+{
61939+ va_list args;
61940+ unsigned int len;
61941+
61942+ if (!gr_learn_attached)
61943+ return;
61944+
61945+ spin_lock(&gr_learn_lock);
61946+
61947+ /* leave a gap at the end so we know when it's "full" but don't have to
61948+ compute the exact length of the string we're trying to append
61949+ */
61950+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61951+ spin_unlock(&gr_learn_lock);
61952+ wake_up_interruptible(&learn_wait);
61953+ return;
61954+ }
61955+ if (learn_buffer == NULL) {
61956+ spin_unlock(&gr_learn_lock);
61957+ return;
61958+ }
61959+
61960+ va_start(args, fmt);
61961+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61962+ va_end(args);
61963+
61964+ learn_buffer_len += len + 1;
61965+
61966+ spin_unlock(&gr_learn_lock);
61967+ wake_up_interruptible(&learn_wait);
61968+
61969+ return;
61970+}
61971+
61972+static int
61973+open_learn(struct inode *inode, struct file *file)
61974+{
61975+ if (file->f_mode & FMODE_READ && gr_learn_attached)
61976+ return -EBUSY;
61977+ if (file->f_mode & FMODE_READ) {
61978+ int retval = 0;
61979+ mutex_lock(&gr_learn_user_mutex);
61980+ if (learn_buffer == NULL)
61981+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61982+ if (learn_buffer_user == NULL)
61983+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61984+ if (learn_buffer == NULL) {
61985+ retval = -ENOMEM;
61986+ goto out_error;
61987+ }
61988+ if (learn_buffer_user == NULL) {
61989+ retval = -ENOMEM;
61990+ goto out_error;
61991+ }
61992+ learn_buffer_len = 0;
61993+ learn_buffer_user_len = 0;
61994+ gr_learn_attached = 1;
61995+out_error:
61996+ mutex_unlock(&gr_learn_user_mutex);
61997+ return retval;
61998+ }
61999+ return 0;
62000+}
62001+
62002+static int
62003+close_learn(struct inode *inode, struct file *file)
62004+{
62005+ if (file->f_mode & FMODE_READ) {
62006+ char *tmp = NULL;
62007+ mutex_lock(&gr_learn_user_mutex);
62008+ spin_lock(&gr_learn_lock);
62009+ tmp = learn_buffer;
62010+ learn_buffer = NULL;
62011+ spin_unlock(&gr_learn_lock);
62012+ if (tmp)
62013+ vfree(tmp);
62014+ if (learn_buffer_user != NULL) {
62015+ vfree(learn_buffer_user);
62016+ learn_buffer_user = NULL;
62017+ }
62018+ learn_buffer_len = 0;
62019+ learn_buffer_user_len = 0;
62020+ gr_learn_attached = 0;
62021+ mutex_unlock(&gr_learn_user_mutex);
62022+ }
62023+
62024+ return 0;
62025+}
62026+
62027+const struct file_operations grsec_fops = {
62028+ .read = read_learn,
62029+ .write = write_grsec_handler,
62030+ .open = open_learn,
62031+ .release = close_learn,
62032+ .poll = poll_learn,
62033+};
62034diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
62035new file mode 100644
62036index 0000000..39645c9
62037--- /dev/null
62038+++ b/grsecurity/gracl_res.c
62039@@ -0,0 +1,68 @@
62040+#include <linux/kernel.h>
62041+#include <linux/sched.h>
62042+#include <linux/gracl.h>
62043+#include <linux/grinternal.h>
62044+
62045+static const char *restab_log[] = {
62046+ [RLIMIT_CPU] = "RLIMIT_CPU",
62047+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
62048+ [RLIMIT_DATA] = "RLIMIT_DATA",
62049+ [RLIMIT_STACK] = "RLIMIT_STACK",
62050+ [RLIMIT_CORE] = "RLIMIT_CORE",
62051+ [RLIMIT_RSS] = "RLIMIT_RSS",
62052+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
62053+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
62054+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
62055+ [RLIMIT_AS] = "RLIMIT_AS",
62056+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
62057+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
62058+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
62059+ [RLIMIT_NICE] = "RLIMIT_NICE",
62060+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
62061+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
62062+ [GR_CRASH_RES] = "RLIMIT_CRASH"
62063+};
62064+
62065+void
62066+gr_log_resource(const struct task_struct *task,
62067+ const int res, const unsigned long wanted, const int gt)
62068+{
62069+ const struct cred *cred;
62070+ unsigned long rlim;
62071+
62072+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
62073+ return;
62074+
62075+ // not yet supported resource
62076+ if (unlikely(!restab_log[res]))
62077+ return;
62078+
62079+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
62080+ rlim = task_rlimit_max(task, res);
62081+ else
62082+ rlim = task_rlimit(task, res);
62083+
62084+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
62085+ return;
62086+
62087+ rcu_read_lock();
62088+ cred = __task_cred(task);
62089+
62090+ if (res == RLIMIT_NPROC &&
62091+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
62092+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
62093+ goto out_rcu_unlock;
62094+ else if (res == RLIMIT_MEMLOCK &&
62095+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
62096+ goto out_rcu_unlock;
62097+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
62098+ goto out_rcu_unlock;
62099+ rcu_read_unlock();
62100+
62101+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
62102+
62103+ return;
62104+out_rcu_unlock:
62105+ rcu_read_unlock();
62106+ return;
62107+}
62108diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
62109new file mode 100644
62110index 0000000..8c8fc9d
62111--- /dev/null
62112+++ b/grsecurity/gracl_segv.c
62113@@ -0,0 +1,303 @@
62114+#include <linux/kernel.h>
62115+#include <linux/mm.h>
62116+#include <asm/uaccess.h>
62117+#include <asm/errno.h>
62118+#include <asm/mman.h>
62119+#include <net/sock.h>
62120+#include <linux/file.h>
62121+#include <linux/fs.h>
62122+#include <linux/net.h>
62123+#include <linux/in.h>
62124+#include <linux/slab.h>
62125+#include <linux/types.h>
62126+#include <linux/sched.h>
62127+#include <linux/timer.h>
62128+#include <linux/gracl.h>
62129+#include <linux/grsecurity.h>
62130+#include <linux/grinternal.h>
62131+
62132+static struct crash_uid *uid_set;
62133+static unsigned short uid_used;
62134+static DEFINE_SPINLOCK(gr_uid_lock);
62135+extern rwlock_t gr_inode_lock;
62136+extern struct acl_subject_label *
62137+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
62138+ struct acl_role_label *role);
62139+
62140+#ifdef CONFIG_BTRFS_FS
62141+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
62142+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
62143+#endif
62144+
62145+static inline dev_t __get_dev(const struct dentry *dentry)
62146+{
62147+#ifdef CONFIG_BTRFS_FS
62148+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
62149+ return get_btrfs_dev_from_inode(dentry->d_inode);
62150+ else
62151+#endif
62152+ return dentry->d_inode->i_sb->s_dev;
62153+}
62154+
62155+int
62156+gr_init_uidset(void)
62157+{
62158+ uid_set =
62159+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
62160+ uid_used = 0;
62161+
62162+ return uid_set ? 1 : 0;
62163+}
62164+
62165+void
62166+gr_free_uidset(void)
62167+{
62168+ if (uid_set)
62169+ kfree(uid_set);
62170+
62171+ return;
62172+}
62173+
62174+int
62175+gr_find_uid(const uid_t uid)
62176+{
62177+ struct crash_uid *tmp = uid_set;
62178+ uid_t buid;
62179+ int low = 0, high = uid_used - 1, mid;
62180+
62181+ while (high >= low) {
62182+ mid = (low + high) >> 1;
62183+ buid = tmp[mid].uid;
62184+ if (buid == uid)
62185+ return mid;
62186+ if (buid > uid)
62187+ high = mid - 1;
62188+ if (buid < uid)
62189+ low = mid + 1;
62190+ }
62191+
62192+ return -1;
62193+}
62194+
62195+static __inline__ void
62196+gr_insertsort(void)
62197+{
62198+ unsigned short i, j;
62199+ struct crash_uid index;
62200+
62201+ for (i = 1; i < uid_used; i++) {
62202+ index = uid_set[i];
62203+ j = i;
62204+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
62205+ uid_set[j] = uid_set[j - 1];
62206+ j--;
62207+ }
62208+ uid_set[j] = index;
62209+ }
62210+
62211+ return;
62212+}
62213+
62214+static __inline__ void
62215+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
62216+{
62217+ int loc;
62218+ uid_t uid = GR_GLOBAL_UID(kuid);
62219+
62220+ if (uid_used == GR_UIDTABLE_MAX)
62221+ return;
62222+
62223+ loc = gr_find_uid(uid);
62224+
62225+ if (loc >= 0) {
62226+ uid_set[loc].expires = expires;
62227+ return;
62228+ }
62229+
62230+ uid_set[uid_used].uid = uid;
62231+ uid_set[uid_used].expires = expires;
62232+ uid_used++;
62233+
62234+ gr_insertsort();
62235+
62236+ return;
62237+}
62238+
62239+void
62240+gr_remove_uid(const unsigned short loc)
62241+{
62242+ unsigned short i;
62243+
62244+ for (i = loc + 1; i < uid_used; i++)
62245+ uid_set[i - 1] = uid_set[i];
62246+
62247+ uid_used--;
62248+
62249+ return;
62250+}
62251+
62252+int
62253+gr_check_crash_uid(const kuid_t kuid)
62254+{
62255+ int loc;
62256+ int ret = 0;
62257+ uid_t uid;
62258+
62259+ if (unlikely(!gr_acl_is_enabled()))
62260+ return 0;
62261+
62262+ uid = GR_GLOBAL_UID(kuid);
62263+
62264+ spin_lock(&gr_uid_lock);
62265+ loc = gr_find_uid(uid);
62266+
62267+ if (loc < 0)
62268+ goto out_unlock;
62269+
62270+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
62271+ gr_remove_uid(loc);
62272+ else
62273+ ret = 1;
62274+
62275+out_unlock:
62276+ spin_unlock(&gr_uid_lock);
62277+ return ret;
62278+}
62279+
62280+static __inline__ int
62281+proc_is_setxid(const struct cred *cred)
62282+{
62283+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
62284+ !uid_eq(cred->uid, cred->fsuid))
62285+ return 1;
62286+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
62287+ !gid_eq(cred->gid, cred->fsgid))
62288+ return 1;
62289+
62290+ return 0;
62291+}
62292+
62293+extern int gr_fake_force_sig(int sig, struct task_struct *t);
62294+
62295+void
62296+gr_handle_crash(struct task_struct *task, const int sig)
62297+{
62298+ struct acl_subject_label *curr;
62299+ struct task_struct *tsk, *tsk2;
62300+ const struct cred *cred;
62301+ const struct cred *cred2;
62302+
62303+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
62304+ return;
62305+
62306+ if (unlikely(!gr_acl_is_enabled()))
62307+ return;
62308+
62309+ curr = task->acl;
62310+
62311+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
62312+ return;
62313+
62314+ if (time_before_eq(curr->expires, get_seconds())) {
62315+ curr->expires = 0;
62316+ curr->crashes = 0;
62317+ }
62318+
62319+ curr->crashes++;
62320+
62321+ if (!curr->expires)
62322+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
62323+
62324+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62325+ time_after(curr->expires, get_seconds())) {
62326+ rcu_read_lock();
62327+ cred = __task_cred(task);
62328+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
62329+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62330+ spin_lock(&gr_uid_lock);
62331+ gr_insert_uid(cred->uid, curr->expires);
62332+ spin_unlock(&gr_uid_lock);
62333+ curr->expires = 0;
62334+ curr->crashes = 0;
62335+ read_lock(&tasklist_lock);
62336+ do_each_thread(tsk2, tsk) {
62337+ cred2 = __task_cred(tsk);
62338+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
62339+ gr_fake_force_sig(SIGKILL, tsk);
62340+ } while_each_thread(tsk2, tsk);
62341+ read_unlock(&tasklist_lock);
62342+ } else {
62343+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62344+ read_lock(&tasklist_lock);
62345+ read_lock(&grsec_exec_file_lock);
62346+ do_each_thread(tsk2, tsk) {
62347+ if (likely(tsk != task)) {
62348+ // if this thread has the same subject as the one that triggered
62349+ // RES_CRASH and it's the same binary, kill it
62350+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
62351+ gr_fake_force_sig(SIGKILL, tsk);
62352+ }
62353+ } while_each_thread(tsk2, tsk);
62354+ read_unlock(&grsec_exec_file_lock);
62355+ read_unlock(&tasklist_lock);
62356+ }
62357+ rcu_read_unlock();
62358+ }
62359+
62360+ return;
62361+}
62362+
62363+int
62364+gr_check_crash_exec(const struct file *filp)
62365+{
62366+ struct acl_subject_label *curr;
62367+
62368+ if (unlikely(!gr_acl_is_enabled()))
62369+ return 0;
62370+
62371+ read_lock(&gr_inode_lock);
62372+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
62373+ __get_dev(filp->f_path.dentry),
62374+ current->role);
62375+ read_unlock(&gr_inode_lock);
62376+
62377+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
62378+ (!curr->crashes && !curr->expires))
62379+ return 0;
62380+
62381+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62382+ time_after(curr->expires, get_seconds()))
62383+ return 1;
62384+ else if (time_before_eq(curr->expires, get_seconds())) {
62385+ curr->crashes = 0;
62386+ curr->expires = 0;
62387+ }
62388+
62389+ return 0;
62390+}
62391+
62392+void
62393+gr_handle_alertkill(struct task_struct *task)
62394+{
62395+ struct acl_subject_label *curracl;
62396+ __u32 curr_ip;
62397+ struct task_struct *p, *p2;
62398+
62399+ if (unlikely(!gr_acl_is_enabled()))
62400+ return;
62401+
62402+ curracl = task->acl;
62403+ curr_ip = task->signal->curr_ip;
62404+
62405+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
62406+ read_lock(&tasklist_lock);
62407+ do_each_thread(p2, p) {
62408+ if (p->signal->curr_ip == curr_ip)
62409+ gr_fake_force_sig(SIGKILL, p);
62410+ } while_each_thread(p2, p);
62411+ read_unlock(&tasklist_lock);
62412+ } else if (curracl->mode & GR_KILLPROC)
62413+ gr_fake_force_sig(SIGKILL, task);
62414+
62415+ return;
62416+}
62417diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
62418new file mode 100644
62419index 0000000..98011b0
62420--- /dev/null
62421+++ b/grsecurity/gracl_shm.c
62422@@ -0,0 +1,40 @@
62423+#include <linux/kernel.h>
62424+#include <linux/mm.h>
62425+#include <linux/sched.h>
62426+#include <linux/file.h>
62427+#include <linux/ipc.h>
62428+#include <linux/gracl.h>
62429+#include <linux/grsecurity.h>
62430+#include <linux/grinternal.h>
62431+
62432+int
62433+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62434+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
62435+{
62436+ struct task_struct *task;
62437+
62438+ if (!gr_acl_is_enabled())
62439+ return 1;
62440+
62441+ rcu_read_lock();
62442+ read_lock(&tasklist_lock);
62443+
62444+ task = find_task_by_vpid(shm_cprid);
62445+
62446+ if (unlikely(!task))
62447+ task = find_task_by_vpid(shm_lapid);
62448+
62449+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
62450+ (task_pid_nr(task) == shm_lapid)) &&
62451+ (task->acl->mode & GR_PROTSHM) &&
62452+ (task->acl != current->acl))) {
62453+ read_unlock(&tasklist_lock);
62454+ rcu_read_unlock();
62455+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
62456+ return 0;
62457+ }
62458+ read_unlock(&tasklist_lock);
62459+ rcu_read_unlock();
62460+
62461+ return 1;
62462+}
62463diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
62464new file mode 100644
62465index 0000000..bc0be01
62466--- /dev/null
62467+++ b/grsecurity/grsec_chdir.c
62468@@ -0,0 +1,19 @@
62469+#include <linux/kernel.h>
62470+#include <linux/sched.h>
62471+#include <linux/fs.h>
62472+#include <linux/file.h>
62473+#include <linux/grsecurity.h>
62474+#include <linux/grinternal.h>
62475+
62476+void
62477+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
62478+{
62479+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62480+ if ((grsec_enable_chdir && grsec_enable_group &&
62481+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
62482+ !grsec_enable_group)) {
62483+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
62484+ }
62485+#endif
62486+ return;
62487+}
62488diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
62489new file mode 100644
62490index 0000000..6d2de57
62491--- /dev/null
62492+++ b/grsecurity/grsec_chroot.c
62493@@ -0,0 +1,357 @@
62494+#include <linux/kernel.h>
62495+#include <linux/module.h>
62496+#include <linux/sched.h>
62497+#include <linux/file.h>
62498+#include <linux/fs.h>
62499+#include <linux/mount.h>
62500+#include <linux/types.h>
62501+#include "../fs/mount.h"
62502+#include <linux/grsecurity.h>
62503+#include <linux/grinternal.h>
62504+
62505+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
62506+{
62507+#ifdef CONFIG_GRKERNSEC
62508+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
62509+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
62510+ task->gr_is_chrooted = 1;
62511+ else
62512+ task->gr_is_chrooted = 0;
62513+
62514+ task->gr_chroot_dentry = path->dentry;
62515+#endif
62516+ return;
62517+}
62518+
62519+void gr_clear_chroot_entries(struct task_struct *task)
62520+{
62521+#ifdef CONFIG_GRKERNSEC
62522+ task->gr_is_chrooted = 0;
62523+ task->gr_chroot_dentry = NULL;
62524+#endif
62525+ return;
62526+}
62527+
62528+int
62529+gr_handle_chroot_unix(const pid_t pid)
62530+{
62531+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62532+ struct task_struct *p;
62533+
62534+ if (unlikely(!grsec_enable_chroot_unix))
62535+ return 1;
62536+
62537+ if (likely(!proc_is_chrooted(current)))
62538+ return 1;
62539+
62540+ rcu_read_lock();
62541+ read_lock(&tasklist_lock);
62542+ p = find_task_by_vpid_unrestricted(pid);
62543+ if (unlikely(p && !have_same_root(current, p))) {
62544+ read_unlock(&tasklist_lock);
62545+ rcu_read_unlock();
62546+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
62547+ return 0;
62548+ }
62549+ read_unlock(&tasklist_lock);
62550+ rcu_read_unlock();
62551+#endif
62552+ return 1;
62553+}
62554+
62555+int
62556+gr_handle_chroot_nice(void)
62557+{
62558+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62559+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
62560+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
62561+ return -EPERM;
62562+ }
62563+#endif
62564+ return 0;
62565+}
62566+
62567+int
62568+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
62569+{
62570+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62571+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
62572+ && proc_is_chrooted(current)) {
62573+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
62574+ return -EACCES;
62575+ }
62576+#endif
62577+ return 0;
62578+}
62579+
62580+int
62581+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
62582+{
62583+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62584+ struct task_struct *p;
62585+ int ret = 0;
62586+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
62587+ return ret;
62588+
62589+ read_lock(&tasklist_lock);
62590+ do_each_pid_task(pid, type, p) {
62591+ if (!have_same_root(current, p)) {
62592+ ret = 1;
62593+ goto out;
62594+ }
62595+ } while_each_pid_task(pid, type, p);
62596+out:
62597+ read_unlock(&tasklist_lock);
62598+ return ret;
62599+#endif
62600+ return 0;
62601+}
62602+
62603+int
62604+gr_pid_is_chrooted(struct task_struct *p)
62605+{
62606+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62607+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
62608+ return 0;
62609+
62610+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
62611+ !have_same_root(current, p)) {
62612+ return 1;
62613+ }
62614+#endif
62615+ return 0;
62616+}
62617+
62618+EXPORT_SYMBOL(gr_pid_is_chrooted);
62619+
62620+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
62621+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
62622+{
62623+ struct path path, currentroot;
62624+ int ret = 0;
62625+
62626+ path.dentry = (struct dentry *)u_dentry;
62627+ path.mnt = (struct vfsmount *)u_mnt;
62628+ get_fs_root(current->fs, &currentroot);
62629+ if (path_is_under(&path, &currentroot))
62630+ ret = 1;
62631+ path_put(&currentroot);
62632+
62633+ return ret;
62634+}
62635+#endif
62636+
62637+int
62638+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62639+{
62640+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62641+ if (!grsec_enable_chroot_fchdir)
62642+ return 1;
62643+
62644+ if (!proc_is_chrooted(current))
62645+ return 1;
62646+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62647+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62648+ return 0;
62649+ }
62650+#endif
62651+ return 1;
62652+}
62653+
62654+int
62655+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62656+ const time_t shm_createtime)
62657+{
62658+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62659+ struct task_struct *p;
62660+ time_t starttime;
62661+
62662+ if (unlikely(!grsec_enable_chroot_shmat))
62663+ return 1;
62664+
62665+ if (likely(!proc_is_chrooted(current)))
62666+ return 1;
62667+
62668+ rcu_read_lock();
62669+ read_lock(&tasklist_lock);
62670+
62671+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62672+ starttime = p->start_time.tv_sec;
62673+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62674+ if (have_same_root(current, p)) {
62675+ goto allow;
62676+ } else {
62677+ read_unlock(&tasklist_lock);
62678+ rcu_read_unlock();
62679+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62680+ return 0;
62681+ }
62682+ }
62683+ /* creator exited, pid reuse, fall through to next check */
62684+ }
62685+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62686+ if (unlikely(!have_same_root(current, p))) {
62687+ read_unlock(&tasklist_lock);
62688+ rcu_read_unlock();
62689+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62690+ return 0;
62691+ }
62692+ }
62693+
62694+allow:
62695+ read_unlock(&tasklist_lock);
62696+ rcu_read_unlock();
62697+#endif
62698+ return 1;
62699+}
62700+
62701+void
62702+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62703+{
62704+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62705+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62706+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62707+#endif
62708+ return;
62709+}
62710+
62711+int
62712+gr_handle_chroot_mknod(const struct dentry *dentry,
62713+ const struct vfsmount *mnt, const int mode)
62714+{
62715+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62716+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62717+ proc_is_chrooted(current)) {
62718+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62719+ return -EPERM;
62720+ }
62721+#endif
62722+ return 0;
62723+}
62724+
62725+int
62726+gr_handle_chroot_mount(const struct dentry *dentry,
62727+ const struct vfsmount *mnt, const char *dev_name)
62728+{
62729+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62730+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62731+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
62732+ return -EPERM;
62733+ }
62734+#endif
62735+ return 0;
62736+}
62737+
62738+int
62739+gr_handle_chroot_pivot(void)
62740+{
62741+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62742+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62743+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62744+ return -EPERM;
62745+ }
62746+#endif
62747+ return 0;
62748+}
62749+
62750+int
62751+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62752+{
62753+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62754+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62755+ !gr_is_outside_chroot(dentry, mnt)) {
62756+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62757+ return -EPERM;
62758+ }
62759+#endif
62760+ return 0;
62761+}
62762+
62763+extern const char *captab_log[];
62764+extern int captab_log_entries;
62765+
62766+int
62767+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
62768+{
62769+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62770+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
62771+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62772+ if (cap_raised(chroot_caps, cap)) {
62773+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
62774+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
62775+ }
62776+ return 0;
62777+ }
62778+ }
62779+#endif
62780+ return 1;
62781+}
62782+
62783+int
62784+gr_chroot_is_capable(const int cap)
62785+{
62786+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62787+ return gr_task_chroot_is_capable(current, current_cred(), cap);
62788+#endif
62789+ return 1;
62790+}
62791+
62792+int
62793+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
62794+{
62795+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62796+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
62797+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62798+ if (cap_raised(chroot_caps, cap)) {
62799+ return 0;
62800+ }
62801+ }
62802+#endif
62803+ return 1;
62804+}
62805+
62806+int
62807+gr_chroot_is_capable_nolog(const int cap)
62808+{
62809+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62810+ return gr_task_chroot_is_capable_nolog(current, cap);
62811+#endif
62812+ return 1;
62813+}
62814+
62815+int
62816+gr_handle_chroot_sysctl(const int op)
62817+{
62818+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62819+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
62820+ proc_is_chrooted(current))
62821+ return -EACCES;
62822+#endif
62823+ return 0;
62824+}
62825+
62826+void
62827+gr_handle_chroot_chdir(struct path *path)
62828+{
62829+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62830+ if (grsec_enable_chroot_chdir)
62831+ set_fs_pwd(current->fs, path);
62832+#endif
62833+ return;
62834+}
62835+
62836+int
62837+gr_handle_chroot_chmod(const struct dentry *dentry,
62838+ const struct vfsmount *mnt, const int mode)
62839+{
62840+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62841+ /* allow chmod +s on directories, but not files */
62842+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62843+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62844+ proc_is_chrooted(current)) {
62845+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62846+ return -EPERM;
62847+ }
62848+#endif
62849+ return 0;
62850+}
62851diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62852new file mode 100644
62853index 0000000..207d409
62854--- /dev/null
62855+++ b/grsecurity/grsec_disabled.c
62856@@ -0,0 +1,434 @@
62857+#include <linux/kernel.h>
62858+#include <linux/module.h>
62859+#include <linux/sched.h>
62860+#include <linux/file.h>
62861+#include <linux/fs.h>
62862+#include <linux/kdev_t.h>
62863+#include <linux/net.h>
62864+#include <linux/in.h>
62865+#include <linux/ip.h>
62866+#include <linux/skbuff.h>
62867+#include <linux/sysctl.h>
62868+
62869+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62870+void
62871+pax_set_initial_flags(struct linux_binprm *bprm)
62872+{
62873+ return;
62874+}
62875+#endif
62876+
62877+#ifdef CONFIG_SYSCTL
62878+__u32
62879+gr_handle_sysctl(const struct ctl_table * table, const int op)
62880+{
62881+ return 0;
62882+}
62883+#endif
62884+
62885+#ifdef CONFIG_TASKSTATS
62886+int gr_is_taskstats_denied(int pid)
62887+{
62888+ return 0;
62889+}
62890+#endif
62891+
62892+int
62893+gr_acl_is_enabled(void)
62894+{
62895+ return 0;
62896+}
62897+
62898+void
62899+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62900+{
62901+ return;
62902+}
62903+
62904+int
62905+gr_handle_rawio(const struct inode *inode)
62906+{
62907+ return 0;
62908+}
62909+
62910+void
62911+gr_acl_handle_psacct(struct task_struct *task, const long code)
62912+{
62913+ return;
62914+}
62915+
62916+int
62917+gr_handle_ptrace(struct task_struct *task, const long request)
62918+{
62919+ return 0;
62920+}
62921+
62922+int
62923+gr_handle_proc_ptrace(struct task_struct *task)
62924+{
62925+ return 0;
62926+}
62927+
62928+int
62929+gr_set_acls(const int type)
62930+{
62931+ return 0;
62932+}
62933+
62934+int
62935+gr_check_hidden_task(const struct task_struct *tsk)
62936+{
62937+ return 0;
62938+}
62939+
62940+int
62941+gr_check_protected_task(const struct task_struct *task)
62942+{
62943+ return 0;
62944+}
62945+
62946+int
62947+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62948+{
62949+ return 0;
62950+}
62951+
62952+void
62953+gr_copy_label(struct task_struct *tsk)
62954+{
62955+ return;
62956+}
62957+
62958+void
62959+gr_set_pax_flags(struct task_struct *task)
62960+{
62961+ return;
62962+}
62963+
62964+int
62965+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62966+ const int unsafe_share)
62967+{
62968+ return 0;
62969+}
62970+
62971+void
62972+gr_handle_delete(const ino_t ino, const dev_t dev)
62973+{
62974+ return;
62975+}
62976+
62977+void
62978+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62979+{
62980+ return;
62981+}
62982+
62983+void
62984+gr_handle_crash(struct task_struct *task, const int sig)
62985+{
62986+ return;
62987+}
62988+
62989+int
62990+gr_check_crash_exec(const struct file *filp)
62991+{
62992+ return 0;
62993+}
62994+
62995+int
62996+gr_check_crash_uid(const kuid_t uid)
62997+{
62998+ return 0;
62999+}
63000+
63001+void
63002+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
63003+ struct dentry *old_dentry,
63004+ struct dentry *new_dentry,
63005+ struct vfsmount *mnt, const __u8 replace)
63006+{
63007+ return;
63008+}
63009+
63010+int
63011+gr_search_socket(const int family, const int type, const int protocol)
63012+{
63013+ return 1;
63014+}
63015+
63016+int
63017+gr_search_connectbind(const int mode, const struct socket *sock,
63018+ const struct sockaddr_in *addr)
63019+{
63020+ return 0;
63021+}
63022+
63023+void
63024+gr_handle_alertkill(struct task_struct *task)
63025+{
63026+ return;
63027+}
63028+
63029+__u32
63030+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
63031+{
63032+ return 1;
63033+}
63034+
63035+__u32
63036+gr_acl_handle_hidden_file(const struct dentry * dentry,
63037+ const struct vfsmount * mnt)
63038+{
63039+ return 1;
63040+}
63041+
63042+__u32
63043+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
63044+ int acc_mode)
63045+{
63046+ return 1;
63047+}
63048+
63049+__u32
63050+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
63051+{
63052+ return 1;
63053+}
63054+
63055+__u32
63056+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
63057+{
63058+ return 1;
63059+}
63060+
63061+int
63062+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
63063+ unsigned int *vm_flags)
63064+{
63065+ return 1;
63066+}
63067+
63068+__u32
63069+gr_acl_handle_truncate(const struct dentry * dentry,
63070+ const struct vfsmount * mnt)
63071+{
63072+ return 1;
63073+}
63074+
63075+__u32
63076+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
63077+{
63078+ return 1;
63079+}
63080+
63081+__u32
63082+gr_acl_handle_access(const struct dentry * dentry,
63083+ const struct vfsmount * mnt, const int fmode)
63084+{
63085+ return 1;
63086+}
63087+
63088+__u32
63089+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
63090+ umode_t *mode)
63091+{
63092+ return 1;
63093+}
63094+
63095+__u32
63096+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
63097+{
63098+ return 1;
63099+}
63100+
63101+__u32
63102+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
63103+{
63104+ return 1;
63105+}
63106+
63107+void
63108+grsecurity_init(void)
63109+{
63110+ return;
63111+}
63112+
63113+umode_t gr_acl_umask(void)
63114+{
63115+ return 0;
63116+}
63117+
63118+__u32
63119+gr_acl_handle_mknod(const struct dentry * new_dentry,
63120+ const struct dentry * parent_dentry,
63121+ const struct vfsmount * parent_mnt,
63122+ const int mode)
63123+{
63124+ return 1;
63125+}
63126+
63127+__u32
63128+gr_acl_handle_mkdir(const struct dentry * new_dentry,
63129+ const struct dentry * parent_dentry,
63130+ const struct vfsmount * parent_mnt)
63131+{
63132+ return 1;
63133+}
63134+
63135+__u32
63136+gr_acl_handle_symlink(const struct dentry * new_dentry,
63137+ const struct dentry * parent_dentry,
63138+ const struct vfsmount * parent_mnt, const struct filename *from)
63139+{
63140+ return 1;
63141+}
63142+
63143+__u32
63144+gr_acl_handle_link(const struct dentry * new_dentry,
63145+ const struct dentry * parent_dentry,
63146+ const struct vfsmount * parent_mnt,
63147+ const struct dentry * old_dentry,
63148+ const struct vfsmount * old_mnt, const struct filename *to)
63149+{
63150+ return 1;
63151+}
63152+
63153+int
63154+gr_acl_handle_rename(const struct dentry *new_dentry,
63155+ const struct dentry *parent_dentry,
63156+ const struct vfsmount *parent_mnt,
63157+ const struct dentry *old_dentry,
63158+ const struct inode *old_parent_inode,
63159+ const struct vfsmount *old_mnt, const struct filename *newname)
63160+{
63161+ return 0;
63162+}
63163+
63164+int
63165+gr_acl_handle_filldir(const struct file *file, const char *name,
63166+ const int namelen, const ino_t ino)
63167+{
63168+ return 1;
63169+}
63170+
63171+int
63172+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63173+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
63174+{
63175+ return 1;
63176+}
63177+
63178+int
63179+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
63180+{
63181+ return 0;
63182+}
63183+
63184+int
63185+gr_search_accept(const struct socket *sock)
63186+{
63187+ return 0;
63188+}
63189+
63190+int
63191+gr_search_listen(const struct socket *sock)
63192+{
63193+ return 0;
63194+}
63195+
63196+int
63197+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
63198+{
63199+ return 0;
63200+}
63201+
63202+__u32
63203+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
63204+{
63205+ return 1;
63206+}
63207+
63208+__u32
63209+gr_acl_handle_creat(const struct dentry * dentry,
63210+ const struct dentry * p_dentry,
63211+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
63212+ const int imode)
63213+{
63214+ return 1;
63215+}
63216+
63217+void
63218+gr_acl_handle_exit(void)
63219+{
63220+ return;
63221+}
63222+
63223+int
63224+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
63225+{
63226+ return 1;
63227+}
63228+
63229+void
63230+gr_set_role_label(const kuid_t uid, const kgid_t gid)
63231+{
63232+ return;
63233+}
63234+
63235+int
63236+gr_acl_handle_procpidmem(const struct task_struct *task)
63237+{
63238+ return 0;
63239+}
63240+
63241+int
63242+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63243+{
63244+ return 0;
63245+}
63246+
63247+int
63248+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63249+{
63250+ return 0;
63251+}
63252+
63253+void
63254+gr_set_kernel_label(struct task_struct *task)
63255+{
63256+ return;
63257+}
63258+
63259+int
63260+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
63261+{
63262+ return 0;
63263+}
63264+
63265+int
63266+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
63267+{
63268+ return 0;
63269+}
63270+
63271+int gr_acl_enable_at_secure(void)
63272+{
63273+ return 0;
63274+}
63275+
63276+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63277+{
63278+ return dentry->d_inode->i_sb->s_dev;
63279+}
63280+
63281+void gr_put_exec_file(struct task_struct *task)
63282+{
63283+ return;
63284+}
63285+
63286+EXPORT_SYMBOL(gr_set_kernel_label);
63287+#ifdef CONFIG_SECURITY
63288+EXPORT_SYMBOL(gr_check_user_change);
63289+EXPORT_SYMBOL(gr_check_group_change);
63290+#endif
63291diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
63292new file mode 100644
63293index 0000000..abfa971
63294--- /dev/null
63295+++ b/grsecurity/grsec_exec.c
63296@@ -0,0 +1,174 @@
63297+#include <linux/kernel.h>
63298+#include <linux/sched.h>
63299+#include <linux/file.h>
63300+#include <linux/binfmts.h>
63301+#include <linux/fs.h>
63302+#include <linux/types.h>
63303+#include <linux/grdefs.h>
63304+#include <linux/grsecurity.h>
63305+#include <linux/grinternal.h>
63306+#include <linux/capability.h>
63307+#include <linux/module.h>
63308+
63309+#include <asm/uaccess.h>
63310+
63311+#ifdef CONFIG_GRKERNSEC_EXECLOG
63312+static char gr_exec_arg_buf[132];
63313+static DEFINE_MUTEX(gr_exec_arg_mutex);
63314+#endif
63315+
63316+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
63317+
63318+void
63319+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
63320+{
63321+#ifdef CONFIG_GRKERNSEC_EXECLOG
63322+ char *grarg = gr_exec_arg_buf;
63323+ unsigned int i, x, execlen = 0;
63324+ char c;
63325+
63326+ if (!((grsec_enable_execlog && grsec_enable_group &&
63327+ in_group_p(grsec_audit_gid))
63328+ || (grsec_enable_execlog && !grsec_enable_group)))
63329+ return;
63330+
63331+ mutex_lock(&gr_exec_arg_mutex);
63332+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
63333+
63334+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
63335+ const char __user *p;
63336+ unsigned int len;
63337+
63338+ p = get_user_arg_ptr(argv, i);
63339+ if (IS_ERR(p))
63340+ goto log;
63341+
63342+ len = strnlen_user(p, 128 - execlen);
63343+ if (len > 128 - execlen)
63344+ len = 128 - execlen;
63345+ else if (len > 0)
63346+ len--;
63347+ if (copy_from_user(grarg + execlen, p, len))
63348+ goto log;
63349+
63350+ /* rewrite unprintable characters */
63351+ for (x = 0; x < len; x++) {
63352+ c = *(grarg + execlen + x);
63353+ if (c < 32 || c > 126)
63354+ *(grarg + execlen + x) = ' ';
63355+ }
63356+
63357+ execlen += len;
63358+ *(grarg + execlen) = ' ';
63359+ *(grarg + execlen + 1) = '\0';
63360+ execlen++;
63361+ }
63362+
63363+ log:
63364+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63365+ bprm->file->f_path.mnt, grarg);
63366+ mutex_unlock(&gr_exec_arg_mutex);
63367+#endif
63368+ return;
63369+}
63370+
63371+#ifdef CONFIG_GRKERNSEC
63372+extern int gr_acl_is_capable(const int cap);
63373+extern int gr_acl_is_capable_nolog(const int cap);
63374+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
63375+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
63376+extern int gr_chroot_is_capable(const int cap);
63377+extern int gr_chroot_is_capable_nolog(const int cap);
63378+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
63379+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
63380+#endif
63381+
63382+const char *captab_log[] = {
63383+ "CAP_CHOWN",
63384+ "CAP_DAC_OVERRIDE",
63385+ "CAP_DAC_READ_SEARCH",
63386+ "CAP_FOWNER",
63387+ "CAP_FSETID",
63388+ "CAP_KILL",
63389+ "CAP_SETGID",
63390+ "CAP_SETUID",
63391+ "CAP_SETPCAP",
63392+ "CAP_LINUX_IMMUTABLE",
63393+ "CAP_NET_BIND_SERVICE",
63394+ "CAP_NET_BROADCAST",
63395+ "CAP_NET_ADMIN",
63396+ "CAP_NET_RAW",
63397+ "CAP_IPC_LOCK",
63398+ "CAP_IPC_OWNER",
63399+ "CAP_SYS_MODULE",
63400+ "CAP_SYS_RAWIO",
63401+ "CAP_SYS_CHROOT",
63402+ "CAP_SYS_PTRACE",
63403+ "CAP_SYS_PACCT",
63404+ "CAP_SYS_ADMIN",
63405+ "CAP_SYS_BOOT",
63406+ "CAP_SYS_NICE",
63407+ "CAP_SYS_RESOURCE",
63408+ "CAP_SYS_TIME",
63409+ "CAP_SYS_TTY_CONFIG",
63410+ "CAP_MKNOD",
63411+ "CAP_LEASE",
63412+ "CAP_AUDIT_WRITE",
63413+ "CAP_AUDIT_CONTROL",
63414+ "CAP_SETFCAP",
63415+ "CAP_MAC_OVERRIDE",
63416+ "CAP_MAC_ADMIN",
63417+ "CAP_SYSLOG",
63418+ "CAP_WAKE_ALARM"
63419+};
63420+
63421+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
63422+
63423+int gr_is_capable(const int cap)
63424+{
63425+#ifdef CONFIG_GRKERNSEC
63426+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
63427+ return 1;
63428+ return 0;
63429+#else
63430+ return 1;
63431+#endif
63432+}
63433+
63434+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
63435+{
63436+#ifdef CONFIG_GRKERNSEC
63437+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
63438+ return 1;
63439+ return 0;
63440+#else
63441+ return 1;
63442+#endif
63443+}
63444+
63445+int gr_is_capable_nolog(const int cap)
63446+{
63447+#ifdef CONFIG_GRKERNSEC
63448+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
63449+ return 1;
63450+ return 0;
63451+#else
63452+ return 1;
63453+#endif
63454+}
63455+
63456+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
63457+{
63458+#ifdef CONFIG_GRKERNSEC
63459+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
63460+ return 1;
63461+ return 0;
63462+#else
63463+ return 1;
63464+#endif
63465+}
63466+
63467+EXPORT_SYMBOL(gr_is_capable);
63468+EXPORT_SYMBOL(gr_is_capable_nolog);
63469+EXPORT_SYMBOL(gr_task_is_capable);
63470+EXPORT_SYMBOL(gr_task_is_capable_nolog);
63471diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
63472new file mode 100644
63473index 0000000..06cc6ea
63474--- /dev/null
63475+++ b/grsecurity/grsec_fifo.c
63476@@ -0,0 +1,24 @@
63477+#include <linux/kernel.h>
63478+#include <linux/sched.h>
63479+#include <linux/fs.h>
63480+#include <linux/file.h>
63481+#include <linux/grinternal.h>
63482+
63483+int
63484+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
63485+ const struct dentry *dir, const int flag, const int acc_mode)
63486+{
63487+#ifdef CONFIG_GRKERNSEC_FIFO
63488+ const struct cred *cred = current_cred();
63489+
63490+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
63491+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
63492+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
63493+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
63494+ if (!inode_permission(dentry->d_inode, acc_mode))
63495+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
63496+ return -EACCES;
63497+ }
63498+#endif
63499+ return 0;
63500+}
63501diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
63502new file mode 100644
63503index 0000000..8ca18bf
63504--- /dev/null
63505+++ b/grsecurity/grsec_fork.c
63506@@ -0,0 +1,23 @@
63507+#include <linux/kernel.h>
63508+#include <linux/sched.h>
63509+#include <linux/grsecurity.h>
63510+#include <linux/grinternal.h>
63511+#include <linux/errno.h>
63512+
63513+void
63514+gr_log_forkfail(const int retval)
63515+{
63516+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63517+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
63518+ switch (retval) {
63519+ case -EAGAIN:
63520+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
63521+ break;
63522+ case -ENOMEM:
63523+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
63524+ break;
63525+ }
63526+ }
63527+#endif
63528+ return;
63529+}
63530diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
63531new file mode 100644
63532index 0000000..a862e9f
63533--- /dev/null
63534+++ b/grsecurity/grsec_init.c
63535@@ -0,0 +1,283 @@
63536+#include <linux/kernel.h>
63537+#include <linux/sched.h>
63538+#include <linux/mm.h>
63539+#include <linux/gracl.h>
63540+#include <linux/slab.h>
63541+#include <linux/vmalloc.h>
63542+#include <linux/percpu.h>
63543+#include <linux/module.h>
63544+
63545+int grsec_enable_ptrace_readexec;
63546+int grsec_enable_setxid;
63547+int grsec_enable_symlinkown;
63548+kgid_t grsec_symlinkown_gid;
63549+int grsec_enable_brute;
63550+int grsec_enable_link;
63551+int grsec_enable_dmesg;
63552+int grsec_enable_harden_ptrace;
63553+int grsec_enable_fifo;
63554+int grsec_enable_execlog;
63555+int grsec_enable_signal;
63556+int grsec_enable_forkfail;
63557+int grsec_enable_audit_ptrace;
63558+int grsec_enable_time;
63559+int grsec_enable_audit_textrel;
63560+int grsec_enable_group;
63561+kgid_t grsec_audit_gid;
63562+int grsec_enable_chdir;
63563+int grsec_enable_mount;
63564+int grsec_enable_rofs;
63565+int grsec_enable_chroot_findtask;
63566+int grsec_enable_chroot_mount;
63567+int grsec_enable_chroot_shmat;
63568+int grsec_enable_chroot_fchdir;
63569+int grsec_enable_chroot_double;
63570+int grsec_enable_chroot_pivot;
63571+int grsec_enable_chroot_chdir;
63572+int grsec_enable_chroot_chmod;
63573+int grsec_enable_chroot_mknod;
63574+int grsec_enable_chroot_nice;
63575+int grsec_enable_chroot_execlog;
63576+int grsec_enable_chroot_caps;
63577+int grsec_enable_chroot_sysctl;
63578+int grsec_enable_chroot_unix;
63579+int grsec_enable_tpe;
63580+kgid_t grsec_tpe_gid;
63581+int grsec_enable_blackhole;
63582+#ifdef CONFIG_IPV6_MODULE
63583+EXPORT_SYMBOL(grsec_enable_blackhole);
63584+#endif
63585+int grsec_lastack_retries;
63586+int grsec_enable_tpe_all;
63587+int grsec_enable_tpe_invert;
63588+int grsec_enable_socket_all;
63589+kgid_t grsec_socket_all_gid;
63590+int grsec_enable_socket_client;
63591+kgid_t grsec_socket_client_gid;
63592+int grsec_enable_socket_server;
63593+kgid_t grsec_socket_server_gid;
63594+int grsec_resource_logging;
63595+int grsec_disable_privio;
63596+int grsec_enable_log_rwxmaps;
63597+int grsec_lock;
63598+
63599+DEFINE_SPINLOCK(grsec_alert_lock);
63600+unsigned long grsec_alert_wtime = 0;
63601+unsigned long grsec_alert_fyet = 0;
63602+
63603+DEFINE_SPINLOCK(grsec_audit_lock);
63604+
63605+DEFINE_RWLOCK(grsec_exec_file_lock);
63606+
63607+char *gr_shared_page[4];
63608+
63609+char *gr_alert_log_fmt;
63610+char *gr_audit_log_fmt;
63611+char *gr_alert_log_buf;
63612+char *gr_audit_log_buf;
63613+
63614+extern struct gr_arg *gr_usermode;
63615+extern unsigned char *gr_system_salt;
63616+extern unsigned char *gr_system_sum;
63617+
63618+void __init
63619+grsecurity_init(void)
63620+{
63621+ int j;
63622+ /* create the per-cpu shared pages */
63623+
63624+#ifdef CONFIG_X86
63625+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63626+#endif
63627+
63628+ for (j = 0; j < 4; j++) {
63629+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63630+ if (gr_shared_page[j] == NULL) {
63631+ panic("Unable to allocate grsecurity shared page");
63632+ return;
63633+ }
63634+ }
63635+
63636+ /* allocate log buffers */
63637+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63638+ if (!gr_alert_log_fmt) {
63639+ panic("Unable to allocate grsecurity alert log format buffer");
63640+ return;
63641+ }
63642+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63643+ if (!gr_audit_log_fmt) {
63644+ panic("Unable to allocate grsecurity audit log format buffer");
63645+ return;
63646+ }
63647+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63648+ if (!gr_alert_log_buf) {
63649+ panic("Unable to allocate grsecurity alert log buffer");
63650+ return;
63651+ }
63652+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63653+ if (!gr_audit_log_buf) {
63654+ panic("Unable to allocate grsecurity audit log buffer");
63655+ return;
63656+ }
63657+
63658+ /* allocate memory for authentication structure */
63659+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63660+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63661+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63662+
63663+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63664+ panic("Unable to allocate grsecurity authentication structure");
63665+ return;
63666+ }
63667+
63668+
63669+#ifdef CONFIG_GRKERNSEC_IO
63670+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63671+ grsec_disable_privio = 1;
63672+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63673+ grsec_disable_privio = 1;
63674+#else
63675+ grsec_disable_privio = 0;
63676+#endif
63677+#endif
63678+
63679+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63680+ /* for backward compatibility, tpe_invert always defaults to on if
63681+ enabled in the kernel
63682+ */
63683+ grsec_enable_tpe_invert = 1;
63684+#endif
63685+
63686+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63687+#ifndef CONFIG_GRKERNSEC_SYSCTL
63688+ grsec_lock = 1;
63689+#endif
63690+
63691+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63692+ grsec_enable_audit_textrel = 1;
63693+#endif
63694+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63695+ grsec_enable_log_rwxmaps = 1;
63696+#endif
63697+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63698+ grsec_enable_group = 1;
63699+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
63700+#endif
63701+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63702+ grsec_enable_ptrace_readexec = 1;
63703+#endif
63704+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63705+ grsec_enable_chdir = 1;
63706+#endif
63707+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63708+ grsec_enable_harden_ptrace = 1;
63709+#endif
63710+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63711+ grsec_enable_mount = 1;
63712+#endif
63713+#ifdef CONFIG_GRKERNSEC_LINK
63714+ grsec_enable_link = 1;
63715+#endif
63716+#ifdef CONFIG_GRKERNSEC_BRUTE
63717+ grsec_enable_brute = 1;
63718+#endif
63719+#ifdef CONFIG_GRKERNSEC_DMESG
63720+ grsec_enable_dmesg = 1;
63721+#endif
63722+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63723+ grsec_enable_blackhole = 1;
63724+ grsec_lastack_retries = 4;
63725+#endif
63726+#ifdef CONFIG_GRKERNSEC_FIFO
63727+ grsec_enable_fifo = 1;
63728+#endif
63729+#ifdef CONFIG_GRKERNSEC_EXECLOG
63730+ grsec_enable_execlog = 1;
63731+#endif
63732+#ifdef CONFIG_GRKERNSEC_SETXID
63733+ grsec_enable_setxid = 1;
63734+#endif
63735+#ifdef CONFIG_GRKERNSEC_SIGNAL
63736+ grsec_enable_signal = 1;
63737+#endif
63738+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63739+ grsec_enable_forkfail = 1;
63740+#endif
63741+#ifdef CONFIG_GRKERNSEC_TIME
63742+ grsec_enable_time = 1;
63743+#endif
63744+#ifdef CONFIG_GRKERNSEC_RESLOG
63745+ grsec_resource_logging = 1;
63746+#endif
63747+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63748+ grsec_enable_chroot_findtask = 1;
63749+#endif
63750+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63751+ grsec_enable_chroot_unix = 1;
63752+#endif
63753+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63754+ grsec_enable_chroot_mount = 1;
63755+#endif
63756+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63757+ grsec_enable_chroot_fchdir = 1;
63758+#endif
63759+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63760+ grsec_enable_chroot_shmat = 1;
63761+#endif
63762+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63763+ grsec_enable_audit_ptrace = 1;
63764+#endif
63765+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63766+ grsec_enable_chroot_double = 1;
63767+#endif
63768+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63769+ grsec_enable_chroot_pivot = 1;
63770+#endif
63771+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63772+ grsec_enable_chroot_chdir = 1;
63773+#endif
63774+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63775+ grsec_enable_chroot_chmod = 1;
63776+#endif
63777+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63778+ grsec_enable_chroot_mknod = 1;
63779+#endif
63780+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63781+ grsec_enable_chroot_nice = 1;
63782+#endif
63783+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63784+ grsec_enable_chroot_execlog = 1;
63785+#endif
63786+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63787+ grsec_enable_chroot_caps = 1;
63788+#endif
63789+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63790+ grsec_enable_chroot_sysctl = 1;
63791+#endif
63792+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
63793+ grsec_enable_symlinkown = 1;
63794+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
63795+#endif
63796+#ifdef CONFIG_GRKERNSEC_TPE
63797+ grsec_enable_tpe = 1;
63798+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
63799+#ifdef CONFIG_GRKERNSEC_TPE_ALL
63800+ grsec_enable_tpe_all = 1;
63801+#endif
63802+#endif
63803+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63804+ grsec_enable_socket_all = 1;
63805+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
63806+#endif
63807+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63808+ grsec_enable_socket_client = 1;
63809+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
63810+#endif
63811+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63812+ grsec_enable_socket_server = 1;
63813+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
63814+#endif
63815+#endif
63816+
63817+ return;
63818+}
63819diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63820new file mode 100644
63821index 0000000..5e05e20
63822--- /dev/null
63823+++ b/grsecurity/grsec_link.c
63824@@ -0,0 +1,58 @@
63825+#include <linux/kernel.h>
63826+#include <linux/sched.h>
63827+#include <linux/fs.h>
63828+#include <linux/file.h>
63829+#include <linux/grinternal.h>
63830+
63831+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
63832+{
63833+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
63834+ const struct inode *link_inode = link->dentry->d_inode;
63835+
63836+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
63837+ /* ignore root-owned links, e.g. /proc/self */
63838+ gr_is_global_nonroot(link_inode->i_uid) && target &&
63839+ !uid_eq(link_inode->i_uid, target->i_uid)) {
63840+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
63841+ return 1;
63842+ }
63843+#endif
63844+ return 0;
63845+}
63846+
63847+int
63848+gr_handle_follow_link(const struct inode *parent,
63849+ const struct inode *inode,
63850+ const struct dentry *dentry, const struct vfsmount *mnt)
63851+{
63852+#ifdef CONFIG_GRKERNSEC_LINK
63853+ const struct cred *cred = current_cred();
63854+
63855+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63856+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
63857+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
63858+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63859+ return -EACCES;
63860+ }
63861+#endif
63862+ return 0;
63863+}
63864+
63865+int
63866+gr_handle_hardlink(const struct dentry *dentry,
63867+ const struct vfsmount *mnt,
63868+ struct inode *inode, const int mode, const struct filename *to)
63869+{
63870+#ifdef CONFIG_GRKERNSEC_LINK
63871+ const struct cred *cred = current_cred();
63872+
63873+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
63874+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
63875+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63876+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
63877+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
63878+ return -EPERM;
63879+ }
63880+#endif
63881+ return 0;
63882+}
63883diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63884new file mode 100644
63885index 0000000..7c06085
63886--- /dev/null
63887+++ b/grsecurity/grsec_log.c
63888@@ -0,0 +1,326 @@
63889+#include <linux/kernel.h>
63890+#include <linux/sched.h>
63891+#include <linux/file.h>
63892+#include <linux/tty.h>
63893+#include <linux/fs.h>
63894+#include <linux/grinternal.h>
63895+
63896+#ifdef CONFIG_TREE_PREEMPT_RCU
63897+#define DISABLE_PREEMPT() preempt_disable()
63898+#define ENABLE_PREEMPT() preempt_enable()
63899+#else
63900+#define DISABLE_PREEMPT()
63901+#define ENABLE_PREEMPT()
63902+#endif
63903+
63904+#define BEGIN_LOCKS(x) \
63905+ DISABLE_PREEMPT(); \
63906+ rcu_read_lock(); \
63907+ read_lock(&tasklist_lock); \
63908+ read_lock(&grsec_exec_file_lock); \
63909+ if (x != GR_DO_AUDIT) \
63910+ spin_lock(&grsec_alert_lock); \
63911+ else \
63912+ spin_lock(&grsec_audit_lock)
63913+
63914+#define END_LOCKS(x) \
63915+ if (x != GR_DO_AUDIT) \
63916+ spin_unlock(&grsec_alert_lock); \
63917+ else \
63918+ spin_unlock(&grsec_audit_lock); \
63919+ read_unlock(&grsec_exec_file_lock); \
63920+ read_unlock(&tasklist_lock); \
63921+ rcu_read_unlock(); \
63922+ ENABLE_PREEMPT(); \
63923+ if (x == GR_DONT_AUDIT) \
63924+ gr_handle_alertkill(current)
63925+
63926+enum {
63927+ FLOODING,
63928+ NO_FLOODING
63929+};
63930+
63931+extern char *gr_alert_log_fmt;
63932+extern char *gr_audit_log_fmt;
63933+extern char *gr_alert_log_buf;
63934+extern char *gr_audit_log_buf;
63935+
63936+static int gr_log_start(int audit)
63937+{
63938+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63939+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63940+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63941+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63942+ unsigned long curr_secs = get_seconds();
63943+
63944+ if (audit == GR_DO_AUDIT)
63945+ goto set_fmt;
63946+
63947+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63948+ grsec_alert_wtime = curr_secs;
63949+ grsec_alert_fyet = 0;
63950+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63951+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63952+ grsec_alert_fyet++;
63953+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63954+ grsec_alert_wtime = curr_secs;
63955+ grsec_alert_fyet++;
63956+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63957+ return FLOODING;
63958+ }
63959+ else return FLOODING;
63960+
63961+set_fmt:
63962+#endif
63963+ memset(buf, 0, PAGE_SIZE);
63964+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
63965+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63966+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63967+ } else if (current->signal->curr_ip) {
63968+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63969+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63970+ } else if (gr_acl_is_enabled()) {
63971+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63972+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63973+ } else {
63974+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
63975+ strcpy(buf, fmt);
63976+ }
63977+
63978+ return NO_FLOODING;
63979+}
63980+
63981+static void gr_log_middle(int audit, const char *msg, va_list ap)
63982+ __attribute__ ((format (printf, 2, 0)));
63983+
63984+static void gr_log_middle(int audit, const char *msg, va_list ap)
63985+{
63986+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63987+ unsigned int len = strlen(buf);
63988+
63989+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63990+
63991+ return;
63992+}
63993+
63994+static void gr_log_middle_varargs(int audit, const char *msg, ...)
63995+ __attribute__ ((format (printf, 2, 3)));
63996+
63997+static void gr_log_middle_varargs(int audit, const char *msg, ...)
63998+{
63999+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64000+ unsigned int len = strlen(buf);
64001+ va_list ap;
64002+
64003+ va_start(ap, msg);
64004+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64005+ va_end(ap);
64006+
64007+ return;
64008+}
64009+
64010+static void gr_log_end(int audit, int append_default)
64011+{
64012+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64013+ if (append_default) {
64014+ struct task_struct *task = current;
64015+ struct task_struct *parent = task->real_parent;
64016+ const struct cred *cred = __task_cred(task);
64017+ const struct cred *pcred = __task_cred(parent);
64018+ unsigned int len = strlen(buf);
64019+
64020+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64021+ }
64022+
64023+ printk("%s\n", buf);
64024+
64025+ return;
64026+}
64027+
64028+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
64029+{
64030+ int logtype;
64031+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
64032+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
64033+ void *voidptr = NULL;
64034+ int num1 = 0, num2 = 0;
64035+ unsigned long ulong1 = 0, ulong2 = 0;
64036+ struct dentry *dentry = NULL;
64037+ struct vfsmount *mnt = NULL;
64038+ struct file *file = NULL;
64039+ struct task_struct *task = NULL;
64040+ const struct cred *cred, *pcred;
64041+ va_list ap;
64042+
64043+ BEGIN_LOCKS(audit);
64044+ logtype = gr_log_start(audit);
64045+ if (logtype == FLOODING) {
64046+ END_LOCKS(audit);
64047+ return;
64048+ }
64049+ va_start(ap, argtypes);
64050+ switch (argtypes) {
64051+ case GR_TTYSNIFF:
64052+ task = va_arg(ap, struct task_struct *);
64053+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
64054+ break;
64055+ case GR_SYSCTL_HIDDEN:
64056+ str1 = va_arg(ap, char *);
64057+ gr_log_middle_varargs(audit, msg, result, str1);
64058+ break;
64059+ case GR_RBAC:
64060+ dentry = va_arg(ap, struct dentry *);
64061+ mnt = va_arg(ap, struct vfsmount *);
64062+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
64063+ break;
64064+ case GR_RBAC_STR:
64065+ dentry = va_arg(ap, struct dentry *);
64066+ mnt = va_arg(ap, struct vfsmount *);
64067+ str1 = va_arg(ap, char *);
64068+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
64069+ break;
64070+ case GR_STR_RBAC:
64071+ str1 = va_arg(ap, char *);
64072+ dentry = va_arg(ap, struct dentry *);
64073+ mnt = va_arg(ap, struct vfsmount *);
64074+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
64075+ break;
64076+ case GR_RBAC_MODE2:
64077+ dentry = va_arg(ap, struct dentry *);
64078+ mnt = va_arg(ap, struct vfsmount *);
64079+ str1 = va_arg(ap, char *);
64080+ str2 = va_arg(ap, char *);
64081+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
64082+ break;
64083+ case GR_RBAC_MODE3:
64084+ dentry = va_arg(ap, struct dentry *);
64085+ mnt = va_arg(ap, struct vfsmount *);
64086+ str1 = va_arg(ap, char *);
64087+ str2 = va_arg(ap, char *);
64088+ str3 = va_arg(ap, char *);
64089+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
64090+ break;
64091+ case GR_FILENAME:
64092+ dentry = va_arg(ap, struct dentry *);
64093+ mnt = va_arg(ap, struct vfsmount *);
64094+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
64095+ break;
64096+ case GR_STR_FILENAME:
64097+ str1 = va_arg(ap, char *);
64098+ dentry = va_arg(ap, struct dentry *);
64099+ mnt = va_arg(ap, struct vfsmount *);
64100+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
64101+ break;
64102+ case GR_FILENAME_STR:
64103+ dentry = va_arg(ap, struct dentry *);
64104+ mnt = va_arg(ap, struct vfsmount *);
64105+ str1 = va_arg(ap, char *);
64106+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
64107+ break;
64108+ case GR_FILENAME_TWO_INT:
64109+ dentry = va_arg(ap, struct dentry *);
64110+ mnt = va_arg(ap, struct vfsmount *);
64111+ num1 = va_arg(ap, int);
64112+ num2 = va_arg(ap, int);
64113+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
64114+ break;
64115+ case GR_FILENAME_TWO_INT_STR:
64116+ dentry = va_arg(ap, struct dentry *);
64117+ mnt = va_arg(ap, struct vfsmount *);
64118+ num1 = va_arg(ap, int);
64119+ num2 = va_arg(ap, int);
64120+ str1 = va_arg(ap, char *);
64121+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
64122+ break;
64123+ case GR_TEXTREL:
64124+ file = va_arg(ap, struct file *);
64125+ ulong1 = va_arg(ap, unsigned long);
64126+ ulong2 = va_arg(ap, unsigned long);
64127+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
64128+ break;
64129+ case GR_PTRACE:
64130+ task = va_arg(ap, struct task_struct *);
64131+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
64132+ break;
64133+ case GR_RESOURCE:
64134+ task = va_arg(ap, struct task_struct *);
64135+ cred = __task_cred(task);
64136+ pcred = __task_cred(task->real_parent);
64137+ ulong1 = va_arg(ap, unsigned long);
64138+ str1 = va_arg(ap, char *);
64139+ ulong2 = va_arg(ap, unsigned long);
64140+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64141+ break;
64142+ case GR_CAP:
64143+ task = va_arg(ap, struct task_struct *);
64144+ cred = __task_cred(task);
64145+ pcred = __task_cred(task->real_parent);
64146+ str1 = va_arg(ap, char *);
64147+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64148+ break;
64149+ case GR_SIG:
64150+ str1 = va_arg(ap, char *);
64151+ voidptr = va_arg(ap, void *);
64152+ gr_log_middle_varargs(audit, msg, str1, voidptr);
64153+ break;
64154+ case GR_SIG2:
64155+ task = va_arg(ap, struct task_struct *);
64156+ cred = __task_cred(task);
64157+ pcred = __task_cred(task->real_parent);
64158+ num1 = va_arg(ap, int);
64159+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64160+ break;
64161+ case GR_CRASH1:
64162+ task = va_arg(ap, struct task_struct *);
64163+ cred = __task_cred(task);
64164+ pcred = __task_cred(task->real_parent);
64165+ ulong1 = va_arg(ap, unsigned long);
64166+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
64167+ break;
64168+ case GR_CRASH2:
64169+ task = va_arg(ap, struct task_struct *);
64170+ cred = __task_cred(task);
64171+ pcred = __task_cred(task->real_parent);
64172+ ulong1 = va_arg(ap, unsigned long);
64173+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
64174+ break;
64175+ case GR_RWXMAP:
64176+ file = va_arg(ap, struct file *);
64177+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
64178+ break;
64179+ case GR_PSACCT:
64180+ {
64181+ unsigned int wday, cday;
64182+ __u8 whr, chr;
64183+ __u8 wmin, cmin;
64184+ __u8 wsec, csec;
64185+ char cur_tty[64] = { 0 };
64186+ char parent_tty[64] = { 0 };
64187+
64188+ task = va_arg(ap, struct task_struct *);
64189+ wday = va_arg(ap, unsigned int);
64190+ cday = va_arg(ap, unsigned int);
64191+ whr = va_arg(ap, int);
64192+ chr = va_arg(ap, int);
64193+ wmin = va_arg(ap, int);
64194+ cmin = va_arg(ap, int);
64195+ wsec = va_arg(ap, int);
64196+ csec = va_arg(ap, int);
64197+ ulong1 = va_arg(ap, unsigned long);
64198+ cred = __task_cred(task);
64199+ pcred = __task_cred(task->real_parent);
64200+
64201+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64202+ }
64203+ break;
64204+ default:
64205+ gr_log_middle(audit, msg, ap);
64206+ }
64207+ va_end(ap);
64208+ // these don't need DEFAULTSECARGS printed on the end
64209+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
64210+ gr_log_end(audit, 0);
64211+ else
64212+ gr_log_end(audit, 1);
64213+ END_LOCKS(audit);
64214+}
64215diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
64216new file mode 100644
64217index 0000000..f536303
64218--- /dev/null
64219+++ b/grsecurity/grsec_mem.c
64220@@ -0,0 +1,40 @@
64221+#include <linux/kernel.h>
64222+#include <linux/sched.h>
64223+#include <linux/mm.h>
64224+#include <linux/mman.h>
64225+#include <linux/grinternal.h>
64226+
64227+void
64228+gr_handle_ioperm(void)
64229+{
64230+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
64231+ return;
64232+}
64233+
64234+void
64235+gr_handle_iopl(void)
64236+{
64237+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64238+ return;
64239+}
64240+
64241+void
64242+gr_handle_mem_readwrite(u64 from, u64 to)
64243+{
64244+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64245+ return;
64246+}
64247+
64248+void
64249+gr_handle_vm86(void)
64250+{
64251+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64252+ return;
64253+}
64254+
64255+void
64256+gr_log_badprocpid(const char *entry)
64257+{
64258+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64259+ return;
64260+}
64261diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64262new file mode 100644
64263index 0000000..2131422
64264--- /dev/null
64265+++ b/grsecurity/grsec_mount.c
64266@@ -0,0 +1,62 @@
64267+#include <linux/kernel.h>
64268+#include <linux/sched.h>
64269+#include <linux/mount.h>
64270+#include <linux/grsecurity.h>
64271+#include <linux/grinternal.h>
64272+
64273+void
64274+gr_log_remount(const char *devname, const int retval)
64275+{
64276+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64277+ if (grsec_enable_mount && (retval >= 0))
64278+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
64279+#endif
64280+ return;
64281+}
64282+
64283+void
64284+gr_log_unmount(const char *devname, const int retval)
64285+{
64286+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64287+ if (grsec_enable_mount && (retval >= 0))
64288+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
64289+#endif
64290+ return;
64291+}
64292+
64293+void
64294+gr_log_mount(const char *from, const char *to, const int retval)
64295+{
64296+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64297+ if (grsec_enable_mount && (retval >= 0))
64298+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
64299+#endif
64300+ return;
64301+}
64302+
64303+int
64304+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
64305+{
64306+#ifdef CONFIG_GRKERNSEC_ROFS
64307+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
64308+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
64309+ return -EPERM;
64310+ } else
64311+ return 0;
64312+#endif
64313+ return 0;
64314+}
64315+
64316+int
64317+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
64318+{
64319+#ifdef CONFIG_GRKERNSEC_ROFS
64320+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
64321+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
64322+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
64323+ return -EPERM;
64324+ } else
64325+ return 0;
64326+#endif
64327+ return 0;
64328+}
64329diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
64330new file mode 100644
64331index 0000000..a3b12a0
64332--- /dev/null
64333+++ b/grsecurity/grsec_pax.c
64334@@ -0,0 +1,36 @@
64335+#include <linux/kernel.h>
64336+#include <linux/sched.h>
64337+#include <linux/mm.h>
64338+#include <linux/file.h>
64339+#include <linux/grinternal.h>
64340+#include <linux/grsecurity.h>
64341+
64342+void
64343+gr_log_textrel(struct vm_area_struct * vma)
64344+{
64345+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64346+ if (grsec_enable_audit_textrel)
64347+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
64348+#endif
64349+ return;
64350+}
64351+
64352+void
64353+gr_log_rwxmmap(struct file *file)
64354+{
64355+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64356+ if (grsec_enable_log_rwxmaps)
64357+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
64358+#endif
64359+ return;
64360+}
64361+
64362+void
64363+gr_log_rwxmprotect(struct file *file)
64364+{
64365+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64366+ if (grsec_enable_log_rwxmaps)
64367+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
64368+#endif
64369+ return;
64370+}
64371diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
64372new file mode 100644
64373index 0000000..f7f29aa
64374--- /dev/null
64375+++ b/grsecurity/grsec_ptrace.c
64376@@ -0,0 +1,30 @@
64377+#include <linux/kernel.h>
64378+#include <linux/sched.h>
64379+#include <linux/grinternal.h>
64380+#include <linux/security.h>
64381+
64382+void
64383+gr_audit_ptrace(struct task_struct *task)
64384+{
64385+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64386+ if (grsec_enable_audit_ptrace)
64387+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
64388+#endif
64389+ return;
64390+}
64391+
64392+int
64393+gr_ptrace_readexec(struct file *file, int unsafe_flags)
64394+{
64395+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64396+ const struct dentry *dentry = file->f_path.dentry;
64397+ const struct vfsmount *mnt = file->f_path.mnt;
64398+
64399+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
64400+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
64401+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
64402+ return -EACCES;
64403+ }
64404+#endif
64405+ return 0;
64406+}
64407diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
64408new file mode 100644
64409index 0000000..e09715a
64410--- /dev/null
64411+++ b/grsecurity/grsec_sig.c
64412@@ -0,0 +1,222 @@
64413+#include <linux/kernel.h>
64414+#include <linux/sched.h>
64415+#include <linux/delay.h>
64416+#include <linux/grsecurity.h>
64417+#include <linux/grinternal.h>
64418+#include <linux/hardirq.h>
64419+
64420+char *signames[] = {
64421+ [SIGSEGV] = "Segmentation fault",
64422+ [SIGILL] = "Illegal instruction",
64423+ [SIGABRT] = "Abort",
64424+ [SIGBUS] = "Invalid alignment/Bus error"
64425+};
64426+
64427+void
64428+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
64429+{
64430+#ifdef CONFIG_GRKERNSEC_SIGNAL
64431+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
64432+ (sig == SIGABRT) || (sig == SIGBUS))) {
64433+ if (task_pid_nr(t) == task_pid_nr(current)) {
64434+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
64435+ } else {
64436+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
64437+ }
64438+ }
64439+#endif
64440+ return;
64441+}
64442+
64443+int
64444+gr_handle_signal(const struct task_struct *p, const int sig)
64445+{
64446+#ifdef CONFIG_GRKERNSEC
64447+ /* ignore the 0 signal for protected task checks */
64448+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
64449+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
64450+ return -EPERM;
64451+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
64452+ return -EPERM;
64453+ }
64454+#endif
64455+ return 0;
64456+}
64457+
64458+#ifdef CONFIG_GRKERNSEC
64459+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
64460+
64461+int gr_fake_force_sig(int sig, struct task_struct *t)
64462+{
64463+ unsigned long int flags;
64464+ int ret, blocked, ignored;
64465+ struct k_sigaction *action;
64466+
64467+ spin_lock_irqsave(&t->sighand->siglock, flags);
64468+ action = &t->sighand->action[sig-1];
64469+ ignored = action->sa.sa_handler == SIG_IGN;
64470+ blocked = sigismember(&t->blocked, sig);
64471+ if (blocked || ignored) {
64472+ action->sa.sa_handler = SIG_DFL;
64473+ if (blocked) {
64474+ sigdelset(&t->blocked, sig);
64475+ recalc_sigpending_and_wake(t);
64476+ }
64477+ }
64478+ if (action->sa.sa_handler == SIG_DFL)
64479+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
64480+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
64481+
64482+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
64483+
64484+ return ret;
64485+}
64486+#endif
64487+
64488+#ifdef CONFIG_GRKERNSEC_BRUTE
64489+#define GR_USER_BAN_TIME (15 * 60)
64490+#define GR_DAEMON_BRUTE_TIME (30 * 60)
64491+
64492+static int __get_dumpable(unsigned long mm_flags)
64493+{
64494+ int ret;
64495+
64496+ ret = mm_flags & MMF_DUMPABLE_MASK;
64497+ return (ret >= 2) ? 2 : ret;
64498+}
64499+#endif
64500+
64501+void gr_handle_brute_attach(unsigned long mm_flags)
64502+{
64503+#ifdef CONFIG_GRKERNSEC_BRUTE
64504+ struct task_struct *p = current;
64505+ kuid_t uid = GLOBAL_ROOT_UID;
64506+ int daemon = 0;
64507+
64508+ if (!grsec_enable_brute)
64509+ return;
64510+
64511+ rcu_read_lock();
64512+ read_lock(&tasklist_lock);
64513+ read_lock(&grsec_exec_file_lock);
64514+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
64515+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
64516+ p->real_parent->brute = 1;
64517+ daemon = 1;
64518+ } else {
64519+ const struct cred *cred = __task_cred(p), *cred2;
64520+ struct task_struct *tsk, *tsk2;
64521+
64522+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
64523+ struct user_struct *user;
64524+
64525+ uid = cred->uid;
64526+
64527+ /* this is put upon execution past expiration */
64528+ user = find_user(uid);
64529+ if (user == NULL)
64530+ goto unlock;
64531+ user->banned = 1;
64532+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
64533+ if (user->ban_expires == ~0UL)
64534+ user->ban_expires--;
64535+
64536+ do_each_thread(tsk2, tsk) {
64537+ cred2 = __task_cred(tsk);
64538+ if (tsk != p && uid_eq(cred2->uid, uid))
64539+ gr_fake_force_sig(SIGKILL, tsk);
64540+ } while_each_thread(tsk2, tsk);
64541+ }
64542+ }
64543+unlock:
64544+ read_unlock(&grsec_exec_file_lock);
64545+ read_unlock(&tasklist_lock);
64546+ rcu_read_unlock();
64547+
64548+ if (gr_is_global_nonroot(uid))
64549+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
64550+ GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
64551+ else if (daemon)
64552+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
64553+
64554+#endif
64555+ return;
64556+}
64557+
64558+void gr_handle_brute_check(void)
64559+{
64560+#ifdef CONFIG_GRKERNSEC_BRUTE
64561+ struct task_struct *p = current;
64562+
64563+ if (unlikely(p->brute)) {
64564+ if (!grsec_enable_brute)
64565+ p->brute = 0;
64566+ else if (time_before(get_seconds(), p->brute_expires))
64567+ msleep(30 * 1000);
64568+ }
64569+#endif
64570+ return;
64571+}
64572+
64573+void gr_handle_kernel_exploit(void)
64574+{
64575+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
64576+ const struct cred *cred;
64577+ struct task_struct *tsk, *tsk2;
64578+ struct user_struct *user;
64579+ kuid_t uid;
64580+
64581+ if (in_irq() || in_serving_softirq() || in_nmi())
64582+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
64583+
64584+ uid = current_uid();
64585+
64586+ if (gr_is_global_root(uid))
64587+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
64588+ else {
64589+ /* kill all the processes of this user, hold a reference
64590+ to their creds struct, and prevent them from creating
64591+ another process until system reset
64592+ */
64593+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
64594+ GR_GLOBAL_UID(uid));
64595+ /* we intentionally leak this ref */
64596+ user = get_uid(current->cred->user);
64597+ if (user) {
64598+ user->banned = 1;
64599+ user->ban_expires = ~0UL;
64600+ }
64601+
64602+ read_lock(&tasklist_lock);
64603+ do_each_thread(tsk2, tsk) {
64604+ cred = __task_cred(tsk);
64605+ if (uid_eq(cred->uid, uid))
64606+ gr_fake_force_sig(SIGKILL, tsk);
64607+ } while_each_thread(tsk2, tsk);
64608+ read_unlock(&tasklist_lock);
64609+ }
64610+#endif
64611+}
64612+
64613+int __gr_process_user_ban(struct user_struct *user)
64614+{
64615+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64616+ if (unlikely(user->banned)) {
64617+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
64618+ user->banned = 0;
64619+ user->ban_expires = 0;
64620+ free_uid(user);
64621+ } else
64622+ return -EPERM;
64623+ }
64624+#endif
64625+ return 0;
64626+}
64627+
64628+int gr_process_user_ban(void)
64629+{
64630+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64631+ return __gr_process_user_ban(current->cred->user);
64632+#endif
64633+ return 0;
64634+}
64635diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
64636new file mode 100644
64637index 0000000..4030d57
64638--- /dev/null
64639+++ b/grsecurity/grsec_sock.c
64640@@ -0,0 +1,244 @@
64641+#include <linux/kernel.h>
64642+#include <linux/module.h>
64643+#include <linux/sched.h>
64644+#include <linux/file.h>
64645+#include <linux/net.h>
64646+#include <linux/in.h>
64647+#include <linux/ip.h>
64648+#include <net/sock.h>
64649+#include <net/inet_sock.h>
64650+#include <linux/grsecurity.h>
64651+#include <linux/grinternal.h>
64652+#include <linux/gracl.h>
64653+
64654+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64655+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64656+
64657+EXPORT_SYMBOL(gr_search_udp_recvmsg);
64658+EXPORT_SYMBOL(gr_search_udp_sendmsg);
64659+
64660+#ifdef CONFIG_UNIX_MODULE
64661+EXPORT_SYMBOL(gr_acl_handle_unix);
64662+EXPORT_SYMBOL(gr_acl_handle_mknod);
64663+EXPORT_SYMBOL(gr_handle_chroot_unix);
64664+EXPORT_SYMBOL(gr_handle_create);
64665+#endif
64666+
64667+#ifdef CONFIG_GRKERNSEC
64668+#define gr_conn_table_size 32749
64669+struct conn_table_entry {
64670+ struct conn_table_entry *next;
64671+ struct signal_struct *sig;
64672+};
64673+
64674+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64675+DEFINE_SPINLOCK(gr_conn_table_lock);
64676+
64677+extern const char * gr_socktype_to_name(unsigned char type);
64678+extern const char * gr_proto_to_name(unsigned char proto);
64679+extern const char * gr_sockfamily_to_name(unsigned char family);
64680+
64681+static __inline__ int
64682+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64683+{
64684+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64685+}
64686+
64687+static __inline__ int
64688+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64689+ __u16 sport, __u16 dport)
64690+{
64691+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64692+ sig->gr_sport == sport && sig->gr_dport == dport))
64693+ return 1;
64694+ else
64695+ return 0;
64696+}
64697+
64698+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64699+{
64700+ struct conn_table_entry **match;
64701+ unsigned int index;
64702+
64703+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64704+ sig->gr_sport, sig->gr_dport,
64705+ gr_conn_table_size);
64706+
64707+ newent->sig = sig;
64708+
64709+ match = &gr_conn_table[index];
64710+ newent->next = *match;
64711+ *match = newent;
64712+
64713+ return;
64714+}
64715+
64716+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64717+{
64718+ struct conn_table_entry *match, *last = NULL;
64719+ unsigned int index;
64720+
64721+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64722+ sig->gr_sport, sig->gr_dport,
64723+ gr_conn_table_size);
64724+
64725+ match = gr_conn_table[index];
64726+ while (match && !conn_match(match->sig,
64727+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64728+ sig->gr_dport)) {
64729+ last = match;
64730+ match = match->next;
64731+ }
64732+
64733+ if (match) {
64734+ if (last)
64735+ last->next = match->next;
64736+ else
64737+ gr_conn_table[index] = NULL;
64738+ kfree(match);
64739+ }
64740+
64741+ return;
64742+}
64743+
64744+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64745+ __u16 sport, __u16 dport)
64746+{
64747+ struct conn_table_entry *match;
64748+ unsigned int index;
64749+
64750+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64751+
64752+ match = gr_conn_table[index];
64753+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64754+ match = match->next;
64755+
64756+ if (match)
64757+ return match->sig;
64758+ else
64759+ return NULL;
64760+}
64761+
64762+#endif
64763+
64764+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64765+{
64766+#ifdef CONFIG_GRKERNSEC
64767+ struct signal_struct *sig = task->signal;
64768+ struct conn_table_entry *newent;
64769+
64770+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64771+ if (newent == NULL)
64772+ return;
64773+ /* no bh lock needed since we are called with bh disabled */
64774+ spin_lock(&gr_conn_table_lock);
64775+ gr_del_task_from_ip_table_nolock(sig);
64776+ sig->gr_saddr = inet->inet_rcv_saddr;
64777+ sig->gr_daddr = inet->inet_daddr;
64778+ sig->gr_sport = inet->inet_sport;
64779+ sig->gr_dport = inet->inet_dport;
64780+ gr_add_to_task_ip_table_nolock(sig, newent);
64781+ spin_unlock(&gr_conn_table_lock);
64782+#endif
64783+ return;
64784+}
64785+
64786+void gr_del_task_from_ip_table(struct task_struct *task)
64787+{
64788+#ifdef CONFIG_GRKERNSEC
64789+ spin_lock_bh(&gr_conn_table_lock);
64790+ gr_del_task_from_ip_table_nolock(task->signal);
64791+ spin_unlock_bh(&gr_conn_table_lock);
64792+#endif
64793+ return;
64794+}
64795+
64796+void
64797+gr_attach_curr_ip(const struct sock *sk)
64798+{
64799+#ifdef CONFIG_GRKERNSEC
64800+ struct signal_struct *p, *set;
64801+ const struct inet_sock *inet = inet_sk(sk);
64802+
64803+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64804+ return;
64805+
64806+ set = current->signal;
64807+
64808+ spin_lock_bh(&gr_conn_table_lock);
64809+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
64810+ inet->inet_dport, inet->inet_sport);
64811+ if (unlikely(p != NULL)) {
64812+ set->curr_ip = p->curr_ip;
64813+ set->used_accept = 1;
64814+ gr_del_task_from_ip_table_nolock(p);
64815+ spin_unlock_bh(&gr_conn_table_lock);
64816+ return;
64817+ }
64818+ spin_unlock_bh(&gr_conn_table_lock);
64819+
64820+ set->curr_ip = inet->inet_daddr;
64821+ set->used_accept = 1;
64822+#endif
64823+ return;
64824+}
64825+
64826+int
64827+gr_handle_sock_all(const int family, const int type, const int protocol)
64828+{
64829+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64830+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64831+ (family != AF_UNIX)) {
64832+ if (family == AF_INET)
64833+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64834+ else
64835+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64836+ return -EACCES;
64837+ }
64838+#endif
64839+ return 0;
64840+}
64841+
64842+int
64843+gr_handle_sock_server(const struct sockaddr *sck)
64844+{
64845+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64846+ if (grsec_enable_socket_server &&
64847+ in_group_p(grsec_socket_server_gid) &&
64848+ sck && (sck->sa_family != AF_UNIX) &&
64849+ (sck->sa_family != AF_LOCAL)) {
64850+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64851+ return -EACCES;
64852+ }
64853+#endif
64854+ return 0;
64855+}
64856+
64857+int
64858+gr_handle_sock_server_other(const struct sock *sck)
64859+{
64860+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64861+ if (grsec_enable_socket_server &&
64862+ in_group_p(grsec_socket_server_gid) &&
64863+ sck && (sck->sk_family != AF_UNIX) &&
64864+ (sck->sk_family != AF_LOCAL)) {
64865+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64866+ return -EACCES;
64867+ }
64868+#endif
64869+ return 0;
64870+}
64871+
64872+int
64873+gr_handle_sock_client(const struct sockaddr *sck)
64874+{
64875+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64876+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64877+ sck && (sck->sa_family != AF_UNIX) &&
64878+ (sck->sa_family != AF_LOCAL)) {
64879+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64880+ return -EACCES;
64881+ }
64882+#endif
64883+ return 0;
64884+}
64885diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64886new file mode 100644
64887index 0000000..f55ef0f
64888--- /dev/null
64889+++ b/grsecurity/grsec_sysctl.c
64890@@ -0,0 +1,469 @@
64891+#include <linux/kernel.h>
64892+#include <linux/sched.h>
64893+#include <linux/sysctl.h>
64894+#include <linux/grsecurity.h>
64895+#include <linux/grinternal.h>
64896+
64897+int
64898+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64899+{
64900+#ifdef CONFIG_GRKERNSEC_SYSCTL
64901+ if (dirname == NULL || name == NULL)
64902+ return 0;
64903+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64904+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64905+ return -EACCES;
64906+ }
64907+#endif
64908+ return 0;
64909+}
64910+
64911+#ifdef CONFIG_GRKERNSEC_ROFS
64912+static int __maybe_unused one = 1;
64913+#endif
64914+
64915+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64916+struct ctl_table grsecurity_table[] = {
64917+#ifdef CONFIG_GRKERNSEC_SYSCTL
64918+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64919+#ifdef CONFIG_GRKERNSEC_IO
64920+ {
64921+ .procname = "disable_priv_io",
64922+ .data = &grsec_disable_privio,
64923+ .maxlen = sizeof(int),
64924+ .mode = 0600,
64925+ .proc_handler = &proc_dointvec,
64926+ },
64927+#endif
64928+#endif
64929+#ifdef CONFIG_GRKERNSEC_LINK
64930+ {
64931+ .procname = "linking_restrictions",
64932+ .data = &grsec_enable_link,
64933+ .maxlen = sizeof(int),
64934+ .mode = 0600,
64935+ .proc_handler = &proc_dointvec,
64936+ },
64937+#endif
64938+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
64939+ {
64940+ .procname = "enforce_symlinksifowner",
64941+ .data = &grsec_enable_symlinkown,
64942+ .maxlen = sizeof(int),
64943+ .mode = 0600,
64944+ .proc_handler = &proc_dointvec,
64945+ },
64946+ {
64947+ .procname = "symlinkown_gid",
64948+ .data = &grsec_symlinkown_gid,
64949+ .maxlen = sizeof(int),
64950+ .mode = 0600,
64951+ .proc_handler = &proc_dointvec,
64952+ },
64953+#endif
64954+#ifdef CONFIG_GRKERNSEC_BRUTE
64955+ {
64956+ .procname = "deter_bruteforce",
64957+ .data = &grsec_enable_brute,
64958+ .maxlen = sizeof(int),
64959+ .mode = 0600,
64960+ .proc_handler = &proc_dointvec,
64961+ },
64962+#endif
64963+#ifdef CONFIG_GRKERNSEC_FIFO
64964+ {
64965+ .procname = "fifo_restrictions",
64966+ .data = &grsec_enable_fifo,
64967+ .maxlen = sizeof(int),
64968+ .mode = 0600,
64969+ .proc_handler = &proc_dointvec,
64970+ },
64971+#endif
64972+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64973+ {
64974+ .procname = "ptrace_readexec",
64975+ .data = &grsec_enable_ptrace_readexec,
64976+ .maxlen = sizeof(int),
64977+ .mode = 0600,
64978+ .proc_handler = &proc_dointvec,
64979+ },
64980+#endif
64981+#ifdef CONFIG_GRKERNSEC_SETXID
64982+ {
64983+ .procname = "consistent_setxid",
64984+ .data = &grsec_enable_setxid,
64985+ .maxlen = sizeof(int),
64986+ .mode = 0600,
64987+ .proc_handler = &proc_dointvec,
64988+ },
64989+#endif
64990+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64991+ {
64992+ .procname = "ip_blackhole",
64993+ .data = &grsec_enable_blackhole,
64994+ .maxlen = sizeof(int),
64995+ .mode = 0600,
64996+ .proc_handler = &proc_dointvec,
64997+ },
64998+ {
64999+ .procname = "lastack_retries",
65000+ .data = &grsec_lastack_retries,
65001+ .maxlen = sizeof(int),
65002+ .mode = 0600,
65003+ .proc_handler = &proc_dointvec,
65004+ },
65005+#endif
65006+#ifdef CONFIG_GRKERNSEC_EXECLOG
65007+ {
65008+ .procname = "exec_logging",
65009+ .data = &grsec_enable_execlog,
65010+ .maxlen = sizeof(int),
65011+ .mode = 0600,
65012+ .proc_handler = &proc_dointvec,
65013+ },
65014+#endif
65015+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65016+ {
65017+ .procname = "rwxmap_logging",
65018+ .data = &grsec_enable_log_rwxmaps,
65019+ .maxlen = sizeof(int),
65020+ .mode = 0600,
65021+ .proc_handler = &proc_dointvec,
65022+ },
65023+#endif
65024+#ifdef CONFIG_GRKERNSEC_SIGNAL
65025+ {
65026+ .procname = "signal_logging",
65027+ .data = &grsec_enable_signal,
65028+ .maxlen = sizeof(int),
65029+ .mode = 0600,
65030+ .proc_handler = &proc_dointvec,
65031+ },
65032+#endif
65033+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65034+ {
65035+ .procname = "forkfail_logging",
65036+ .data = &grsec_enable_forkfail,
65037+ .maxlen = sizeof(int),
65038+ .mode = 0600,
65039+ .proc_handler = &proc_dointvec,
65040+ },
65041+#endif
65042+#ifdef CONFIG_GRKERNSEC_TIME
65043+ {
65044+ .procname = "timechange_logging",
65045+ .data = &grsec_enable_time,
65046+ .maxlen = sizeof(int),
65047+ .mode = 0600,
65048+ .proc_handler = &proc_dointvec,
65049+ },
65050+#endif
65051+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65052+ {
65053+ .procname = "chroot_deny_shmat",
65054+ .data = &grsec_enable_chroot_shmat,
65055+ .maxlen = sizeof(int),
65056+ .mode = 0600,
65057+ .proc_handler = &proc_dointvec,
65058+ },
65059+#endif
65060+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65061+ {
65062+ .procname = "chroot_deny_unix",
65063+ .data = &grsec_enable_chroot_unix,
65064+ .maxlen = sizeof(int),
65065+ .mode = 0600,
65066+ .proc_handler = &proc_dointvec,
65067+ },
65068+#endif
65069+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65070+ {
65071+ .procname = "chroot_deny_mount",
65072+ .data = &grsec_enable_chroot_mount,
65073+ .maxlen = sizeof(int),
65074+ .mode = 0600,
65075+ .proc_handler = &proc_dointvec,
65076+ },
65077+#endif
65078+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65079+ {
65080+ .procname = "chroot_deny_fchdir",
65081+ .data = &grsec_enable_chroot_fchdir,
65082+ .maxlen = sizeof(int),
65083+ .mode = 0600,
65084+ .proc_handler = &proc_dointvec,
65085+ },
65086+#endif
65087+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65088+ {
65089+ .procname = "chroot_deny_chroot",
65090+ .data = &grsec_enable_chroot_double,
65091+ .maxlen = sizeof(int),
65092+ .mode = 0600,
65093+ .proc_handler = &proc_dointvec,
65094+ },
65095+#endif
65096+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65097+ {
65098+ .procname = "chroot_deny_pivot",
65099+ .data = &grsec_enable_chroot_pivot,
65100+ .maxlen = sizeof(int),
65101+ .mode = 0600,
65102+ .proc_handler = &proc_dointvec,
65103+ },
65104+#endif
65105+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65106+ {
65107+ .procname = "chroot_enforce_chdir",
65108+ .data = &grsec_enable_chroot_chdir,
65109+ .maxlen = sizeof(int),
65110+ .mode = 0600,
65111+ .proc_handler = &proc_dointvec,
65112+ },
65113+#endif
65114+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65115+ {
65116+ .procname = "chroot_deny_chmod",
65117+ .data = &grsec_enable_chroot_chmod,
65118+ .maxlen = sizeof(int),
65119+ .mode = 0600,
65120+ .proc_handler = &proc_dointvec,
65121+ },
65122+#endif
65123+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65124+ {
65125+ .procname = "chroot_deny_mknod",
65126+ .data = &grsec_enable_chroot_mknod,
65127+ .maxlen = sizeof(int),
65128+ .mode = 0600,
65129+ .proc_handler = &proc_dointvec,
65130+ },
65131+#endif
65132+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65133+ {
65134+ .procname = "chroot_restrict_nice",
65135+ .data = &grsec_enable_chroot_nice,
65136+ .maxlen = sizeof(int),
65137+ .mode = 0600,
65138+ .proc_handler = &proc_dointvec,
65139+ },
65140+#endif
65141+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65142+ {
65143+ .procname = "chroot_execlog",
65144+ .data = &grsec_enable_chroot_execlog,
65145+ .maxlen = sizeof(int),
65146+ .mode = 0600,
65147+ .proc_handler = &proc_dointvec,
65148+ },
65149+#endif
65150+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65151+ {
65152+ .procname = "chroot_caps",
65153+ .data = &grsec_enable_chroot_caps,
65154+ .maxlen = sizeof(int),
65155+ .mode = 0600,
65156+ .proc_handler = &proc_dointvec,
65157+ },
65158+#endif
65159+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65160+ {
65161+ .procname = "chroot_deny_sysctl",
65162+ .data = &grsec_enable_chroot_sysctl,
65163+ .maxlen = sizeof(int),
65164+ .mode = 0600,
65165+ .proc_handler = &proc_dointvec,
65166+ },
65167+#endif
65168+#ifdef CONFIG_GRKERNSEC_TPE
65169+ {
65170+ .procname = "tpe",
65171+ .data = &grsec_enable_tpe,
65172+ .maxlen = sizeof(int),
65173+ .mode = 0600,
65174+ .proc_handler = &proc_dointvec,
65175+ },
65176+ {
65177+ .procname = "tpe_gid",
65178+ .data = &grsec_tpe_gid,
65179+ .maxlen = sizeof(int),
65180+ .mode = 0600,
65181+ .proc_handler = &proc_dointvec,
65182+ },
65183+#endif
65184+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65185+ {
65186+ .procname = "tpe_invert",
65187+ .data = &grsec_enable_tpe_invert,
65188+ .maxlen = sizeof(int),
65189+ .mode = 0600,
65190+ .proc_handler = &proc_dointvec,
65191+ },
65192+#endif
65193+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65194+ {
65195+ .procname = "tpe_restrict_all",
65196+ .data = &grsec_enable_tpe_all,
65197+ .maxlen = sizeof(int),
65198+ .mode = 0600,
65199+ .proc_handler = &proc_dointvec,
65200+ },
65201+#endif
65202+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65203+ {
65204+ .procname = "socket_all",
65205+ .data = &grsec_enable_socket_all,
65206+ .maxlen = sizeof(int),
65207+ .mode = 0600,
65208+ .proc_handler = &proc_dointvec,
65209+ },
65210+ {
65211+ .procname = "socket_all_gid",
65212+ .data = &grsec_socket_all_gid,
65213+ .maxlen = sizeof(int),
65214+ .mode = 0600,
65215+ .proc_handler = &proc_dointvec,
65216+ },
65217+#endif
65218+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65219+ {
65220+ .procname = "socket_client",
65221+ .data = &grsec_enable_socket_client,
65222+ .maxlen = sizeof(int),
65223+ .mode = 0600,
65224+ .proc_handler = &proc_dointvec,
65225+ },
65226+ {
65227+ .procname = "socket_client_gid",
65228+ .data = &grsec_socket_client_gid,
65229+ .maxlen = sizeof(int),
65230+ .mode = 0600,
65231+ .proc_handler = &proc_dointvec,
65232+ },
65233+#endif
65234+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65235+ {
65236+ .procname = "socket_server",
65237+ .data = &grsec_enable_socket_server,
65238+ .maxlen = sizeof(int),
65239+ .mode = 0600,
65240+ .proc_handler = &proc_dointvec,
65241+ },
65242+ {
65243+ .procname = "socket_server_gid",
65244+ .data = &grsec_socket_server_gid,
65245+ .maxlen = sizeof(int),
65246+ .mode = 0600,
65247+ .proc_handler = &proc_dointvec,
65248+ },
65249+#endif
65250+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65251+ {
65252+ .procname = "audit_group",
65253+ .data = &grsec_enable_group,
65254+ .maxlen = sizeof(int),
65255+ .mode = 0600,
65256+ .proc_handler = &proc_dointvec,
65257+ },
65258+ {
65259+ .procname = "audit_gid",
65260+ .data = &grsec_audit_gid,
65261+ .maxlen = sizeof(int),
65262+ .mode = 0600,
65263+ .proc_handler = &proc_dointvec,
65264+ },
65265+#endif
65266+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65267+ {
65268+ .procname = "audit_chdir",
65269+ .data = &grsec_enable_chdir,
65270+ .maxlen = sizeof(int),
65271+ .mode = 0600,
65272+ .proc_handler = &proc_dointvec,
65273+ },
65274+#endif
65275+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65276+ {
65277+ .procname = "audit_mount",
65278+ .data = &grsec_enable_mount,
65279+ .maxlen = sizeof(int),
65280+ .mode = 0600,
65281+ .proc_handler = &proc_dointvec,
65282+ },
65283+#endif
65284+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65285+ {
65286+ .procname = "audit_textrel",
65287+ .data = &grsec_enable_audit_textrel,
65288+ .maxlen = sizeof(int),
65289+ .mode = 0600,
65290+ .proc_handler = &proc_dointvec,
65291+ },
65292+#endif
65293+#ifdef CONFIG_GRKERNSEC_DMESG
65294+ {
65295+ .procname = "dmesg",
65296+ .data = &grsec_enable_dmesg,
65297+ .maxlen = sizeof(int),
65298+ .mode = 0600,
65299+ .proc_handler = &proc_dointvec,
65300+ },
65301+#endif
65302+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65303+ {
65304+ .procname = "chroot_findtask",
65305+ .data = &grsec_enable_chroot_findtask,
65306+ .maxlen = sizeof(int),
65307+ .mode = 0600,
65308+ .proc_handler = &proc_dointvec,
65309+ },
65310+#endif
65311+#ifdef CONFIG_GRKERNSEC_RESLOG
65312+ {
65313+ .procname = "resource_logging",
65314+ .data = &grsec_resource_logging,
65315+ .maxlen = sizeof(int),
65316+ .mode = 0600,
65317+ .proc_handler = &proc_dointvec,
65318+ },
65319+#endif
65320+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65321+ {
65322+ .procname = "audit_ptrace",
65323+ .data = &grsec_enable_audit_ptrace,
65324+ .maxlen = sizeof(int),
65325+ .mode = 0600,
65326+ .proc_handler = &proc_dointvec,
65327+ },
65328+#endif
65329+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65330+ {
65331+ .procname = "harden_ptrace",
65332+ .data = &grsec_enable_harden_ptrace,
65333+ .maxlen = sizeof(int),
65334+ .mode = 0600,
65335+ .proc_handler = &proc_dointvec,
65336+ },
65337+#endif
65338+ {
65339+ .procname = "grsec_lock",
65340+ .data = &grsec_lock,
65341+ .maxlen = sizeof(int),
65342+ .mode = 0600,
65343+ .proc_handler = &proc_dointvec,
65344+ },
65345+#endif
65346+#ifdef CONFIG_GRKERNSEC_ROFS
65347+ {
65348+ .procname = "romount_protect",
65349+ .data = &grsec_enable_rofs,
65350+ .maxlen = sizeof(int),
65351+ .mode = 0600,
65352+ .proc_handler = &proc_dointvec_minmax,
65353+ .extra1 = &one,
65354+ .extra2 = &one,
65355+ },
65356+#endif
65357+ { }
65358+};
65359+#endif
65360diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
65361new file mode 100644
65362index 0000000..0dc13c3
65363--- /dev/null
65364+++ b/grsecurity/grsec_time.c
65365@@ -0,0 +1,16 @@
65366+#include <linux/kernel.h>
65367+#include <linux/sched.h>
65368+#include <linux/grinternal.h>
65369+#include <linux/module.h>
65370+
65371+void
65372+gr_log_timechange(void)
65373+{
65374+#ifdef CONFIG_GRKERNSEC_TIME
65375+ if (grsec_enable_time)
65376+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
65377+#endif
65378+ return;
65379+}
65380+
65381+EXPORT_SYMBOL(gr_log_timechange);
65382diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
65383new file mode 100644
65384index 0000000..ee57dcf
65385--- /dev/null
65386+++ b/grsecurity/grsec_tpe.c
65387@@ -0,0 +1,73 @@
65388+#include <linux/kernel.h>
65389+#include <linux/sched.h>
65390+#include <linux/file.h>
65391+#include <linux/fs.h>
65392+#include <linux/grinternal.h>
65393+
65394+extern int gr_acl_tpe_check(void);
65395+
65396+int
65397+gr_tpe_allow(const struct file *file)
65398+{
65399+#ifdef CONFIG_GRKERNSEC
65400+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
65401+ const struct cred *cred = current_cred();
65402+ char *msg = NULL;
65403+ char *msg2 = NULL;
65404+
65405+ // never restrict root
65406+ if (gr_is_global_root(cred->uid))
65407+ return 1;
65408+
65409+ if (grsec_enable_tpe) {
65410+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65411+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
65412+ msg = "not being in trusted group";
65413+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
65414+ msg = "being in untrusted group";
65415+#else
65416+ if (in_group_p(grsec_tpe_gid))
65417+ msg = "being in untrusted group";
65418+#endif
65419+ }
65420+ if (!msg && gr_acl_tpe_check())
65421+ msg = "being in untrusted role";
65422+
65423+ // not in any affected group/role
65424+ if (!msg)
65425+ goto next_check;
65426+
65427+ if (gr_is_global_nonroot(inode->i_uid))
65428+ msg2 = "file in non-root-owned directory";
65429+ else if (inode->i_mode & S_IWOTH)
65430+ msg2 = "file in world-writable directory";
65431+ else if (inode->i_mode & S_IWGRP)
65432+ msg2 = "file in group-writable directory";
65433+
65434+ if (msg && msg2) {
65435+ char fullmsg[70] = {0};
65436+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
65437+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
65438+ return 0;
65439+ }
65440+ msg = NULL;
65441+next_check:
65442+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65443+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
65444+ return 1;
65445+
65446+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
65447+ msg = "directory not owned by user";
65448+ else if (inode->i_mode & S_IWOTH)
65449+ msg = "file in world-writable directory";
65450+ else if (inode->i_mode & S_IWGRP)
65451+ msg = "file in group-writable directory";
65452+
65453+ if (msg) {
65454+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
65455+ return 0;
65456+ }
65457+#endif
65458+#endif
65459+ return 1;
65460+}
65461diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
65462new file mode 100644
65463index 0000000..9f7b1ac
65464--- /dev/null
65465+++ b/grsecurity/grsum.c
65466@@ -0,0 +1,61 @@
65467+#include <linux/err.h>
65468+#include <linux/kernel.h>
65469+#include <linux/sched.h>
65470+#include <linux/mm.h>
65471+#include <linux/scatterlist.h>
65472+#include <linux/crypto.h>
65473+#include <linux/gracl.h>
65474+
65475+
65476+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
65477+#error "crypto and sha256 must be built into the kernel"
65478+#endif
65479+
65480+int
65481+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
65482+{
65483+ char *p;
65484+ struct crypto_hash *tfm;
65485+ struct hash_desc desc;
65486+ struct scatterlist sg;
65487+ unsigned char temp_sum[GR_SHA_LEN];
65488+ volatile int retval = 0;
65489+ volatile int dummy = 0;
65490+ unsigned int i;
65491+
65492+ sg_init_table(&sg, 1);
65493+
65494+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
65495+ if (IS_ERR(tfm)) {
65496+ /* should never happen, since sha256 should be built in */
65497+ return 1;
65498+ }
65499+
65500+ desc.tfm = tfm;
65501+ desc.flags = 0;
65502+
65503+ crypto_hash_init(&desc);
65504+
65505+ p = salt;
65506+ sg_set_buf(&sg, p, GR_SALT_LEN);
65507+ crypto_hash_update(&desc, &sg, sg.length);
65508+
65509+ p = entry->pw;
65510+ sg_set_buf(&sg, p, strlen(p));
65511+
65512+ crypto_hash_update(&desc, &sg, sg.length);
65513+
65514+ crypto_hash_final(&desc, temp_sum);
65515+
65516+ memset(entry->pw, 0, GR_PW_LEN);
65517+
65518+ for (i = 0; i < GR_SHA_LEN; i++)
65519+ if (sum[i] != temp_sum[i])
65520+ retval = 1;
65521+ else
65522+ dummy = 1; // waste a cycle
65523+
65524+ crypto_free_hash(tfm);
65525+
65526+ return retval;
65527+}
65528diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
65529index 77ff547..181834f 100644
65530--- a/include/asm-generic/4level-fixup.h
65531+++ b/include/asm-generic/4level-fixup.h
65532@@ -13,8 +13,10 @@
65533 #define pmd_alloc(mm, pud, address) \
65534 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
65535 NULL: pmd_offset(pud, address))
65536+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
65537
65538 #define pud_alloc(mm, pgd, address) (pgd)
65539+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
65540 #define pud_offset(pgd, start) (pgd)
65541 #define pud_none(pud) 0
65542 #define pud_bad(pud) 0
65543diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
65544index b7babf0..04ad282 100644
65545--- a/include/asm-generic/atomic-long.h
65546+++ b/include/asm-generic/atomic-long.h
65547@@ -22,6 +22,12 @@
65548
65549 typedef atomic64_t atomic_long_t;
65550
65551+#ifdef CONFIG_PAX_REFCOUNT
65552+typedef atomic64_unchecked_t atomic_long_unchecked_t;
65553+#else
65554+typedef atomic64_t atomic_long_unchecked_t;
65555+#endif
65556+
65557 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
65558
65559 static inline long atomic_long_read(atomic_long_t *l)
65560@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65561 return (long)atomic64_read(v);
65562 }
65563
65564+#ifdef CONFIG_PAX_REFCOUNT
65565+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65566+{
65567+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65568+
65569+ return (long)atomic64_read_unchecked(v);
65570+}
65571+#endif
65572+
65573 static inline void atomic_long_set(atomic_long_t *l, long i)
65574 {
65575 atomic64_t *v = (atomic64_t *)l;
65576@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65577 atomic64_set(v, i);
65578 }
65579
65580+#ifdef CONFIG_PAX_REFCOUNT
65581+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65582+{
65583+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65584+
65585+ atomic64_set_unchecked(v, i);
65586+}
65587+#endif
65588+
65589 static inline void atomic_long_inc(atomic_long_t *l)
65590 {
65591 atomic64_t *v = (atomic64_t *)l;
65592@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65593 atomic64_inc(v);
65594 }
65595
65596+#ifdef CONFIG_PAX_REFCOUNT
65597+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65598+{
65599+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65600+
65601+ atomic64_inc_unchecked(v);
65602+}
65603+#endif
65604+
65605 static inline void atomic_long_dec(atomic_long_t *l)
65606 {
65607 atomic64_t *v = (atomic64_t *)l;
65608@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65609 atomic64_dec(v);
65610 }
65611
65612+#ifdef CONFIG_PAX_REFCOUNT
65613+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65614+{
65615+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65616+
65617+ atomic64_dec_unchecked(v);
65618+}
65619+#endif
65620+
65621 static inline void atomic_long_add(long i, atomic_long_t *l)
65622 {
65623 atomic64_t *v = (atomic64_t *)l;
65624@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65625 atomic64_add(i, v);
65626 }
65627
65628+#ifdef CONFIG_PAX_REFCOUNT
65629+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65630+{
65631+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65632+
65633+ atomic64_add_unchecked(i, v);
65634+}
65635+#endif
65636+
65637 static inline void atomic_long_sub(long i, atomic_long_t *l)
65638 {
65639 atomic64_t *v = (atomic64_t *)l;
65640@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
65641 atomic64_sub(i, v);
65642 }
65643
65644+#ifdef CONFIG_PAX_REFCOUNT
65645+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
65646+{
65647+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65648+
65649+ atomic64_sub_unchecked(i, v);
65650+}
65651+#endif
65652+
65653 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
65654 {
65655 atomic64_t *v = (atomic64_t *)l;
65656@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
65657 return (long)atomic64_add_return(i, v);
65658 }
65659
65660+#ifdef CONFIG_PAX_REFCOUNT
65661+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
65662+{
65663+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65664+
65665+ return (long)atomic64_add_return_unchecked(i, v);
65666+}
65667+#endif
65668+
65669 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
65670 {
65671 atomic64_t *v = (atomic64_t *)l;
65672@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65673 return (long)atomic64_inc_return(v);
65674 }
65675
65676+#ifdef CONFIG_PAX_REFCOUNT
65677+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65678+{
65679+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65680+
65681+ return (long)atomic64_inc_return_unchecked(v);
65682+}
65683+#endif
65684+
65685 static inline long atomic_long_dec_return(atomic_long_t *l)
65686 {
65687 atomic64_t *v = (atomic64_t *)l;
65688@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65689
65690 typedef atomic_t atomic_long_t;
65691
65692+#ifdef CONFIG_PAX_REFCOUNT
65693+typedef atomic_unchecked_t atomic_long_unchecked_t;
65694+#else
65695+typedef atomic_t atomic_long_unchecked_t;
65696+#endif
65697+
65698 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65699 static inline long atomic_long_read(atomic_long_t *l)
65700 {
65701@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65702 return (long)atomic_read(v);
65703 }
65704
65705+#ifdef CONFIG_PAX_REFCOUNT
65706+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65707+{
65708+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65709+
65710+ return (long)atomic_read_unchecked(v);
65711+}
65712+#endif
65713+
65714 static inline void atomic_long_set(atomic_long_t *l, long i)
65715 {
65716 atomic_t *v = (atomic_t *)l;
65717@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65718 atomic_set(v, i);
65719 }
65720
65721+#ifdef CONFIG_PAX_REFCOUNT
65722+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65723+{
65724+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65725+
65726+ atomic_set_unchecked(v, i);
65727+}
65728+#endif
65729+
65730 static inline void atomic_long_inc(atomic_long_t *l)
65731 {
65732 atomic_t *v = (atomic_t *)l;
65733@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65734 atomic_inc(v);
65735 }
65736
65737+#ifdef CONFIG_PAX_REFCOUNT
65738+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65739+{
65740+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65741+
65742+ atomic_inc_unchecked(v);
65743+}
65744+#endif
65745+
65746 static inline void atomic_long_dec(atomic_long_t *l)
65747 {
65748 atomic_t *v = (atomic_t *)l;
65749@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65750 atomic_dec(v);
65751 }
65752
65753+#ifdef CONFIG_PAX_REFCOUNT
65754+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65755+{
65756+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65757+
65758+ atomic_dec_unchecked(v);
65759+}
65760+#endif
65761+
65762 static inline void atomic_long_add(long i, atomic_long_t *l)
65763 {
65764 atomic_t *v = (atomic_t *)l;
65765@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65766 atomic_add(i, v);
65767 }
65768
65769+#ifdef CONFIG_PAX_REFCOUNT
65770+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65771+{
65772+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65773+
65774+ atomic_add_unchecked(i, v);
65775+}
65776+#endif
65777+
65778 static inline void atomic_long_sub(long i, atomic_long_t *l)
65779 {
65780 atomic_t *v = (atomic_t *)l;
65781@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
65782 atomic_sub(i, v);
65783 }
65784
65785+#ifdef CONFIG_PAX_REFCOUNT
65786+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
65787+{
65788+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65789+
65790+ atomic_sub_unchecked(i, v);
65791+}
65792+#endif
65793+
65794 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
65795 {
65796 atomic_t *v = (atomic_t *)l;
65797@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
65798 return (long)atomic_add_return(i, v);
65799 }
65800
65801+#ifdef CONFIG_PAX_REFCOUNT
65802+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
65803+{
65804+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65805+
65806+ return (long)atomic_add_return_unchecked(i, v);
65807+}
65808+
65809+#endif
65810+
65811 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
65812 {
65813 atomic_t *v = (atomic_t *)l;
65814@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65815 return (long)atomic_inc_return(v);
65816 }
65817
65818+#ifdef CONFIG_PAX_REFCOUNT
65819+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65820+{
65821+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65822+
65823+ return (long)atomic_inc_return_unchecked(v);
65824+}
65825+#endif
65826+
65827 static inline long atomic_long_dec_return(atomic_long_t *l)
65828 {
65829 atomic_t *v = (atomic_t *)l;
65830@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65831
65832 #endif /* BITS_PER_LONG == 64 */
65833
65834+#ifdef CONFIG_PAX_REFCOUNT
65835+static inline void pax_refcount_needs_these_functions(void)
65836+{
65837+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
65838+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65839+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65840+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65841+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65842+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65843+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65844+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65845+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65846+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65847+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65848+#ifdef CONFIG_X86
65849+ atomic_clear_mask_unchecked(0, NULL);
65850+ atomic_set_mask_unchecked(0, NULL);
65851+#endif
65852+
65853+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65854+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65855+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65856+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
65857+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65858+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
65859+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65860+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65861+}
65862+#else
65863+#define atomic_read_unchecked(v) atomic_read(v)
65864+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65865+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65866+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65867+#define atomic_inc_unchecked(v) atomic_inc(v)
65868+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65869+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65870+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65871+#define atomic_dec_unchecked(v) atomic_dec(v)
65872+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65873+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65874+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
65875+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
65876+
65877+#define atomic_long_read_unchecked(v) atomic_long_read(v)
65878+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65879+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65880+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
65881+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65882+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
65883+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65884+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65885+#endif
65886+
65887 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65888diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
65889index 1ced641..c896ee8 100644
65890--- a/include/asm-generic/atomic.h
65891+++ b/include/asm-generic/atomic.h
65892@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
65893 * Atomically clears the bits set in @mask from @v
65894 */
65895 #ifndef atomic_clear_mask
65896-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
65897+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
65898 {
65899 unsigned long flags;
65900
65901diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65902index b18ce4f..2ee2843 100644
65903--- a/include/asm-generic/atomic64.h
65904+++ b/include/asm-generic/atomic64.h
65905@@ -16,6 +16,8 @@ typedef struct {
65906 long long counter;
65907 } atomic64_t;
65908
65909+typedef atomic64_t atomic64_unchecked_t;
65910+
65911 #define ATOMIC64_INIT(i) { (i) }
65912
65913 extern long long atomic64_read(const atomic64_t *v);
65914@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65915 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65916 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65917
65918+#define atomic64_read_unchecked(v) atomic64_read(v)
65919+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65920+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65921+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65922+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65923+#define atomic64_inc_unchecked(v) atomic64_inc(v)
65924+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65925+#define atomic64_dec_unchecked(v) atomic64_dec(v)
65926+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65927+
65928 #endif /* _ASM_GENERIC_ATOMIC64_H */
65929diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65930index 1bfcfe5..e04c5c9 100644
65931--- a/include/asm-generic/cache.h
65932+++ b/include/asm-generic/cache.h
65933@@ -6,7 +6,7 @@
65934 * cache lines need to provide their own cache.h.
65935 */
65936
65937-#define L1_CACHE_SHIFT 5
65938-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65939+#define L1_CACHE_SHIFT 5UL
65940+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65941
65942 #endif /* __ASM_GENERIC_CACHE_H */
65943diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65944index 0d68a1e..b74a761 100644
65945--- a/include/asm-generic/emergency-restart.h
65946+++ b/include/asm-generic/emergency-restart.h
65947@@ -1,7 +1,7 @@
65948 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65949 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65950
65951-static inline void machine_emergency_restart(void)
65952+static inline __noreturn void machine_emergency_restart(void)
65953 {
65954 machine_restart(NULL);
65955 }
65956diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65957index 90f99c7..00ce236 100644
65958--- a/include/asm-generic/kmap_types.h
65959+++ b/include/asm-generic/kmap_types.h
65960@@ -2,9 +2,9 @@
65961 #define _ASM_GENERIC_KMAP_TYPES_H
65962
65963 #ifdef __WITH_KM_FENCE
65964-# define KM_TYPE_NR 41
65965+# define KM_TYPE_NR 42
65966 #else
65967-# define KM_TYPE_NR 20
65968+# define KM_TYPE_NR 21
65969 #endif
65970
65971 #endif
65972diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
65973index 9ceb03b..62b0b8f 100644
65974--- a/include/asm-generic/local.h
65975+++ b/include/asm-generic/local.h
65976@@ -23,24 +23,37 @@ typedef struct
65977 atomic_long_t a;
65978 } local_t;
65979
65980+typedef struct {
65981+ atomic_long_unchecked_t a;
65982+} local_unchecked_t;
65983+
65984 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
65985
65986 #define local_read(l) atomic_long_read(&(l)->a)
65987+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
65988 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
65989+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
65990 #define local_inc(l) atomic_long_inc(&(l)->a)
65991+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
65992 #define local_dec(l) atomic_long_dec(&(l)->a)
65993+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
65994 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
65995+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
65996 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
65997+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
65998
65999 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
66000 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
66001 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
66002 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
66003 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
66004+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
66005 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
66006 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
66007+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
66008
66009 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
66010+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
66011 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
66012 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
66013 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
66014diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
66015index 725612b..9cc513a 100644
66016--- a/include/asm-generic/pgtable-nopmd.h
66017+++ b/include/asm-generic/pgtable-nopmd.h
66018@@ -1,14 +1,19 @@
66019 #ifndef _PGTABLE_NOPMD_H
66020 #define _PGTABLE_NOPMD_H
66021
66022-#ifndef __ASSEMBLY__
66023-
66024 #include <asm-generic/pgtable-nopud.h>
66025
66026-struct mm_struct;
66027-
66028 #define __PAGETABLE_PMD_FOLDED
66029
66030+#define PMD_SHIFT PUD_SHIFT
66031+#define PTRS_PER_PMD 1
66032+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
66033+#define PMD_MASK (~(PMD_SIZE-1))
66034+
66035+#ifndef __ASSEMBLY__
66036+
66037+struct mm_struct;
66038+
66039 /*
66040 * Having the pmd type consist of a pud gets the size right, and allows
66041 * us to conceptually access the pud entry that this pmd is folded into
66042@@ -16,11 +21,6 @@ struct mm_struct;
66043 */
66044 typedef struct { pud_t pud; } pmd_t;
66045
66046-#define PMD_SHIFT PUD_SHIFT
66047-#define PTRS_PER_PMD 1
66048-#define PMD_SIZE (1UL << PMD_SHIFT)
66049-#define PMD_MASK (~(PMD_SIZE-1))
66050-
66051 /*
66052 * The "pud_xxx()" functions here are trivial for a folded two-level
66053 * setup: the pmd is never bad, and a pmd always exists (as it's folded
66054diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
66055index 810431d..0ec4804f 100644
66056--- a/include/asm-generic/pgtable-nopud.h
66057+++ b/include/asm-generic/pgtable-nopud.h
66058@@ -1,10 +1,15 @@
66059 #ifndef _PGTABLE_NOPUD_H
66060 #define _PGTABLE_NOPUD_H
66061
66062-#ifndef __ASSEMBLY__
66063-
66064 #define __PAGETABLE_PUD_FOLDED
66065
66066+#define PUD_SHIFT PGDIR_SHIFT
66067+#define PTRS_PER_PUD 1
66068+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
66069+#define PUD_MASK (~(PUD_SIZE-1))
66070+
66071+#ifndef __ASSEMBLY__
66072+
66073 /*
66074 * Having the pud type consist of a pgd gets the size right, and allows
66075 * us to conceptually access the pgd entry that this pud is folded into
66076@@ -12,11 +17,6 @@
66077 */
66078 typedef struct { pgd_t pgd; } pud_t;
66079
66080-#define PUD_SHIFT PGDIR_SHIFT
66081-#define PTRS_PER_PUD 1
66082-#define PUD_SIZE (1UL << PUD_SHIFT)
66083-#define PUD_MASK (~(PUD_SIZE-1))
66084-
66085 /*
66086 * The "pgd_xxx()" functions here are trivial for a folded two-level
66087 * setup: the pud is never bad, and a pud always exists (as it's folded
66088@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
66089 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
66090
66091 #define pgd_populate(mm, pgd, pud) do { } while (0)
66092+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
66093 /*
66094 * (puds are folded into pgds so this doesn't get actually called,
66095 * but the define is needed for a generic inline function.)
66096diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
66097index 5cf680a..4b74d62 100644
66098--- a/include/asm-generic/pgtable.h
66099+++ b/include/asm-generic/pgtable.h
66100@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
66101 }
66102 #endif /* CONFIG_NUMA_BALANCING */
66103
66104+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
66105+static inline unsigned long pax_open_kernel(void) { return 0; }
66106+#endif
66107+
66108+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
66109+static inline unsigned long pax_close_kernel(void) { return 0; }
66110+#endif
66111+
66112 #endif /* CONFIG_MMU */
66113
66114 #endif /* !__ASSEMBLY__ */
66115diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
66116index d1ea7ce..b1ebf2a 100644
66117--- a/include/asm-generic/vmlinux.lds.h
66118+++ b/include/asm-generic/vmlinux.lds.h
66119@@ -218,6 +218,7 @@
66120 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66121 VMLINUX_SYMBOL(__start_rodata) = .; \
66122 *(.rodata) *(.rodata.*) \
66123+ *(.data..read_only) \
66124 *(__vermagic) /* Kernel version magic */ \
66125 . = ALIGN(8); \
66126 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
66127@@ -725,17 +726,18 @@
66128 * section in the linker script will go there too. @phdr should have
66129 * a leading colon.
66130 *
66131- * Note that this macros defines __per_cpu_load as an absolute symbol.
66132+ * Note that this macros defines per_cpu_load as an absolute symbol.
66133 * If there is no need to put the percpu section at a predetermined
66134 * address, use PERCPU_SECTION.
66135 */
66136 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
66137- VMLINUX_SYMBOL(__per_cpu_load) = .; \
66138- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66139+ per_cpu_load = .; \
66140+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66141 - LOAD_OFFSET) { \
66142+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66143 PERCPU_INPUT(cacheline) \
66144 } phdr \
66145- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
66146+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
66147
66148 /**
66149 * PERCPU_SECTION - define output section for percpu area, simple version
66150diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
66151index 418d270..bfd2794 100644
66152--- a/include/crypto/algapi.h
66153+++ b/include/crypto/algapi.h
66154@@ -34,7 +34,7 @@ struct crypto_type {
66155 unsigned int maskclear;
66156 unsigned int maskset;
66157 unsigned int tfmsize;
66158-};
66159+} __do_const;
66160
66161 struct crypto_instance {
66162 struct crypto_alg alg;
66163diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66164index fad21c9..ab858bc 100644
66165--- a/include/drm/drmP.h
66166+++ b/include/drm/drmP.h
66167@@ -72,6 +72,7 @@
66168 #include <linux/workqueue.h>
66169 #include <linux/poll.h>
66170 #include <asm/pgalloc.h>
66171+#include <asm/local.h>
66172 #include <drm/drm.h>
66173 #include <drm/drm_sarea.h>
66174
66175@@ -293,10 +294,12 @@ do { \
66176 * \param cmd command.
66177 * \param arg argument.
66178 */
66179-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
66180+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
66181+ struct drm_file *file_priv);
66182+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
66183 struct drm_file *file_priv);
66184
66185-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
66186+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
66187 unsigned long arg);
66188
66189 #define DRM_IOCTL_NR(n) _IOC_NR(n)
66190@@ -311,9 +314,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
66191 struct drm_ioctl_desc {
66192 unsigned int cmd;
66193 int flags;
66194- drm_ioctl_t *func;
66195+ drm_ioctl_t func;
66196 unsigned int cmd_drv;
66197-};
66198+} __do_const;
66199
66200 /**
66201 * Creates a driver or general drm_ioctl_desc array entry for the given
66202@@ -995,7 +998,7 @@ struct drm_info_list {
66203 int (*show)(struct seq_file*, void*); /** show callback */
66204 u32 driver_features; /**< Required driver features for this entry */
66205 void *data;
66206-};
66207+} __do_const;
66208
66209 /**
66210 * debugfs node structure. This structure represents a debugfs file.
66211@@ -1068,7 +1071,7 @@ struct drm_device {
66212
66213 /** \name Usage Counters */
66214 /*@{ */
66215- int open_count; /**< Outstanding files open */
66216+ local_t open_count; /**< Outstanding files open */
66217 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66218 atomic_t vma_count; /**< Outstanding vma areas open */
66219 int buf_use; /**< Buffers in use -- cannot alloc */
66220@@ -1079,7 +1082,7 @@ struct drm_device {
66221 /*@{ */
66222 unsigned long counters;
66223 enum drm_stat_type types[15];
66224- atomic_t counts[15];
66225+ atomic_unchecked_t counts[15];
66226 /*@} */
66227
66228 struct list_head filelist;
66229diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66230index f43d556..94d9343 100644
66231--- a/include/drm/drm_crtc_helper.h
66232+++ b/include/drm/drm_crtc_helper.h
66233@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
66234 struct drm_connector *connector);
66235 /* disable encoder when not in use - more explicit than dpms off */
66236 void (*disable)(struct drm_encoder *encoder);
66237-};
66238+} __no_const;
66239
66240 /**
66241 * drm_connector_helper_funcs - helper operations for connectors
66242diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66243index 72dcbe8..8db58d7 100644
66244--- a/include/drm/ttm/ttm_memory.h
66245+++ b/include/drm/ttm/ttm_memory.h
66246@@ -48,7 +48,7 @@
66247
66248 struct ttm_mem_shrink {
66249 int (*do_shrink) (struct ttm_mem_shrink *);
66250-};
66251+} __no_const;
66252
66253 /**
66254 * struct ttm_mem_global - Global memory accounting structure.
66255diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
66256index 4b840e8..155d235 100644
66257--- a/include/keys/asymmetric-subtype.h
66258+++ b/include/keys/asymmetric-subtype.h
66259@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
66260 /* Verify the signature on a key of this subtype (optional) */
66261 int (*verify_signature)(const struct key *key,
66262 const struct public_key_signature *sig);
66263-};
66264+} __do_const;
66265
66266 /**
66267 * asymmetric_key_subtype - Get the subtype from an asymmetric key
66268diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
66269index c1da539..1dcec55 100644
66270--- a/include/linux/atmdev.h
66271+++ b/include/linux/atmdev.h
66272@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
66273 #endif
66274
66275 struct k_atm_aal_stats {
66276-#define __HANDLE_ITEM(i) atomic_t i
66277+#define __HANDLE_ITEM(i) atomic_unchecked_t i
66278 __AAL_STAT_ITEMS
66279 #undef __HANDLE_ITEM
66280 };
66281@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
66282 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
66283 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
66284 struct module *owner;
66285-};
66286+} __do_const ;
66287
66288 struct atmphy_ops {
66289 int (*start)(struct atm_dev *dev);
66290diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
66291index 0530b98..96a8ac0 100644
66292--- a/include/linux/binfmts.h
66293+++ b/include/linux/binfmts.h
66294@@ -73,8 +73,9 @@ struct linux_binfmt {
66295 int (*load_binary)(struct linux_binprm *);
66296 int (*load_shlib)(struct file *);
66297 int (*core_dump)(struct coredump_params *cprm);
66298+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
66299 unsigned long min_coredump; /* minimal dump size */
66300-};
66301+} __do_const;
66302
66303 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
66304
66305diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
66306index f94bc83..62b9cfe 100644
66307--- a/include/linux/blkdev.h
66308+++ b/include/linux/blkdev.h
66309@@ -1498,7 +1498,7 @@ struct block_device_operations {
66310 /* this callback is with swap_lock and sometimes page table lock held */
66311 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
66312 struct module *owner;
66313-};
66314+} __do_const;
66315
66316 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
66317 unsigned long);
66318diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
66319index 7c2e030..b72475d 100644
66320--- a/include/linux/blktrace_api.h
66321+++ b/include/linux/blktrace_api.h
66322@@ -23,7 +23,7 @@ struct blk_trace {
66323 struct dentry *dir;
66324 struct dentry *dropped_file;
66325 struct dentry *msg_file;
66326- atomic_t dropped;
66327+ atomic_unchecked_t dropped;
66328 };
66329
66330 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
66331diff --git a/include/linux/cache.h b/include/linux/cache.h
66332index 4c57065..4307975 100644
66333--- a/include/linux/cache.h
66334+++ b/include/linux/cache.h
66335@@ -16,6 +16,10 @@
66336 #define __read_mostly
66337 #endif
66338
66339+#ifndef __read_only
66340+#define __read_only __read_mostly
66341+#endif
66342+
66343 #ifndef ____cacheline_aligned
66344 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66345 #endif
66346diff --git a/include/linux/capability.h b/include/linux/capability.h
66347index 98503b7..cc36d18 100644
66348--- a/include/linux/capability.h
66349+++ b/include/linux/capability.h
66350@@ -211,8 +211,13 @@ extern bool capable(int cap);
66351 extern bool ns_capable(struct user_namespace *ns, int cap);
66352 extern bool nsown_capable(int cap);
66353 extern bool inode_capable(const struct inode *inode, int cap);
66354+extern bool capable_nolog(int cap);
66355+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
66356+extern bool inode_capable_nolog(const struct inode *inode, int cap);
66357
66358 /* audit system wants to get cap info from files as well */
66359 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
66360
66361+extern int is_privileged_binary(const struct dentry *dentry);
66362+
66363 #endif /* !_LINUX_CAPABILITY_H */
66364diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
66365index 8609d57..86e4d79 100644
66366--- a/include/linux/cdrom.h
66367+++ b/include/linux/cdrom.h
66368@@ -87,7 +87,6 @@ struct cdrom_device_ops {
66369
66370 /* driver specifications */
66371 const int capability; /* capability flags */
66372- int n_minors; /* number of active minor devices */
66373 /* handle uniform packets for scsi type devices (scsi,atapi) */
66374 int (*generic_packet) (struct cdrom_device_info *,
66375 struct packet_command *);
66376diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
66377index 42e55de..1cd0e66 100644
66378--- a/include/linux/cleancache.h
66379+++ b/include/linux/cleancache.h
66380@@ -31,7 +31,7 @@ struct cleancache_ops {
66381 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
66382 void (*invalidate_inode)(int, struct cleancache_filekey);
66383 void (*invalidate_fs)(int);
66384-};
66385+} __no_const;
66386
66387 extern struct cleancache_ops
66388 cleancache_register_ops(struct cleancache_ops *ops);
66389diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66390index 662fd1b..e801992 100644
66391--- a/include/linux/compiler-gcc4.h
66392+++ b/include/linux/compiler-gcc4.h
66393@@ -34,6 +34,21 @@
66394 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
66395
66396 #if __GNUC_MINOR__ >= 5
66397+
66398+#ifdef CONSTIFY_PLUGIN
66399+#define __no_const __attribute__((no_const))
66400+#define __do_const __attribute__((do_const))
66401+#endif
66402+
66403+#ifdef SIZE_OVERFLOW_PLUGIN
66404+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
66405+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
66406+#endif
66407+
66408+#ifdef LATENT_ENTROPY_PLUGIN
66409+#define __latent_entropy __attribute__((latent_entropy))
66410+#endif
66411+
66412 /*
66413 * Mark a position in code as unreachable. This can be used to
66414 * suppress control flow warnings after asm blocks that transfer
66415@@ -49,6 +64,11 @@
66416 #define __noclone __attribute__((__noclone__))
66417
66418 #endif
66419+
66420+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66421+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66422+#define __bos0(ptr) __bos((ptr), 0)
66423+#define __bos1(ptr) __bos((ptr), 1)
66424 #endif
66425
66426 #if __GNUC_MINOR__ >= 6
66427diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66428index dd852b7..72924c0 100644
66429--- a/include/linux/compiler.h
66430+++ b/include/linux/compiler.h
66431@@ -5,11 +5,14 @@
66432
66433 #ifdef __CHECKER__
66434 # define __user __attribute__((noderef, address_space(1)))
66435+# define __force_user __force __user
66436 # define __kernel __attribute__((address_space(0)))
66437+# define __force_kernel __force __kernel
66438 # define __safe __attribute__((safe))
66439 # define __force __attribute__((force))
66440 # define __nocast __attribute__((nocast))
66441 # define __iomem __attribute__((noderef, address_space(2)))
66442+# define __force_iomem __force __iomem
66443 # define __must_hold(x) __attribute__((context(x,1,1)))
66444 # define __acquires(x) __attribute__((context(x,0,1)))
66445 # define __releases(x) __attribute__((context(x,1,0)))
66446@@ -17,20 +20,48 @@
66447 # define __release(x) __context__(x,-1)
66448 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66449 # define __percpu __attribute__((noderef, address_space(3)))
66450+# define __force_percpu __force __percpu
66451 #ifdef CONFIG_SPARSE_RCU_POINTER
66452 # define __rcu __attribute__((noderef, address_space(4)))
66453+# define __force_rcu __force __rcu
66454 #else
66455 # define __rcu
66456+# define __force_rcu
66457 #endif
66458 extern void __chk_user_ptr(const volatile void __user *);
66459 extern void __chk_io_ptr(const volatile void __iomem *);
66460+#elif defined(CHECKER_PLUGIN)
66461+//# define __user
66462+//# define __force_user
66463+//# define __kernel
66464+//# define __force_kernel
66465+# define __safe
66466+# define __force
66467+# define __nocast
66468+# define __iomem
66469+# define __force_iomem
66470+# define __chk_user_ptr(x) (void)0
66471+# define __chk_io_ptr(x) (void)0
66472+# define __builtin_warning(x, y...) (1)
66473+# define __acquires(x)
66474+# define __releases(x)
66475+# define __acquire(x) (void)0
66476+# define __release(x) (void)0
66477+# define __cond_lock(x,c) (c)
66478+# define __percpu
66479+# define __force_percpu
66480+# define __rcu
66481+# define __force_rcu
66482 #else
66483 # define __user
66484+# define __force_user
66485 # define __kernel
66486+# define __force_kernel
66487 # define __safe
66488 # define __force
66489 # define __nocast
66490 # define __iomem
66491+# define __force_iomem
66492 # define __chk_user_ptr(x) (void)0
66493 # define __chk_io_ptr(x) (void)0
66494 # define __builtin_warning(x, y...) (1)
66495@@ -41,7 +72,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
66496 # define __release(x) (void)0
66497 # define __cond_lock(x,c) (c)
66498 # define __percpu
66499+# define __force_percpu
66500 # define __rcu
66501+# define __force_rcu
66502 #endif
66503
66504 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
66505@@ -275,6 +308,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66506 # define __attribute_const__ /* unimplemented */
66507 #endif
66508
66509+#ifndef __no_const
66510+# define __no_const
66511+#endif
66512+
66513+#ifndef __do_const
66514+# define __do_const
66515+#endif
66516+
66517+#ifndef __size_overflow
66518+# define __size_overflow(...)
66519+#endif
66520+
66521+#ifndef __intentional_overflow
66522+# define __intentional_overflow(...)
66523+#endif
66524+
66525+#ifndef __latent_entropy
66526+# define __latent_entropy
66527+#endif
66528+
66529 /*
66530 * Tell gcc if a function is cold. The compiler will assume any path
66531 * directly leading to the call is unlikely.
66532@@ -284,6 +337,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66533 #define __cold
66534 #endif
66535
66536+#ifndef __alloc_size
66537+#define __alloc_size(...)
66538+#endif
66539+
66540+#ifndef __bos
66541+#define __bos(ptr, arg)
66542+#endif
66543+
66544+#ifndef __bos0
66545+#define __bos0(ptr)
66546+#endif
66547+
66548+#ifndef __bos1
66549+#define __bos1(ptr)
66550+#endif
66551+
66552 /* Simple shorthand for a section definition */
66553 #ifndef __section
66554 # define __section(S) __attribute__ ((__section__(#S)))
66555@@ -323,6 +392,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66556 * use is to mediate communication between process-level code and irq/NMI
66557 * handlers, all running on the same CPU.
66558 */
66559-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66560+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66561+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66562
66563 #endif /* __LINUX_COMPILER_H */
66564diff --git a/include/linux/configfs.h b/include/linux/configfs.h
66565index 34025df..d94bbbc 100644
66566--- a/include/linux/configfs.h
66567+++ b/include/linux/configfs.h
66568@@ -125,7 +125,7 @@ struct configfs_attribute {
66569 const char *ca_name;
66570 struct module *ca_owner;
66571 umode_t ca_mode;
66572-};
66573+} __do_const;
66574
66575 /*
66576 * Users often need to create attribute structures for their configurable
66577diff --git a/include/linux/cpu.h b/include/linux/cpu.h
66578index ce7a074..01ab8ac 100644
66579--- a/include/linux/cpu.h
66580+++ b/include/linux/cpu.h
66581@@ -115,7 +115,7 @@ enum {
66582 /* Need to know about CPUs going up/down? */
66583 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
66584 #define cpu_notifier(fn, pri) { \
66585- static struct notifier_block fn##_nb __cpuinitdata = \
66586+ static struct notifier_block fn##_nb = \
66587 { .notifier_call = fn, .priority = pri }; \
66588 register_cpu_notifier(&fn##_nb); \
66589 }
66590diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
66591index a55b88e..fba90c5 100644
66592--- a/include/linux/cpufreq.h
66593+++ b/include/linux/cpufreq.h
66594@@ -240,7 +240,7 @@ struct cpufreq_driver {
66595 int (*suspend) (struct cpufreq_policy *policy);
66596 int (*resume) (struct cpufreq_policy *policy);
66597 struct freq_attr **attr;
66598-};
66599+} __do_const;
66600
66601 /* flags */
66602
66603@@ -299,6 +299,7 @@ struct global_attr {
66604 ssize_t (*store)(struct kobject *a, struct attribute *b,
66605 const char *c, size_t count);
66606 };
66607+typedef struct global_attr __no_const global_attr_no_const;
66608
66609 #define define_one_global_ro(_name) \
66610 static struct global_attr _name = \
66611diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
66612index 24cd1037..20a63aae 100644
66613--- a/include/linux/cpuidle.h
66614+++ b/include/linux/cpuidle.h
66615@@ -54,7 +54,8 @@ struct cpuidle_state {
66616 int index);
66617
66618 int (*enter_dead) (struct cpuidle_device *dev, int index);
66619-};
66620+} __do_const;
66621+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
66622
66623 /* Idle State Flags */
66624 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
66625@@ -216,7 +217,7 @@ struct cpuidle_governor {
66626 void (*reflect) (struct cpuidle_device *dev, int index);
66627
66628 struct module *owner;
66629-};
66630+} __do_const;
66631
66632 #ifdef CONFIG_CPU_IDLE
66633
66634diff --git a/include/linux/cred.h b/include/linux/cred.h
66635index 04421e8..6bce4ef 100644
66636--- a/include/linux/cred.h
66637+++ b/include/linux/cred.h
66638@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
66639 static inline void validate_process_creds(void)
66640 {
66641 }
66642+static inline void validate_task_creds(struct task_struct *task)
66643+{
66644+}
66645 #endif
66646
66647 /**
66648diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66649index b92eadf..b4ecdc1 100644
66650--- a/include/linux/crypto.h
66651+++ b/include/linux/crypto.h
66652@@ -373,7 +373,7 @@ struct cipher_tfm {
66653 const u8 *key, unsigned int keylen);
66654 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66655 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66656-};
66657+} __no_const;
66658
66659 struct hash_tfm {
66660 int (*init)(struct hash_desc *desc);
66661@@ -394,13 +394,13 @@ struct compress_tfm {
66662 int (*cot_decompress)(struct crypto_tfm *tfm,
66663 const u8 *src, unsigned int slen,
66664 u8 *dst, unsigned int *dlen);
66665-};
66666+} __no_const;
66667
66668 struct rng_tfm {
66669 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66670 unsigned int dlen);
66671 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66672-};
66673+} __no_const;
66674
66675 #define crt_ablkcipher crt_u.ablkcipher
66676 #define crt_aead crt_u.aead
66677diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66678index 7925bf0..d5143d2 100644
66679--- a/include/linux/decompress/mm.h
66680+++ b/include/linux/decompress/mm.h
66681@@ -77,7 +77,7 @@ static void free(void *where)
66682 * warnings when not needed (indeed large_malloc / large_free are not
66683 * needed by inflate */
66684
66685-#define malloc(a) kmalloc(a, GFP_KERNEL)
66686+#define malloc(a) kmalloc((a), GFP_KERNEL)
66687 #define free(a) kfree(a)
66688
66689 #define large_malloc(a) vmalloc(a)
66690diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
66691index e83ef39..33e0eb3 100644
66692--- a/include/linux/devfreq.h
66693+++ b/include/linux/devfreq.h
66694@@ -114,7 +114,7 @@ struct devfreq_governor {
66695 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
66696 int (*event_handler)(struct devfreq *devfreq,
66697 unsigned int event, void *data);
66698-};
66699+} __do_const;
66700
66701 /**
66702 * struct devfreq - Device devfreq structure
66703diff --git a/include/linux/device.h b/include/linux/device.h
66704index 43dcda9..7a1fb65 100644
66705--- a/include/linux/device.h
66706+++ b/include/linux/device.h
66707@@ -294,7 +294,7 @@ struct subsys_interface {
66708 struct list_head node;
66709 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
66710 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
66711-};
66712+} __do_const;
66713
66714 int subsys_interface_register(struct subsys_interface *sif);
66715 void subsys_interface_unregister(struct subsys_interface *sif);
66716@@ -474,7 +474,7 @@ struct device_type {
66717 void (*release)(struct device *dev);
66718
66719 const struct dev_pm_ops *pm;
66720-};
66721+} __do_const;
66722
66723 /* interface for exporting device attributes */
66724 struct device_attribute {
66725@@ -484,11 +484,12 @@ struct device_attribute {
66726 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
66727 const char *buf, size_t count);
66728 };
66729+typedef struct device_attribute __no_const device_attribute_no_const;
66730
66731 struct dev_ext_attribute {
66732 struct device_attribute attr;
66733 void *var;
66734-};
66735+} __do_const;
66736
66737 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
66738 char *buf);
66739diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66740index 94af418..b1ca7a2 100644
66741--- a/include/linux/dma-mapping.h
66742+++ b/include/linux/dma-mapping.h
66743@@ -54,7 +54,7 @@ struct dma_map_ops {
66744 u64 (*get_required_mask)(struct device *dev);
66745 #endif
66746 int is_phys;
66747-};
66748+} __do_const;
66749
66750 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66751
66752diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
66753index d3201e4..8281e63 100644
66754--- a/include/linux/dmaengine.h
66755+++ b/include/linux/dmaengine.h
66756@@ -1018,9 +1018,9 @@ struct dma_pinned_list {
66757 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
66758 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
66759
66760-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
66761+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
66762 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
66763-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
66764+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
66765 struct dma_pinned_list *pinned_list, struct page *page,
66766 unsigned int offset, size_t len);
66767
66768diff --git a/include/linux/efi.h b/include/linux/efi.h
66769index 7a9498a..155713d 100644
66770--- a/include/linux/efi.h
66771+++ b/include/linux/efi.h
66772@@ -733,6 +733,7 @@ struct efivar_operations {
66773 efi_set_variable_t *set_variable;
66774 efi_query_variable_info_t *query_variable_info;
66775 };
66776+typedef struct efivar_operations __no_const efivar_operations_no_const;
66777
66778 struct efivars {
66779 /*
66780diff --git a/include/linux/elf.h b/include/linux/elf.h
66781index 8c9048e..16a4665 100644
66782--- a/include/linux/elf.h
66783+++ b/include/linux/elf.h
66784@@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
66785 #define elf_note elf32_note
66786 #define elf_addr_t Elf32_Off
66787 #define Elf_Half Elf32_Half
66788+#define elf_dyn Elf32_Dyn
66789
66790 #else
66791
66792@@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
66793 #define elf_note elf64_note
66794 #define elf_addr_t Elf64_Off
66795 #define Elf_Half Elf64_Half
66796+#define elf_dyn Elf64_Dyn
66797
66798 #endif
66799
66800diff --git a/include/linux/extcon.h b/include/linux/extcon.h
66801index fcb51c8..bdafcf6 100644
66802--- a/include/linux/extcon.h
66803+++ b/include/linux/extcon.h
66804@@ -134,7 +134,7 @@ struct extcon_dev {
66805 /* /sys/class/extcon/.../mutually_exclusive/... */
66806 struct attribute_group attr_g_muex;
66807 struct attribute **attrs_muex;
66808- struct device_attribute *d_attrs_muex;
66809+ device_attribute_no_const *d_attrs_muex;
66810 };
66811
66812 /**
66813diff --git a/include/linux/fb.h b/include/linux/fb.h
66814index c7a9571..02eeffe 100644
66815--- a/include/linux/fb.h
66816+++ b/include/linux/fb.h
66817@@ -302,7 +302,7 @@ struct fb_ops {
66818 /* called at KDB enter and leave time to prepare the console */
66819 int (*fb_debug_enter)(struct fb_info *info);
66820 int (*fb_debug_leave)(struct fb_info *info);
66821-};
66822+} __do_const;
66823
66824 #ifdef CONFIG_FB_TILEBLITTING
66825 #define FB_TILE_CURSOR_NONE 0
66826diff --git a/include/linux/filter.h b/include/linux/filter.h
66827index c45eabc..baa0be5 100644
66828--- a/include/linux/filter.h
66829+++ b/include/linux/filter.h
66830@@ -20,6 +20,7 @@ struct compat_sock_fprog {
66831
66832 struct sk_buff;
66833 struct sock;
66834+struct bpf_jit_work;
66835
66836 struct sk_filter
66837 {
66838@@ -27,6 +28,9 @@ struct sk_filter
66839 unsigned int len; /* Number of filter blocks */
66840 unsigned int (*bpf_func)(const struct sk_buff *skb,
66841 const struct sock_filter *filter);
66842+#ifdef CONFIG_BPF_JIT
66843+ struct bpf_jit_work *work;
66844+#endif
66845 struct rcu_head rcu;
66846 struct sock_filter insns[0];
66847 };
66848diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
66849index 3044254..9767f41 100644
66850--- a/include/linux/frontswap.h
66851+++ b/include/linux/frontswap.h
66852@@ -11,7 +11,7 @@ struct frontswap_ops {
66853 int (*load)(unsigned, pgoff_t, struct page *);
66854 void (*invalidate_page)(unsigned, pgoff_t);
66855 void (*invalidate_area)(unsigned);
66856-};
66857+} __no_const;
66858
66859 extern bool frontswap_enabled;
66860 extern struct frontswap_ops
66861diff --git a/include/linux/fs.h b/include/linux/fs.h
66862index 7617ee0..b575199 100644
66863--- a/include/linux/fs.h
66864+++ b/include/linux/fs.h
66865@@ -1541,7 +1541,8 @@ struct file_operations {
66866 long (*fallocate)(struct file *file, int mode, loff_t offset,
66867 loff_t len);
66868 int (*show_fdinfo)(struct seq_file *m, struct file *f);
66869-};
66870+} __do_const;
66871+typedef struct file_operations __no_const file_operations_no_const;
66872
66873 struct inode_operations {
66874 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
66875@@ -2665,4 +2666,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
66876 inode->i_flags |= S_NOSEC;
66877 }
66878
66879+static inline bool is_sidechannel_device(const struct inode *inode)
66880+{
66881+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
66882+ umode_t mode = inode->i_mode;
66883+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
66884+#else
66885+ return false;
66886+#endif
66887+}
66888+
66889 #endif /* _LINUX_FS_H */
66890diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
66891index d0ae3a8..0244b34 100644
66892--- a/include/linux/fs_struct.h
66893+++ b/include/linux/fs_struct.h
66894@@ -6,7 +6,7 @@
66895 #include <linux/seqlock.h>
66896
66897 struct fs_struct {
66898- int users;
66899+ atomic_t users;
66900 spinlock_t lock;
66901 seqcount_t seq;
66902 int umask;
66903diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
66904index 5dfa0aa..6acf322 100644
66905--- a/include/linux/fscache-cache.h
66906+++ b/include/linux/fscache-cache.h
66907@@ -112,7 +112,7 @@ struct fscache_operation {
66908 fscache_operation_release_t release;
66909 };
66910
66911-extern atomic_t fscache_op_debug_id;
66912+extern atomic_unchecked_t fscache_op_debug_id;
66913 extern void fscache_op_work_func(struct work_struct *work);
66914
66915 extern void fscache_enqueue_operation(struct fscache_operation *);
66916@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
66917 INIT_WORK(&op->work, fscache_op_work_func);
66918 atomic_set(&op->usage, 1);
66919 op->state = FSCACHE_OP_ST_INITIALISED;
66920- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
66921+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
66922 op->processor = processor;
66923 op->release = release;
66924 INIT_LIST_HEAD(&op->pend_link);
66925diff --git a/include/linux/fscache.h b/include/linux/fscache.h
66926index 7a08623..4c07b0f 100644
66927--- a/include/linux/fscache.h
66928+++ b/include/linux/fscache.h
66929@@ -152,7 +152,7 @@ struct fscache_cookie_def {
66930 * - this is mandatory for any object that may have data
66931 */
66932 void (*now_uncached)(void *cookie_netfs_data);
66933-};
66934+} __do_const;
66935
66936 /*
66937 * fscache cached network filesystem type
66938diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
66939index 0fbfb46..508eb0d 100644
66940--- a/include/linux/fsnotify.h
66941+++ b/include/linux/fsnotify.h
66942@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
66943 struct inode *inode = path->dentry->d_inode;
66944 __u32 mask = FS_ACCESS;
66945
66946+ if (is_sidechannel_device(inode))
66947+ return;
66948+
66949 if (S_ISDIR(inode->i_mode))
66950 mask |= FS_ISDIR;
66951
66952@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
66953 struct inode *inode = path->dentry->d_inode;
66954 __u32 mask = FS_MODIFY;
66955
66956+ if (is_sidechannel_device(inode))
66957+ return;
66958+
66959 if (S_ISDIR(inode->i_mode))
66960 mask |= FS_ISDIR;
66961
66962@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
66963 */
66964 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
66965 {
66966- return kstrdup(name, GFP_KERNEL);
66967+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
66968 }
66969
66970 /*
66971diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66972index a3d4895..ddd2a50 100644
66973--- a/include/linux/ftrace_event.h
66974+++ b/include/linux/ftrace_event.h
66975@@ -272,7 +272,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
66976 extern int trace_add_event_call(struct ftrace_event_call *call);
66977 extern void trace_remove_event_call(struct ftrace_event_call *call);
66978
66979-#define is_signed_type(type) (((type)(-1)) < 0)
66980+#define is_signed_type(type) (((type)(-1)) < (type)1)
66981
66982 int trace_set_clr_event(const char *system, const char *event, int set);
66983
66984diff --git a/include/linux/genhd.h b/include/linux/genhd.h
66985index 79b8bba..86b539e 100644
66986--- a/include/linux/genhd.h
66987+++ b/include/linux/genhd.h
66988@@ -194,7 +194,7 @@ struct gendisk {
66989 struct kobject *slave_dir;
66990
66991 struct timer_rand_state *random;
66992- atomic_t sync_io; /* RAID */
66993+ atomic_unchecked_t sync_io; /* RAID */
66994 struct disk_events *ev;
66995 #ifdef CONFIG_BLK_DEV_INTEGRITY
66996 struct blk_integrity *integrity;
66997diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
66998index 023bc34..b02b46a 100644
66999--- a/include/linux/genl_magic_func.h
67000+++ b/include/linux/genl_magic_func.h
67001@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
67002 },
67003
67004 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
67005-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
67006+static struct genl_ops ZZZ_genl_ops[] = {
67007 #include GENL_MAGIC_INCLUDE_FILE
67008 };
67009
67010diff --git a/include/linux/gfp.h b/include/linux/gfp.h
67011index 0f615eb..5c3832f 100644
67012--- a/include/linux/gfp.h
67013+++ b/include/linux/gfp.h
67014@@ -35,6 +35,13 @@ struct vm_area_struct;
67015 #define ___GFP_NO_KSWAPD 0x400000u
67016 #define ___GFP_OTHER_NODE 0x800000u
67017 #define ___GFP_WRITE 0x1000000u
67018+
67019+#ifdef CONFIG_PAX_USERCOPY_SLABS
67020+#define ___GFP_USERCOPY 0x2000000u
67021+#else
67022+#define ___GFP_USERCOPY 0
67023+#endif
67024+
67025 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
67026
67027 /*
67028@@ -92,6 +99,7 @@ struct vm_area_struct;
67029 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
67030 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
67031 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
67032+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
67033
67034 /*
67035 * This may seem redundant, but it's a way of annotating false positives vs.
67036@@ -99,7 +107,7 @@ struct vm_area_struct;
67037 */
67038 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
67039
67040-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
67041+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
67042 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
67043
67044 /* This equals 0, but use constants in case they ever change */
67045@@ -153,6 +161,8 @@ struct vm_area_struct;
67046 /* 4GB DMA on some platforms */
67047 #define GFP_DMA32 __GFP_DMA32
67048
67049+#define GFP_USERCOPY __GFP_USERCOPY
67050+
67051 /* Convert GFP flags to their corresponding migrate type */
67052 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
67053 {
67054diff --git a/include/linux/gracl.h b/include/linux/gracl.h
67055new file mode 100644
67056index 0000000..ebe6d72
67057--- /dev/null
67058+++ b/include/linux/gracl.h
67059@@ -0,0 +1,319 @@
67060+#ifndef GR_ACL_H
67061+#define GR_ACL_H
67062+
67063+#include <linux/grdefs.h>
67064+#include <linux/resource.h>
67065+#include <linux/capability.h>
67066+#include <linux/dcache.h>
67067+#include <asm/resource.h>
67068+
67069+/* Major status information */
67070+
67071+#define GR_VERSION "grsecurity 2.9.1"
67072+#define GRSECURITY_VERSION 0x2901
67073+
67074+enum {
67075+ GR_SHUTDOWN = 0,
67076+ GR_ENABLE = 1,
67077+ GR_SPROLE = 2,
67078+ GR_RELOAD = 3,
67079+ GR_SEGVMOD = 4,
67080+ GR_STATUS = 5,
67081+ GR_UNSPROLE = 6,
67082+ GR_PASSSET = 7,
67083+ GR_SPROLEPAM = 8,
67084+};
67085+
67086+/* Password setup definitions
67087+ * kernel/grhash.c */
67088+enum {
67089+ GR_PW_LEN = 128,
67090+ GR_SALT_LEN = 16,
67091+ GR_SHA_LEN = 32,
67092+};
67093+
67094+enum {
67095+ GR_SPROLE_LEN = 64,
67096+};
67097+
67098+enum {
67099+ GR_NO_GLOB = 0,
67100+ GR_REG_GLOB,
67101+ GR_CREATE_GLOB
67102+};
67103+
67104+#define GR_NLIMITS 32
67105+
67106+/* Begin Data Structures */
67107+
67108+struct sprole_pw {
67109+ unsigned char *rolename;
67110+ unsigned char salt[GR_SALT_LEN];
67111+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
67112+};
67113+
67114+struct name_entry {
67115+ __u32 key;
67116+ ino_t inode;
67117+ dev_t device;
67118+ char *name;
67119+ __u16 len;
67120+ __u8 deleted;
67121+ struct name_entry *prev;
67122+ struct name_entry *next;
67123+};
67124+
67125+struct inodev_entry {
67126+ struct name_entry *nentry;
67127+ struct inodev_entry *prev;
67128+ struct inodev_entry *next;
67129+};
67130+
67131+struct acl_role_db {
67132+ struct acl_role_label **r_hash;
67133+ __u32 r_size;
67134+};
67135+
67136+struct inodev_db {
67137+ struct inodev_entry **i_hash;
67138+ __u32 i_size;
67139+};
67140+
67141+struct name_db {
67142+ struct name_entry **n_hash;
67143+ __u32 n_size;
67144+};
67145+
67146+struct crash_uid {
67147+ uid_t uid;
67148+ unsigned long expires;
67149+};
67150+
67151+struct gr_hash_struct {
67152+ void **table;
67153+ void **nametable;
67154+ void *first;
67155+ __u32 table_size;
67156+ __u32 used_size;
67157+ int type;
67158+};
67159+
67160+/* Userspace Grsecurity ACL data structures */
67161+
67162+struct acl_subject_label {
67163+ char *filename;
67164+ ino_t inode;
67165+ dev_t device;
67166+ __u32 mode;
67167+ kernel_cap_t cap_mask;
67168+ kernel_cap_t cap_lower;
67169+ kernel_cap_t cap_invert_audit;
67170+
67171+ struct rlimit res[GR_NLIMITS];
67172+ __u32 resmask;
67173+
67174+ __u8 user_trans_type;
67175+ __u8 group_trans_type;
67176+ uid_t *user_transitions;
67177+ gid_t *group_transitions;
67178+ __u16 user_trans_num;
67179+ __u16 group_trans_num;
67180+
67181+ __u32 sock_families[2];
67182+ __u32 ip_proto[8];
67183+ __u32 ip_type;
67184+ struct acl_ip_label **ips;
67185+ __u32 ip_num;
67186+ __u32 inaddr_any_override;
67187+
67188+ __u32 crashes;
67189+ unsigned long expires;
67190+
67191+ struct acl_subject_label *parent_subject;
67192+ struct gr_hash_struct *hash;
67193+ struct acl_subject_label *prev;
67194+ struct acl_subject_label *next;
67195+
67196+ struct acl_object_label **obj_hash;
67197+ __u32 obj_hash_size;
67198+ __u16 pax_flags;
67199+};
67200+
67201+struct role_allowed_ip {
67202+ __u32 addr;
67203+ __u32 netmask;
67204+
67205+ struct role_allowed_ip *prev;
67206+ struct role_allowed_ip *next;
67207+};
67208+
67209+struct role_transition {
67210+ char *rolename;
67211+
67212+ struct role_transition *prev;
67213+ struct role_transition *next;
67214+};
67215+
67216+struct acl_role_label {
67217+ char *rolename;
67218+ uid_t uidgid;
67219+ __u16 roletype;
67220+
67221+ __u16 auth_attempts;
67222+ unsigned long expires;
67223+
67224+ struct acl_subject_label *root_label;
67225+ struct gr_hash_struct *hash;
67226+
67227+ struct acl_role_label *prev;
67228+ struct acl_role_label *next;
67229+
67230+ struct role_transition *transitions;
67231+ struct role_allowed_ip *allowed_ips;
67232+ uid_t *domain_children;
67233+ __u16 domain_child_num;
67234+
67235+ umode_t umask;
67236+
67237+ struct acl_subject_label **subj_hash;
67238+ __u32 subj_hash_size;
67239+};
67240+
67241+struct user_acl_role_db {
67242+ struct acl_role_label **r_table;
67243+ __u32 num_pointers; /* Number of allocations to track */
67244+ __u32 num_roles; /* Number of roles */
67245+ __u32 num_domain_children; /* Number of domain children */
67246+ __u32 num_subjects; /* Number of subjects */
67247+ __u32 num_objects; /* Number of objects */
67248+};
67249+
67250+struct acl_object_label {
67251+ char *filename;
67252+ ino_t inode;
67253+ dev_t device;
67254+ __u32 mode;
67255+
67256+ struct acl_subject_label *nested;
67257+ struct acl_object_label *globbed;
67258+
67259+ /* next two structures not used */
67260+
67261+ struct acl_object_label *prev;
67262+ struct acl_object_label *next;
67263+};
67264+
67265+struct acl_ip_label {
67266+ char *iface;
67267+ __u32 addr;
67268+ __u32 netmask;
67269+ __u16 low, high;
67270+ __u8 mode;
67271+ __u32 type;
67272+ __u32 proto[8];
67273+
67274+ /* next two structures not used */
67275+
67276+ struct acl_ip_label *prev;
67277+ struct acl_ip_label *next;
67278+};
67279+
67280+struct gr_arg {
67281+ struct user_acl_role_db role_db;
67282+ unsigned char pw[GR_PW_LEN];
67283+ unsigned char salt[GR_SALT_LEN];
67284+ unsigned char sum[GR_SHA_LEN];
67285+ unsigned char sp_role[GR_SPROLE_LEN];
67286+ struct sprole_pw *sprole_pws;
67287+ dev_t segv_device;
67288+ ino_t segv_inode;
67289+ uid_t segv_uid;
67290+ __u16 num_sprole_pws;
67291+ __u16 mode;
67292+};
67293+
67294+struct gr_arg_wrapper {
67295+ struct gr_arg *arg;
67296+ __u32 version;
67297+ __u32 size;
67298+};
67299+
67300+struct subject_map {
67301+ struct acl_subject_label *user;
67302+ struct acl_subject_label *kernel;
67303+ struct subject_map *prev;
67304+ struct subject_map *next;
67305+};
67306+
67307+struct acl_subj_map_db {
67308+ struct subject_map **s_hash;
67309+ __u32 s_size;
67310+};
67311+
67312+/* End Data Structures Section */
67313+
67314+/* Hash functions generated by empirical testing by Brad Spengler
67315+ Makes good use of the low bits of the inode. Generally 0-1 times
67316+ in loop for successful match. 0-3 for unsuccessful match.
67317+ Shift/add algorithm with modulus of table size and an XOR*/
67318+
67319+static __inline__ unsigned int
67320+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
67321+{
67322+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
67323+}
67324+
67325+ static __inline__ unsigned int
67326+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
67327+{
67328+ return ((const unsigned long)userp % sz);
67329+}
67330+
67331+static __inline__ unsigned int
67332+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
67333+{
67334+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
67335+}
67336+
67337+static __inline__ unsigned int
67338+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
67339+{
67340+ return full_name_hash((const unsigned char *)name, len) % sz;
67341+}
67342+
67343+#define FOR_EACH_ROLE_START(role) \
67344+ role = role_list; \
67345+ while (role) {
67346+
67347+#define FOR_EACH_ROLE_END(role) \
67348+ role = role->prev; \
67349+ }
67350+
67351+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
67352+ subj = NULL; \
67353+ iter = 0; \
67354+ while (iter < role->subj_hash_size) { \
67355+ if (subj == NULL) \
67356+ subj = role->subj_hash[iter]; \
67357+ if (subj == NULL) { \
67358+ iter++; \
67359+ continue; \
67360+ }
67361+
67362+#define FOR_EACH_SUBJECT_END(subj,iter) \
67363+ subj = subj->next; \
67364+ if (subj == NULL) \
67365+ iter++; \
67366+ }
67367+
67368+
67369+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
67370+ subj = role->hash->first; \
67371+ while (subj != NULL) {
67372+
67373+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
67374+ subj = subj->next; \
67375+ }
67376+
67377+#endif
67378+
67379diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
67380new file mode 100644
67381index 0000000..323ecf2
67382--- /dev/null
67383+++ b/include/linux/gralloc.h
67384@@ -0,0 +1,9 @@
67385+#ifndef __GRALLOC_H
67386+#define __GRALLOC_H
67387+
67388+void acl_free_all(void);
67389+int acl_alloc_stack_init(unsigned long size);
67390+void *acl_alloc(unsigned long len);
67391+void *acl_alloc_num(unsigned long num, unsigned long len);
67392+
67393+#endif
67394diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
67395new file mode 100644
67396index 0000000..be66033
67397--- /dev/null
67398+++ b/include/linux/grdefs.h
67399@@ -0,0 +1,140 @@
67400+#ifndef GRDEFS_H
67401+#define GRDEFS_H
67402+
67403+/* Begin grsecurity status declarations */
67404+
67405+enum {
67406+ GR_READY = 0x01,
67407+ GR_STATUS_INIT = 0x00 // disabled state
67408+};
67409+
67410+/* Begin ACL declarations */
67411+
67412+/* Role flags */
67413+
67414+enum {
67415+ GR_ROLE_USER = 0x0001,
67416+ GR_ROLE_GROUP = 0x0002,
67417+ GR_ROLE_DEFAULT = 0x0004,
67418+ GR_ROLE_SPECIAL = 0x0008,
67419+ GR_ROLE_AUTH = 0x0010,
67420+ GR_ROLE_NOPW = 0x0020,
67421+ GR_ROLE_GOD = 0x0040,
67422+ GR_ROLE_LEARN = 0x0080,
67423+ GR_ROLE_TPE = 0x0100,
67424+ GR_ROLE_DOMAIN = 0x0200,
67425+ GR_ROLE_PAM = 0x0400,
67426+ GR_ROLE_PERSIST = 0x0800
67427+};
67428+
67429+/* ACL Subject and Object mode flags */
67430+enum {
67431+ GR_DELETED = 0x80000000
67432+};
67433+
67434+/* ACL Object-only mode flags */
67435+enum {
67436+ GR_READ = 0x00000001,
67437+ GR_APPEND = 0x00000002,
67438+ GR_WRITE = 0x00000004,
67439+ GR_EXEC = 0x00000008,
67440+ GR_FIND = 0x00000010,
67441+ GR_INHERIT = 0x00000020,
67442+ GR_SETID = 0x00000040,
67443+ GR_CREATE = 0x00000080,
67444+ GR_DELETE = 0x00000100,
67445+ GR_LINK = 0x00000200,
67446+ GR_AUDIT_READ = 0x00000400,
67447+ GR_AUDIT_APPEND = 0x00000800,
67448+ GR_AUDIT_WRITE = 0x00001000,
67449+ GR_AUDIT_EXEC = 0x00002000,
67450+ GR_AUDIT_FIND = 0x00004000,
67451+ GR_AUDIT_INHERIT= 0x00008000,
67452+ GR_AUDIT_SETID = 0x00010000,
67453+ GR_AUDIT_CREATE = 0x00020000,
67454+ GR_AUDIT_DELETE = 0x00040000,
67455+ GR_AUDIT_LINK = 0x00080000,
67456+ GR_PTRACERD = 0x00100000,
67457+ GR_NOPTRACE = 0x00200000,
67458+ GR_SUPPRESS = 0x00400000,
67459+ GR_NOLEARN = 0x00800000,
67460+ GR_INIT_TRANSFER= 0x01000000
67461+};
67462+
67463+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
67464+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
67465+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
67466+
67467+/* ACL subject-only mode flags */
67468+enum {
67469+ GR_KILL = 0x00000001,
67470+ GR_VIEW = 0x00000002,
67471+ GR_PROTECTED = 0x00000004,
67472+ GR_LEARN = 0x00000008,
67473+ GR_OVERRIDE = 0x00000010,
67474+ /* just a placeholder, this mode is only used in userspace */
67475+ GR_DUMMY = 0x00000020,
67476+ GR_PROTSHM = 0x00000040,
67477+ GR_KILLPROC = 0x00000080,
67478+ GR_KILLIPPROC = 0x00000100,
67479+ /* just a placeholder, this mode is only used in userspace */
67480+ GR_NOTROJAN = 0x00000200,
67481+ GR_PROTPROCFD = 0x00000400,
67482+ GR_PROCACCT = 0x00000800,
67483+ GR_RELAXPTRACE = 0x00001000,
67484+ //GR_NESTED = 0x00002000,
67485+ GR_INHERITLEARN = 0x00004000,
67486+ GR_PROCFIND = 0x00008000,
67487+ GR_POVERRIDE = 0x00010000,
67488+ GR_KERNELAUTH = 0x00020000,
67489+ GR_ATSECURE = 0x00040000,
67490+ GR_SHMEXEC = 0x00080000
67491+};
67492+
67493+enum {
67494+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
67495+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
67496+ GR_PAX_ENABLE_MPROTECT = 0x0004,
67497+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
67498+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
67499+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
67500+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
67501+ GR_PAX_DISABLE_MPROTECT = 0x0400,
67502+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
67503+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
67504+};
67505+
67506+enum {
67507+ GR_ID_USER = 0x01,
67508+ GR_ID_GROUP = 0x02,
67509+};
67510+
67511+enum {
67512+ GR_ID_ALLOW = 0x01,
67513+ GR_ID_DENY = 0x02,
67514+};
67515+
67516+#define GR_CRASH_RES 31
67517+#define GR_UIDTABLE_MAX 500
67518+
67519+/* begin resource learning section */
67520+enum {
67521+ GR_RLIM_CPU_BUMP = 60,
67522+ GR_RLIM_FSIZE_BUMP = 50000,
67523+ GR_RLIM_DATA_BUMP = 10000,
67524+ GR_RLIM_STACK_BUMP = 1000,
67525+ GR_RLIM_CORE_BUMP = 10000,
67526+ GR_RLIM_RSS_BUMP = 500000,
67527+ GR_RLIM_NPROC_BUMP = 1,
67528+ GR_RLIM_NOFILE_BUMP = 5,
67529+ GR_RLIM_MEMLOCK_BUMP = 50000,
67530+ GR_RLIM_AS_BUMP = 500000,
67531+ GR_RLIM_LOCKS_BUMP = 2,
67532+ GR_RLIM_SIGPENDING_BUMP = 5,
67533+ GR_RLIM_MSGQUEUE_BUMP = 10000,
67534+ GR_RLIM_NICE_BUMP = 1,
67535+ GR_RLIM_RTPRIO_BUMP = 1,
67536+ GR_RLIM_RTTIME_BUMP = 1000000
67537+};
67538+
67539+#endif
67540diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67541new file mode 100644
67542index 0000000..9bb6662
67543--- /dev/null
67544+++ b/include/linux/grinternal.h
67545@@ -0,0 +1,215 @@
67546+#ifndef __GRINTERNAL_H
67547+#define __GRINTERNAL_H
67548+
67549+#ifdef CONFIG_GRKERNSEC
67550+
67551+#include <linux/fs.h>
67552+#include <linux/mnt_namespace.h>
67553+#include <linux/nsproxy.h>
67554+#include <linux/gracl.h>
67555+#include <linux/grdefs.h>
67556+#include <linux/grmsg.h>
67557+
67558+void gr_add_learn_entry(const char *fmt, ...)
67559+ __attribute__ ((format (printf, 1, 2)));
67560+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67561+ const struct vfsmount *mnt);
67562+__u32 gr_check_create(const struct dentry *new_dentry,
67563+ const struct dentry *parent,
67564+ const struct vfsmount *mnt, const __u32 mode);
67565+int gr_check_protected_task(const struct task_struct *task);
67566+__u32 to_gr_audit(const __u32 reqmode);
67567+int gr_set_acls(const int type);
67568+int gr_apply_subject_to_task(struct task_struct *task);
67569+int gr_acl_is_enabled(void);
67570+char gr_roletype_to_char(void);
67571+
67572+void gr_handle_alertkill(struct task_struct *task);
67573+char *gr_to_filename(const struct dentry *dentry,
67574+ const struct vfsmount *mnt);
67575+char *gr_to_filename1(const struct dentry *dentry,
67576+ const struct vfsmount *mnt);
67577+char *gr_to_filename2(const struct dentry *dentry,
67578+ const struct vfsmount *mnt);
67579+char *gr_to_filename3(const struct dentry *dentry,
67580+ const struct vfsmount *mnt);
67581+
67582+extern int grsec_enable_ptrace_readexec;
67583+extern int grsec_enable_harden_ptrace;
67584+extern int grsec_enable_link;
67585+extern int grsec_enable_fifo;
67586+extern int grsec_enable_execve;
67587+extern int grsec_enable_shm;
67588+extern int grsec_enable_execlog;
67589+extern int grsec_enable_signal;
67590+extern int grsec_enable_audit_ptrace;
67591+extern int grsec_enable_forkfail;
67592+extern int grsec_enable_time;
67593+extern int grsec_enable_rofs;
67594+extern int grsec_enable_chroot_shmat;
67595+extern int grsec_enable_chroot_mount;
67596+extern int grsec_enable_chroot_double;
67597+extern int grsec_enable_chroot_pivot;
67598+extern int grsec_enable_chroot_chdir;
67599+extern int grsec_enable_chroot_chmod;
67600+extern int grsec_enable_chroot_mknod;
67601+extern int grsec_enable_chroot_fchdir;
67602+extern int grsec_enable_chroot_nice;
67603+extern int grsec_enable_chroot_execlog;
67604+extern int grsec_enable_chroot_caps;
67605+extern int grsec_enable_chroot_sysctl;
67606+extern int grsec_enable_chroot_unix;
67607+extern int grsec_enable_symlinkown;
67608+extern kgid_t grsec_symlinkown_gid;
67609+extern int grsec_enable_tpe;
67610+extern kgid_t grsec_tpe_gid;
67611+extern int grsec_enable_tpe_all;
67612+extern int grsec_enable_tpe_invert;
67613+extern int grsec_enable_socket_all;
67614+extern kgid_t grsec_socket_all_gid;
67615+extern int grsec_enable_socket_client;
67616+extern kgid_t grsec_socket_client_gid;
67617+extern int grsec_enable_socket_server;
67618+extern kgid_t grsec_socket_server_gid;
67619+extern kgid_t grsec_audit_gid;
67620+extern int grsec_enable_group;
67621+extern int grsec_enable_audit_textrel;
67622+extern int grsec_enable_log_rwxmaps;
67623+extern int grsec_enable_mount;
67624+extern int grsec_enable_chdir;
67625+extern int grsec_resource_logging;
67626+extern int grsec_enable_blackhole;
67627+extern int grsec_lastack_retries;
67628+extern int grsec_enable_brute;
67629+extern int grsec_lock;
67630+
67631+extern spinlock_t grsec_alert_lock;
67632+extern unsigned long grsec_alert_wtime;
67633+extern unsigned long grsec_alert_fyet;
67634+
67635+extern spinlock_t grsec_audit_lock;
67636+
67637+extern rwlock_t grsec_exec_file_lock;
67638+
67639+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67640+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67641+ (tsk)->exec_file->f_vfsmnt) : "/")
67642+
67643+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67644+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67645+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67646+
67647+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67648+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
67649+ (tsk)->exec_file->f_vfsmnt) : "/")
67650+
67651+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67652+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67653+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67654+
67655+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67656+
67657+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67658+
67659+#define GR_CHROOT_CAPS {{ \
67660+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67661+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67662+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67663+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67664+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67665+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67666+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
67667+
67668+#define security_learn(normal_msg,args...) \
67669+({ \
67670+ read_lock(&grsec_exec_file_lock); \
67671+ gr_add_learn_entry(normal_msg "\n", ## args); \
67672+ read_unlock(&grsec_exec_file_lock); \
67673+})
67674+
67675+enum {
67676+ GR_DO_AUDIT,
67677+ GR_DONT_AUDIT,
67678+ /* used for non-audit messages that we shouldn't kill the task on */
67679+ GR_DONT_AUDIT_GOOD
67680+};
67681+
67682+enum {
67683+ GR_TTYSNIFF,
67684+ GR_RBAC,
67685+ GR_RBAC_STR,
67686+ GR_STR_RBAC,
67687+ GR_RBAC_MODE2,
67688+ GR_RBAC_MODE3,
67689+ GR_FILENAME,
67690+ GR_SYSCTL_HIDDEN,
67691+ GR_NOARGS,
67692+ GR_ONE_INT,
67693+ GR_ONE_INT_TWO_STR,
67694+ GR_ONE_STR,
67695+ GR_STR_INT,
67696+ GR_TWO_STR_INT,
67697+ GR_TWO_INT,
67698+ GR_TWO_U64,
67699+ GR_THREE_INT,
67700+ GR_FIVE_INT_TWO_STR,
67701+ GR_TWO_STR,
67702+ GR_THREE_STR,
67703+ GR_FOUR_STR,
67704+ GR_STR_FILENAME,
67705+ GR_FILENAME_STR,
67706+ GR_FILENAME_TWO_INT,
67707+ GR_FILENAME_TWO_INT_STR,
67708+ GR_TEXTREL,
67709+ GR_PTRACE,
67710+ GR_RESOURCE,
67711+ GR_CAP,
67712+ GR_SIG,
67713+ GR_SIG2,
67714+ GR_CRASH1,
67715+ GR_CRASH2,
67716+ GR_PSACCT,
67717+ GR_RWXMAP
67718+};
67719+
67720+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67721+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67722+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67723+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67724+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67725+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67726+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67727+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67728+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67729+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67730+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67731+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67732+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67733+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67734+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67735+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67736+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67737+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67738+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67739+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67740+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67741+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67742+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67743+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67744+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67745+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67746+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67747+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67748+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67749+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67750+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67751+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67752+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67753+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67754+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67755+
67756+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67757+
67758+#endif
67759+
67760+#endif
67761diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67762new file mode 100644
67763index 0000000..2bd4c8d
67764--- /dev/null
67765+++ b/include/linux/grmsg.h
67766@@ -0,0 +1,111 @@
67767+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67768+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67769+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67770+#define GR_STOPMOD_MSG "denied modification of module state by "
67771+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67772+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67773+#define GR_IOPERM_MSG "denied use of ioperm() by "
67774+#define GR_IOPL_MSG "denied use of iopl() by "
67775+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67776+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67777+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67778+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67779+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67780+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67781+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67782+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67783+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67784+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67785+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67786+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67787+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67788+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67789+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67790+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67791+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67792+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67793+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67794+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67795+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67796+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67797+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67798+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67799+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67800+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67801+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67802+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67803+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67804+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67805+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67806+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67807+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67808+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67809+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67810+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67811+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67812+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67813+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67814+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67815+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67816+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67817+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67818+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
67819+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67820+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67821+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67822+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67823+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67824+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67825+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67826+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67827+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67828+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67829+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67830+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67831+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67832+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67833+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67834+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67835+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67836+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67837+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67838+#define GR_FAILFORK_MSG "failed fork with errno %s by "
67839+#define GR_NICE_CHROOT_MSG "denied priority change by "
67840+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67841+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67842+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67843+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67844+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67845+#define GR_TIME_MSG "time set by "
67846+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67847+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67848+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67849+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67850+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67851+#define GR_BIND_MSG "denied bind() by "
67852+#define GR_CONNECT_MSG "denied connect() by "
67853+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67854+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67855+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67856+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67857+#define GR_CAP_ACL_MSG "use of %s denied for "
67858+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67859+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67860+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67861+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67862+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67863+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67864+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67865+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67866+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67867+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67868+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67869+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67870+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67871+#define GR_VM86_MSG "denied use of vm86 by "
67872+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67873+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67874+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67875+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
67876+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
67877+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
67878diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67879new file mode 100644
67880index 0000000..1ae241a
67881--- /dev/null
67882+++ b/include/linux/grsecurity.h
67883@@ -0,0 +1,257 @@
67884+#ifndef GR_SECURITY_H
67885+#define GR_SECURITY_H
67886+#include <linux/fs.h>
67887+#include <linux/fs_struct.h>
67888+#include <linux/binfmts.h>
67889+#include <linux/gracl.h>
67890+
67891+/* notify of brain-dead configs */
67892+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67893+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67894+#endif
67895+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67896+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67897+#endif
67898+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67899+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67900+#endif
67901+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67902+#error "CONFIG_PAX enabled, but no PaX options are enabled."
67903+#endif
67904+
67905+#include <linux/compat.h>
67906+
67907+struct user_arg_ptr {
67908+#ifdef CONFIG_COMPAT
67909+ bool is_compat;
67910+#endif
67911+ union {
67912+ const char __user *const __user *native;
67913+#ifdef CONFIG_COMPAT
67914+ const compat_uptr_t __user *compat;
67915+#endif
67916+ } ptr;
67917+};
67918+
67919+void gr_handle_brute_attach(unsigned long mm_flags);
67920+void gr_handle_brute_check(void);
67921+void gr_handle_kernel_exploit(void);
67922+int gr_process_user_ban(void);
67923+
67924+char gr_roletype_to_char(void);
67925+
67926+int gr_acl_enable_at_secure(void);
67927+
67928+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
67929+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
67930+
67931+void gr_del_task_from_ip_table(struct task_struct *p);
67932+
67933+int gr_pid_is_chrooted(struct task_struct *p);
67934+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67935+int gr_handle_chroot_nice(void);
67936+int gr_handle_chroot_sysctl(const int op);
67937+int gr_handle_chroot_setpriority(struct task_struct *p,
67938+ const int niceval);
67939+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67940+int gr_handle_chroot_chroot(const struct dentry *dentry,
67941+ const struct vfsmount *mnt);
67942+void gr_handle_chroot_chdir(struct path *path);
67943+int gr_handle_chroot_chmod(const struct dentry *dentry,
67944+ const struct vfsmount *mnt, const int mode);
67945+int gr_handle_chroot_mknod(const struct dentry *dentry,
67946+ const struct vfsmount *mnt, const int mode);
67947+int gr_handle_chroot_mount(const struct dentry *dentry,
67948+ const struct vfsmount *mnt,
67949+ const char *dev_name);
67950+int gr_handle_chroot_pivot(void);
67951+int gr_handle_chroot_unix(const pid_t pid);
67952+
67953+int gr_handle_rawio(const struct inode *inode);
67954+
67955+void gr_handle_ioperm(void);
67956+void gr_handle_iopl(void);
67957+
67958+umode_t gr_acl_umask(void);
67959+
67960+int gr_tpe_allow(const struct file *file);
67961+
67962+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67963+void gr_clear_chroot_entries(struct task_struct *task);
67964+
67965+void gr_log_forkfail(const int retval);
67966+void gr_log_timechange(void);
67967+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
67968+void gr_log_chdir(const struct dentry *dentry,
67969+ const struct vfsmount *mnt);
67970+void gr_log_chroot_exec(const struct dentry *dentry,
67971+ const struct vfsmount *mnt);
67972+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
67973+void gr_log_remount(const char *devname, const int retval);
67974+void gr_log_unmount(const char *devname, const int retval);
67975+void gr_log_mount(const char *from, const char *to, const int retval);
67976+void gr_log_textrel(struct vm_area_struct *vma);
67977+void gr_log_rwxmmap(struct file *file);
67978+void gr_log_rwxmprotect(struct file *file);
67979+
67980+int gr_handle_follow_link(const struct inode *parent,
67981+ const struct inode *inode,
67982+ const struct dentry *dentry,
67983+ const struct vfsmount *mnt);
67984+int gr_handle_fifo(const struct dentry *dentry,
67985+ const struct vfsmount *mnt,
67986+ const struct dentry *dir, const int flag,
67987+ const int acc_mode);
67988+int gr_handle_hardlink(const struct dentry *dentry,
67989+ const struct vfsmount *mnt,
67990+ struct inode *inode,
67991+ const int mode, const struct filename *to);
67992+
67993+int gr_is_capable(const int cap);
67994+int gr_is_capable_nolog(const int cap);
67995+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
67996+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
67997+
67998+void gr_copy_label(struct task_struct *tsk);
67999+void gr_handle_crash(struct task_struct *task, const int sig);
68000+int gr_handle_signal(const struct task_struct *p, const int sig);
68001+int gr_check_crash_uid(const kuid_t uid);
68002+int gr_check_protected_task(const struct task_struct *task);
68003+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
68004+int gr_acl_handle_mmap(const struct file *file,
68005+ const unsigned long prot);
68006+int gr_acl_handle_mprotect(const struct file *file,
68007+ const unsigned long prot);
68008+int gr_check_hidden_task(const struct task_struct *tsk);
68009+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
68010+ const struct vfsmount *mnt);
68011+__u32 gr_acl_handle_utime(const struct dentry *dentry,
68012+ const struct vfsmount *mnt);
68013+__u32 gr_acl_handle_access(const struct dentry *dentry,
68014+ const struct vfsmount *mnt, const int fmode);
68015+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
68016+ const struct vfsmount *mnt, umode_t *mode);
68017+__u32 gr_acl_handle_chown(const struct dentry *dentry,
68018+ const struct vfsmount *mnt);
68019+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
68020+ const struct vfsmount *mnt);
68021+int gr_handle_ptrace(struct task_struct *task, const long request);
68022+int gr_handle_proc_ptrace(struct task_struct *task);
68023+__u32 gr_acl_handle_execve(const struct dentry *dentry,
68024+ const struct vfsmount *mnt);
68025+int gr_check_crash_exec(const struct file *filp);
68026+int gr_acl_is_enabled(void);
68027+void gr_set_kernel_label(struct task_struct *task);
68028+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
68029+ const kgid_t gid);
68030+int gr_set_proc_label(const struct dentry *dentry,
68031+ const struct vfsmount *mnt,
68032+ const int unsafe_flags);
68033+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
68034+ const struct vfsmount *mnt);
68035+__u32 gr_acl_handle_open(const struct dentry *dentry,
68036+ const struct vfsmount *mnt, int acc_mode);
68037+__u32 gr_acl_handle_creat(const struct dentry *dentry,
68038+ const struct dentry *p_dentry,
68039+ const struct vfsmount *p_mnt,
68040+ int open_flags, int acc_mode, const int imode);
68041+void gr_handle_create(const struct dentry *dentry,
68042+ const struct vfsmount *mnt);
68043+void gr_handle_proc_create(const struct dentry *dentry,
68044+ const struct inode *inode);
68045+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
68046+ const struct dentry *parent_dentry,
68047+ const struct vfsmount *parent_mnt,
68048+ const int mode);
68049+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
68050+ const struct dentry *parent_dentry,
68051+ const struct vfsmount *parent_mnt);
68052+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
68053+ const struct vfsmount *mnt);
68054+void gr_handle_delete(const ino_t ino, const dev_t dev);
68055+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
68056+ const struct vfsmount *mnt);
68057+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
68058+ const struct dentry *parent_dentry,
68059+ const struct vfsmount *parent_mnt,
68060+ const struct filename *from);
68061+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
68062+ const struct dentry *parent_dentry,
68063+ const struct vfsmount *parent_mnt,
68064+ const struct dentry *old_dentry,
68065+ const struct vfsmount *old_mnt, const struct filename *to);
68066+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
68067+int gr_acl_handle_rename(struct dentry *new_dentry,
68068+ struct dentry *parent_dentry,
68069+ const struct vfsmount *parent_mnt,
68070+ struct dentry *old_dentry,
68071+ struct inode *old_parent_inode,
68072+ struct vfsmount *old_mnt, const struct filename *newname);
68073+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68074+ struct dentry *old_dentry,
68075+ struct dentry *new_dentry,
68076+ struct vfsmount *mnt, const __u8 replace);
68077+__u32 gr_check_link(const struct dentry *new_dentry,
68078+ const struct dentry *parent_dentry,
68079+ const struct vfsmount *parent_mnt,
68080+ const struct dentry *old_dentry,
68081+ const struct vfsmount *old_mnt);
68082+int gr_acl_handle_filldir(const struct file *file, const char *name,
68083+ const unsigned int namelen, const ino_t ino);
68084+
68085+__u32 gr_acl_handle_unix(const struct dentry *dentry,
68086+ const struct vfsmount *mnt);
68087+void gr_acl_handle_exit(void);
68088+void gr_acl_handle_psacct(struct task_struct *task, const long code);
68089+int gr_acl_handle_procpidmem(const struct task_struct *task);
68090+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
68091+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
68092+void gr_audit_ptrace(struct task_struct *task);
68093+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
68094+void gr_put_exec_file(struct task_struct *task);
68095+
68096+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
68097+
68098+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
68099+extern void gr_learn_resource(const struct task_struct *task, const int res,
68100+ const unsigned long wanted, const int gt);
68101+#else
68102+static inline void gr_learn_resource(const struct task_struct *task, const int res,
68103+ const unsigned long wanted, const int gt)
68104+{
68105+}
68106+#endif
68107+
68108+#ifdef CONFIG_GRKERNSEC_RESLOG
68109+extern void gr_log_resource(const struct task_struct *task, const int res,
68110+ const unsigned long wanted, const int gt);
68111+#else
68112+static inline void gr_log_resource(const struct task_struct *task, const int res,
68113+ const unsigned long wanted, const int gt)
68114+{
68115+}
68116+#endif
68117+
68118+#ifdef CONFIG_GRKERNSEC
68119+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
68120+void gr_handle_vm86(void);
68121+void gr_handle_mem_readwrite(u64 from, u64 to);
68122+
68123+void gr_log_badprocpid(const char *entry);
68124+
68125+extern int grsec_enable_dmesg;
68126+extern int grsec_disable_privio;
68127+
68128+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68129+extern kgid_t grsec_proc_gid;
68130+#endif
68131+
68132+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68133+extern int grsec_enable_chroot_findtask;
68134+#endif
68135+#ifdef CONFIG_GRKERNSEC_SETXID
68136+extern int grsec_enable_setxid;
68137+#endif
68138+#endif
68139+
68140+#endif
68141diff --git a/include/linux/grsock.h b/include/linux/grsock.h
68142new file mode 100644
68143index 0000000..e7ffaaf
68144--- /dev/null
68145+++ b/include/linux/grsock.h
68146@@ -0,0 +1,19 @@
68147+#ifndef __GRSOCK_H
68148+#define __GRSOCK_H
68149+
68150+extern void gr_attach_curr_ip(const struct sock *sk);
68151+extern int gr_handle_sock_all(const int family, const int type,
68152+ const int protocol);
68153+extern int gr_handle_sock_server(const struct sockaddr *sck);
68154+extern int gr_handle_sock_server_other(const struct sock *sck);
68155+extern int gr_handle_sock_client(const struct sockaddr *sck);
68156+extern int gr_search_connect(struct socket * sock,
68157+ struct sockaddr_in * addr);
68158+extern int gr_search_bind(struct socket * sock,
68159+ struct sockaddr_in * addr);
68160+extern int gr_search_listen(struct socket * sock);
68161+extern int gr_search_accept(struct socket * sock);
68162+extern int gr_search_socket(const int domain, const int type,
68163+ const int protocol);
68164+
68165+#endif
68166diff --git a/include/linux/highmem.h b/include/linux/highmem.h
68167index ef788b5..ac41b7b 100644
68168--- a/include/linux/highmem.h
68169+++ b/include/linux/highmem.h
68170@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
68171 kunmap_atomic(kaddr);
68172 }
68173
68174+static inline void sanitize_highpage(struct page *page)
68175+{
68176+ void *kaddr;
68177+ unsigned long flags;
68178+
68179+ local_irq_save(flags);
68180+ kaddr = kmap_atomic(page);
68181+ clear_page(kaddr);
68182+ kunmap_atomic(kaddr);
68183+ local_irq_restore(flags);
68184+}
68185+
68186 static inline void zero_user_segments(struct page *page,
68187 unsigned start1, unsigned end1,
68188 unsigned start2, unsigned end2)
68189diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
68190index 1c7b89a..7f52502 100644
68191--- a/include/linux/hwmon-sysfs.h
68192+++ b/include/linux/hwmon-sysfs.h
68193@@ -25,7 +25,8 @@
68194 struct sensor_device_attribute{
68195 struct device_attribute dev_attr;
68196 int index;
68197-};
68198+} __do_const;
68199+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
68200 #define to_sensor_dev_attr(_dev_attr) \
68201 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
68202
68203@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
68204 struct device_attribute dev_attr;
68205 u8 index;
68206 u8 nr;
68207-};
68208+} __do_const;
68209 #define to_sensor_dev_attr_2(_dev_attr) \
68210 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
68211
68212diff --git a/include/linux/i2c.h b/include/linux/i2c.h
68213index d0c4db7..61b3577 100644
68214--- a/include/linux/i2c.h
68215+++ b/include/linux/i2c.h
68216@@ -369,6 +369,7 @@ struct i2c_algorithm {
68217 /* To determine what the adapter supports */
68218 u32 (*functionality) (struct i2c_adapter *);
68219 };
68220+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
68221
68222 /*
68223 * i2c_adapter is the structure used to identify a physical i2c bus along
68224diff --git a/include/linux/i2o.h b/include/linux/i2o.h
68225index d23c3c2..eb63c81 100644
68226--- a/include/linux/i2o.h
68227+++ b/include/linux/i2o.h
68228@@ -565,7 +565,7 @@ struct i2o_controller {
68229 struct i2o_device *exec; /* Executive */
68230 #if BITS_PER_LONG == 64
68231 spinlock_t context_list_lock; /* lock for context_list */
68232- atomic_t context_list_counter; /* needed for unique contexts */
68233+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
68234 struct list_head context_list; /* list of context id's
68235 and pointers */
68236 #endif
68237diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
68238index aff7ad8..3942bbd 100644
68239--- a/include/linux/if_pppox.h
68240+++ b/include/linux/if_pppox.h
68241@@ -76,7 +76,7 @@ struct pppox_proto {
68242 int (*ioctl)(struct socket *sock, unsigned int cmd,
68243 unsigned long arg);
68244 struct module *owner;
68245-};
68246+} __do_const;
68247
68248 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
68249 extern void unregister_pppox_proto(int proto_num);
68250diff --git a/include/linux/init.h b/include/linux/init.h
68251index 10ed4f4..8e8490d 100644
68252--- a/include/linux/init.h
68253+++ b/include/linux/init.h
68254@@ -39,9 +39,36 @@
68255 * Also note, that this data cannot be "const".
68256 */
68257
68258+#ifdef MODULE
68259+#define add_init_latent_entropy
68260+#define add_devinit_latent_entropy
68261+#define add_cpuinit_latent_entropy
68262+#define add_meminit_latent_entropy
68263+#else
68264+#define add_init_latent_entropy __latent_entropy
68265+
68266+#ifdef CONFIG_HOTPLUG
68267+#define add_devinit_latent_entropy
68268+#else
68269+#define add_devinit_latent_entropy __latent_entropy
68270+#endif
68271+
68272+#ifdef CONFIG_HOTPLUG_CPU
68273+#define add_cpuinit_latent_entropy
68274+#else
68275+#define add_cpuinit_latent_entropy __latent_entropy
68276+#endif
68277+
68278+#ifdef CONFIG_MEMORY_HOTPLUG
68279+#define add_meminit_latent_entropy
68280+#else
68281+#define add_meminit_latent_entropy __latent_entropy
68282+#endif
68283+#endif
68284+
68285 /* These are for everybody (although not all archs will actually
68286 discard it in modules) */
68287-#define __init __section(.init.text) __cold notrace
68288+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
68289 #define __initdata __section(.init.data)
68290 #define __initconst __constsection(.init.rodata)
68291 #define __exitdata __section(.exit.data)
68292@@ -94,7 +121,7 @@
68293 #define __exit __section(.exit.text) __exitused __cold notrace
68294
68295 /* Used for HOTPLUG_CPU */
68296-#define __cpuinit __section(.cpuinit.text) __cold notrace
68297+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
68298 #define __cpuinitdata __section(.cpuinit.data)
68299 #define __cpuinitconst __constsection(.cpuinit.rodata)
68300 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
68301@@ -102,7 +129,7 @@
68302 #define __cpuexitconst __constsection(.cpuexit.rodata)
68303
68304 /* Used for MEMORY_HOTPLUG */
68305-#define __meminit __section(.meminit.text) __cold notrace
68306+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
68307 #define __meminitdata __section(.meminit.data)
68308 #define __meminitconst __constsection(.meminit.rodata)
68309 #define __memexit __section(.memexit.text) __exitused __cold notrace
68310diff --git a/include/linux/init_task.h b/include/linux/init_task.h
68311index 6d087c5..401cab8 100644
68312--- a/include/linux/init_task.h
68313+++ b/include/linux/init_task.h
68314@@ -143,6 +143,12 @@ extern struct task_group root_task_group;
68315
68316 #define INIT_TASK_COMM "swapper"
68317
68318+#ifdef CONFIG_X86
68319+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
68320+#else
68321+#define INIT_TASK_THREAD_INFO
68322+#endif
68323+
68324 /*
68325 * INIT_TASK is used to set up the first task table, touch at
68326 * your own risk!. Base=0, limit=0x1fffff (=2MB)
68327@@ -182,6 +188,7 @@ extern struct task_group root_task_group;
68328 RCU_POINTER_INITIALIZER(cred, &init_cred), \
68329 .comm = INIT_TASK_COMM, \
68330 .thread = INIT_THREAD, \
68331+ INIT_TASK_THREAD_INFO \
68332 .fs = &init_fs, \
68333 .files = &init_files, \
68334 .signal = &init_signals, \
68335diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
68336index 5fa5afe..ac55b25 100644
68337--- a/include/linux/interrupt.h
68338+++ b/include/linux/interrupt.h
68339@@ -430,7 +430,7 @@ enum
68340 /* map softirq index to softirq name. update 'softirq_to_name' in
68341 * kernel/softirq.c when adding a new softirq.
68342 */
68343-extern char *softirq_to_name[NR_SOFTIRQS];
68344+extern const char * const softirq_to_name[NR_SOFTIRQS];
68345
68346 /* softirq mask and active fields moved to irq_cpustat_t in
68347 * asm/hardirq.h to get better cache usage. KAO
68348@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
68349
68350 struct softirq_action
68351 {
68352- void (*action)(struct softirq_action *);
68353-};
68354+ void (*action)(void);
68355+} __no_const;
68356
68357 asmlinkage void do_softirq(void);
68358 asmlinkage void __do_softirq(void);
68359-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
68360+extern void open_softirq(int nr, void (*action)(void));
68361 extern void softirq_init(void);
68362 extern void __raise_softirq_irqoff(unsigned int nr);
68363
68364diff --git a/include/linux/iommu.h b/include/linux/iommu.h
68365index f3b99e1..9b73cee 100644
68366--- a/include/linux/iommu.h
68367+++ b/include/linux/iommu.h
68368@@ -101,7 +101,7 @@ struct iommu_ops {
68369 int (*domain_set_attr)(struct iommu_domain *domain,
68370 enum iommu_attr attr, void *data);
68371 unsigned long pgsize_bitmap;
68372-};
68373+} __do_const;
68374
68375 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
68376 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
68377diff --git a/include/linux/irq.h b/include/linux/irq.h
68378index fdf2c4a..5332486 100644
68379--- a/include/linux/irq.h
68380+++ b/include/linux/irq.h
68381@@ -328,7 +328,8 @@ struct irq_chip {
68382 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
68383
68384 unsigned long flags;
68385-};
68386+} __do_const;
68387+typedef struct irq_chip __no_const irq_chip_no_const;
68388
68389 /*
68390 * irq_chip specific flags
68391diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
68392index 6883e19..06992b1 100644
68393--- a/include/linux/kallsyms.h
68394+++ b/include/linux/kallsyms.h
68395@@ -15,7 +15,8 @@
68396
68397 struct module;
68398
68399-#ifdef CONFIG_KALLSYMS
68400+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
68401+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68402 /* Lookup the address for a symbol. Returns 0 if not found. */
68403 unsigned long kallsyms_lookup_name(const char *name);
68404
68405@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
68406 /* Stupid that this does nothing, but I didn't create this mess. */
68407 #define __print_symbol(fmt, addr)
68408 #endif /*CONFIG_KALLSYMS*/
68409+#else /* when included by kallsyms.c, vsnprintf.c, or
68410+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
68411+extern void __print_symbol(const char *fmt, unsigned long address);
68412+extern int sprint_backtrace(char *buffer, unsigned long address);
68413+extern int sprint_symbol(char *buffer, unsigned long address);
68414+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
68415+const char *kallsyms_lookup(unsigned long addr,
68416+ unsigned long *symbolsize,
68417+ unsigned long *offset,
68418+ char **modname, char *namebuf);
68419+#endif
68420
68421 /* This macro allows us to keep printk typechecking */
68422 static __printf(1, 2)
68423diff --git a/include/linux/key-type.h b/include/linux/key-type.h
68424index 518a53a..5e28358 100644
68425--- a/include/linux/key-type.h
68426+++ b/include/linux/key-type.h
68427@@ -125,7 +125,7 @@ struct key_type {
68428 /* internal fields */
68429 struct list_head link; /* link in types list */
68430 struct lock_class_key lock_class; /* key->sem lock class */
68431-};
68432+} __do_const;
68433
68434 extern struct key_type key_type_keyring;
68435
68436diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
68437index 4dff0c6..1ca9b72 100644
68438--- a/include/linux/kgdb.h
68439+++ b/include/linux/kgdb.h
68440@@ -53,7 +53,7 @@ extern int kgdb_connected;
68441 extern int kgdb_io_module_registered;
68442
68443 extern atomic_t kgdb_setting_breakpoint;
68444-extern atomic_t kgdb_cpu_doing_single_step;
68445+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
68446
68447 extern struct task_struct *kgdb_usethread;
68448 extern struct task_struct *kgdb_contthread;
68449@@ -255,7 +255,7 @@ struct kgdb_arch {
68450 void (*correct_hw_break)(void);
68451
68452 void (*enable_nmi)(bool on);
68453-};
68454+} __do_const;
68455
68456 /**
68457 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
68458@@ -280,7 +280,7 @@ struct kgdb_io {
68459 void (*pre_exception) (void);
68460 void (*post_exception) (void);
68461 int is_console;
68462-};
68463+} __do_const;
68464
68465 extern struct kgdb_arch arch_kgdb_ops;
68466
68467diff --git a/include/linux/kmod.h b/include/linux/kmod.h
68468index 5398d58..5883a34 100644
68469--- a/include/linux/kmod.h
68470+++ b/include/linux/kmod.h
68471@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
68472 * usually useless though. */
68473 extern __printf(2, 3)
68474 int __request_module(bool wait, const char *name, ...);
68475+extern __printf(3, 4)
68476+int ___request_module(bool wait, char *param_name, const char *name, ...);
68477 #define request_module(mod...) __request_module(true, mod)
68478 #define request_module_nowait(mod...) __request_module(false, mod)
68479 #define try_then_request_module(x, mod...) \
68480diff --git a/include/linux/kobject.h b/include/linux/kobject.h
68481index 939b112..ed6ed51 100644
68482--- a/include/linux/kobject.h
68483+++ b/include/linux/kobject.h
68484@@ -111,7 +111,7 @@ struct kobj_type {
68485 struct attribute **default_attrs;
68486 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
68487 const void *(*namespace)(struct kobject *kobj);
68488-};
68489+} __do_const;
68490
68491 struct kobj_uevent_env {
68492 char *envp[UEVENT_NUM_ENVP];
68493@@ -134,6 +134,7 @@ struct kobj_attribute {
68494 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
68495 const char *buf, size_t count);
68496 };
68497+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
68498
68499 extern const struct sysfs_ops kobj_sysfs_ops;
68500
68501diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
68502index f66b065..c2c29b4 100644
68503--- a/include/linux/kobject_ns.h
68504+++ b/include/linux/kobject_ns.h
68505@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
68506 const void *(*netlink_ns)(struct sock *sk);
68507 const void *(*initial_ns)(void);
68508 void (*drop_ns)(void *);
68509-};
68510+} __do_const;
68511
68512 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
68513 int kobj_ns_type_registered(enum kobj_ns_type type);
68514diff --git a/include/linux/kref.h b/include/linux/kref.h
68515index 4972e6e..de4d19b 100644
68516--- a/include/linux/kref.h
68517+++ b/include/linux/kref.h
68518@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
68519 static inline int kref_sub(struct kref *kref, unsigned int count,
68520 void (*release)(struct kref *kref))
68521 {
68522- WARN_ON(release == NULL);
68523+ BUG_ON(release == NULL);
68524
68525 if (atomic_sub_and_test((int) count, &kref->refcount)) {
68526 release(kref);
68527diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
68528index 2c497ab..afe32f5 100644
68529--- a/include/linux/kvm_host.h
68530+++ b/include/linux/kvm_host.h
68531@@ -418,7 +418,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
68532 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
68533 void vcpu_put(struct kvm_vcpu *vcpu);
68534
68535-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
68536+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
68537 struct module *module);
68538 void kvm_exit(void);
68539
68540@@ -574,7 +574,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
68541 struct kvm_guest_debug *dbg);
68542 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
68543
68544-int kvm_arch_init(void *opaque);
68545+int kvm_arch_init(const void *opaque);
68546 void kvm_arch_exit(void);
68547
68548 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
68549diff --git a/include/linux/libata.h b/include/linux/libata.h
68550index 649e5f8..ead5194 100644
68551--- a/include/linux/libata.h
68552+++ b/include/linux/libata.h
68553@@ -915,7 +915,7 @@ struct ata_port_operations {
68554 * fields must be pointers.
68555 */
68556 const struct ata_port_operations *inherits;
68557-};
68558+} __do_const;
68559
68560 struct ata_port_info {
68561 unsigned long flags;
68562diff --git a/include/linux/list.h b/include/linux/list.h
68563index cc6d2aa..c10ee83 100644
68564--- a/include/linux/list.h
68565+++ b/include/linux/list.h
68566@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
68567 extern void list_del(struct list_head *entry);
68568 #endif
68569
68570+extern void __pax_list_add(struct list_head *new,
68571+ struct list_head *prev,
68572+ struct list_head *next);
68573+static inline void pax_list_add(struct list_head *new, struct list_head *head)
68574+{
68575+ __pax_list_add(new, head, head->next);
68576+}
68577+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
68578+{
68579+ __pax_list_add(new, head->prev, head);
68580+}
68581+extern void pax_list_del(struct list_head *entry);
68582+
68583 /**
68584 * list_replace - replace old entry by new one
68585 * @old : the element to be replaced
68586@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
68587 INIT_LIST_HEAD(entry);
68588 }
68589
68590+extern void pax_list_del_init(struct list_head *entry);
68591+
68592 /**
68593 * list_move - delete from one list and add as another's head
68594 * @list: the entry to move
68595diff --git a/include/linux/mm.h b/include/linux/mm.h
68596index 66e2f7c..ea88001 100644
68597--- a/include/linux/mm.h
68598+++ b/include/linux/mm.h
68599@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
68600 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
68601 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
68602 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
68603+
68604+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68605+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
68606+#endif
68607+
68608 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
68609
68610 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
68611@@ -231,6 +236,7 @@ struct vm_operations_struct {
68612 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
68613 unsigned long size, pgoff_t pgoff);
68614 };
68615+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
68616
68617 struct mmu_gather;
68618 struct inode;
68619@@ -1068,34 +1074,6 @@ int set_page_dirty(struct page *page);
68620 int set_page_dirty_lock(struct page *page);
68621 int clear_page_dirty_for_io(struct page *page);
68622
68623-/* Is the vma a continuation of the stack vma above it? */
68624-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
68625-{
68626- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68627-}
68628-
68629-static inline int stack_guard_page_start(struct vm_area_struct *vma,
68630- unsigned long addr)
68631-{
68632- return (vma->vm_flags & VM_GROWSDOWN) &&
68633- (vma->vm_start == addr) &&
68634- !vma_growsdown(vma->vm_prev, addr);
68635-}
68636-
68637-/* Is the vma a continuation of the stack vma below it? */
68638-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
68639-{
68640- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
68641-}
68642-
68643-static inline int stack_guard_page_end(struct vm_area_struct *vma,
68644- unsigned long addr)
68645-{
68646- return (vma->vm_flags & VM_GROWSUP) &&
68647- (vma->vm_end == addr) &&
68648- !vma_growsup(vma->vm_next, addr);
68649-}
68650-
68651 extern pid_t
68652 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
68653
68654@@ -1198,6 +1176,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
68655 }
68656 #endif
68657
68658+#ifdef CONFIG_MMU
68659+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
68660+#else
68661+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68662+{
68663+ return __pgprot(0);
68664+}
68665+#endif
68666+
68667 int vma_wants_writenotify(struct vm_area_struct *vma);
68668
68669 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
68670@@ -1216,8 +1203,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
68671 {
68672 return 0;
68673 }
68674+
68675+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
68676+ unsigned long address)
68677+{
68678+ return 0;
68679+}
68680 #else
68681 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
68682+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
68683 #endif
68684
68685 #ifdef __PAGETABLE_PMD_FOLDED
68686@@ -1226,8 +1220,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
68687 {
68688 return 0;
68689 }
68690+
68691+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
68692+ unsigned long address)
68693+{
68694+ return 0;
68695+}
68696 #else
68697 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
68698+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
68699 #endif
68700
68701 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
68702@@ -1245,11 +1246,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
68703 NULL: pud_offset(pgd, address);
68704 }
68705
68706+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
68707+{
68708+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
68709+ NULL: pud_offset(pgd, address);
68710+}
68711+
68712 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
68713 {
68714 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
68715 NULL: pmd_offset(pud, address);
68716 }
68717+
68718+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
68719+{
68720+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
68721+ NULL: pmd_offset(pud, address);
68722+}
68723 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
68724
68725 #if USE_SPLIT_PTLOCKS
68726@@ -1479,6 +1492,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
68727 unsigned long, unsigned long,
68728 unsigned long, unsigned long);
68729 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68730+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68731
68732 /* These take the mm semaphore themselves */
68733 extern unsigned long vm_brk(unsigned long, unsigned long);
68734@@ -1573,6 +1587,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68735 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68736 struct vm_area_struct **pprev);
68737
68738+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68739+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68740+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68741+
68742 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68743 NULL if none. Assume start_addr < end_addr. */
68744 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68745@@ -1601,15 +1619,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
68746 return vma;
68747 }
68748
68749-#ifdef CONFIG_MMU
68750-pgprot_t vm_get_page_prot(unsigned long vm_flags);
68751-#else
68752-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
68753-{
68754- return __pgprot(0);
68755-}
68756-#endif
68757-
68758 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
68759 unsigned long change_prot_numa(struct vm_area_struct *vma,
68760 unsigned long start, unsigned long end);
68761@@ -1721,7 +1730,7 @@ extern int unpoison_memory(unsigned long pfn);
68762 extern int sysctl_memory_failure_early_kill;
68763 extern int sysctl_memory_failure_recovery;
68764 extern void shake_page(struct page *p, int access);
68765-extern atomic_long_t mce_bad_pages;
68766+extern atomic_long_unchecked_t mce_bad_pages;
68767 extern int soft_offline_page(struct page *page, int flags);
68768
68769 extern void dump_page(struct page *page);
68770@@ -1752,5 +1761,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
68771 static inline bool page_is_guard(struct page *page) { return false; }
68772 #endif /* CONFIG_DEBUG_PAGEALLOC */
68773
68774+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68775+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68776+#else
68777+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68778+#endif
68779+
68780 #endif /* __KERNEL__ */
68781 #endif /* _LINUX_MM_H */
68782diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68783index f8f5162..6276a36 100644
68784--- a/include/linux/mm_types.h
68785+++ b/include/linux/mm_types.h
68786@@ -288,6 +288,8 @@ struct vm_area_struct {
68787 #ifdef CONFIG_NUMA
68788 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68789 #endif
68790+
68791+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68792 };
68793
68794 struct core_thread {
68795@@ -362,7 +364,7 @@ struct mm_struct {
68796 unsigned long def_flags;
68797 unsigned long nr_ptes; /* Page table pages */
68798 unsigned long start_code, end_code, start_data, end_data;
68799- unsigned long start_brk, brk, start_stack;
68800+ unsigned long brk_gap, start_brk, brk, start_stack;
68801 unsigned long arg_start, arg_end, env_start, env_end;
68802
68803 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
68804@@ -436,6 +438,24 @@ struct mm_struct {
68805 int first_nid;
68806 #endif
68807 struct uprobes_state uprobes_state;
68808+
68809+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68810+ unsigned long pax_flags;
68811+#endif
68812+
68813+#ifdef CONFIG_PAX_DLRESOLVE
68814+ unsigned long call_dl_resolve;
68815+#endif
68816+
68817+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68818+ unsigned long call_syscall;
68819+#endif
68820+
68821+#ifdef CONFIG_PAX_ASLR
68822+ unsigned long delta_mmap; /* randomized offset */
68823+ unsigned long delta_stack; /* randomized offset */
68824+#endif
68825+
68826 };
68827
68828 /* first nid will either be a valid NID or one of these values */
68829diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
68830index c5d5278..f0b68c8 100644
68831--- a/include/linux/mmiotrace.h
68832+++ b/include/linux/mmiotrace.h
68833@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
68834 /* Called from ioremap.c */
68835 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
68836 void __iomem *addr);
68837-extern void mmiotrace_iounmap(volatile void __iomem *addr);
68838+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
68839
68840 /* For anyone to insert markers. Remember trailing newline. */
68841 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
68842@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
68843 {
68844 }
68845
68846-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
68847+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
68848 {
68849 }
68850
68851diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68852index 73b64a3..6562925 100644
68853--- a/include/linux/mmzone.h
68854+++ b/include/linux/mmzone.h
68855@@ -412,7 +412,7 @@ struct zone {
68856 unsigned long flags; /* zone flags, see below */
68857
68858 /* Zone statistics */
68859- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68860+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68861
68862 /*
68863 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
68864diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68865index fed3def..c933f99 100644
68866--- a/include/linux/mod_devicetable.h
68867+++ b/include/linux/mod_devicetable.h
68868@@ -12,7 +12,7 @@
68869 typedef unsigned long kernel_ulong_t;
68870 #endif
68871
68872-#define PCI_ANY_ID (~0)
68873+#define PCI_ANY_ID ((__u16)~0)
68874
68875 struct pci_device_id {
68876 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68877@@ -139,7 +139,7 @@ struct usb_device_id {
68878 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68879 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
68880
68881-#define HID_ANY_ID (~0)
68882+#define HID_ANY_ID (~0U)
68883 #define HID_BUS_ANY 0xffff
68884 #define HID_GROUP_ANY 0x0000
68885
68886@@ -498,7 +498,7 @@ struct dmi_system_id {
68887 const char *ident;
68888 struct dmi_strmatch matches[4];
68889 void *driver_data;
68890-};
68891+} __do_const;
68892 /*
68893 * struct dmi_device_id appears during expansion of
68894 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
68895diff --git a/include/linux/module.h b/include/linux/module.h
68896index 1375ee3..ced8177 100644
68897--- a/include/linux/module.h
68898+++ b/include/linux/module.h
68899@@ -17,9 +17,11 @@
68900 #include <linux/moduleparam.h>
68901 #include <linux/tracepoint.h>
68902 #include <linux/export.h>
68903+#include <linux/fs.h>
68904
68905 #include <linux/percpu.h>
68906 #include <asm/module.h>
68907+#include <asm/pgtable.h>
68908
68909 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
68910 #define MODULE_SIG_STRING "~Module signature appended~\n"
68911@@ -54,12 +56,13 @@ struct module_attribute {
68912 int (*test)(struct module *);
68913 void (*free)(struct module *);
68914 };
68915+typedef struct module_attribute __no_const module_attribute_no_const;
68916
68917 struct module_version_attribute {
68918 struct module_attribute mattr;
68919 const char *module_name;
68920 const char *version;
68921-} __attribute__ ((__aligned__(sizeof(void *))));
68922+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
68923
68924 extern ssize_t __modver_version_show(struct module_attribute *,
68925 struct module_kobject *, char *);
68926@@ -232,7 +235,7 @@ struct module
68927
68928 /* Sysfs stuff. */
68929 struct module_kobject mkobj;
68930- struct module_attribute *modinfo_attrs;
68931+ module_attribute_no_const *modinfo_attrs;
68932 const char *version;
68933 const char *srcversion;
68934 struct kobject *holders_dir;
68935@@ -281,19 +284,16 @@ struct module
68936 int (*init)(void);
68937
68938 /* If this is non-NULL, vfree after init() returns */
68939- void *module_init;
68940+ void *module_init_rx, *module_init_rw;
68941
68942 /* Here is the actual code + data, vfree'd on unload. */
68943- void *module_core;
68944+ void *module_core_rx, *module_core_rw;
68945
68946 /* Here are the sizes of the init and core sections */
68947- unsigned int init_size, core_size;
68948+ unsigned int init_size_rw, core_size_rw;
68949
68950 /* The size of the executable code in each section. */
68951- unsigned int init_text_size, core_text_size;
68952-
68953- /* Size of RO sections of the module (text+rodata) */
68954- unsigned int init_ro_size, core_ro_size;
68955+ unsigned int init_size_rx, core_size_rx;
68956
68957 /* Arch-specific module values */
68958 struct mod_arch_specific arch;
68959@@ -349,6 +349,10 @@ struct module
68960 #ifdef CONFIG_EVENT_TRACING
68961 struct ftrace_event_call **trace_events;
68962 unsigned int num_trace_events;
68963+ struct file_operations trace_id;
68964+ struct file_operations trace_enable;
68965+ struct file_operations trace_format;
68966+ struct file_operations trace_filter;
68967 #endif
68968 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68969 unsigned int num_ftrace_callsites;
68970@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
68971 bool is_module_percpu_address(unsigned long addr);
68972 bool is_module_text_address(unsigned long addr);
68973
68974+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68975+{
68976+
68977+#ifdef CONFIG_PAX_KERNEXEC
68978+ if (ktla_ktva(addr) >= (unsigned long)start &&
68979+ ktla_ktva(addr) < (unsigned long)start + size)
68980+ return 1;
68981+#endif
68982+
68983+ return ((void *)addr >= start && (void *)addr < start + size);
68984+}
68985+
68986+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68987+{
68988+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68989+}
68990+
68991+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68992+{
68993+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68994+}
68995+
68996+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68997+{
68998+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68999+}
69000+
69001+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
69002+{
69003+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
69004+}
69005+
69006 static inline int within_module_core(unsigned long addr, struct module *mod)
69007 {
69008- return (unsigned long)mod->module_core <= addr &&
69009- addr < (unsigned long)mod->module_core + mod->core_size;
69010+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
69011 }
69012
69013 static inline int within_module_init(unsigned long addr, struct module *mod)
69014 {
69015- return (unsigned long)mod->module_init <= addr &&
69016- addr < (unsigned long)mod->module_init + mod->init_size;
69017+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
69018 }
69019
69020 /* Search for module by name: must hold module_mutex. */
69021diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
69022index 560ca53..5ee8d73 100644
69023--- a/include/linux/moduleloader.h
69024+++ b/include/linux/moduleloader.h
69025@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
69026
69027 /* Allocator used for allocating struct module, core sections and init
69028 sections. Returns NULL on failure. */
69029-void *module_alloc(unsigned long size);
69030+void *module_alloc(unsigned long size) __size_overflow(1);
69031+
69032+#ifdef CONFIG_PAX_KERNEXEC
69033+void *module_alloc_exec(unsigned long size) __size_overflow(1);
69034+#else
69035+#define module_alloc_exec(x) module_alloc(x)
69036+#endif
69037
69038 /* Free memory returned from module_alloc. */
69039 void module_free(struct module *mod, void *module_region);
69040
69041+#ifdef CONFIG_PAX_KERNEXEC
69042+void module_free_exec(struct module *mod, void *module_region);
69043+#else
69044+#define module_free_exec(x, y) module_free((x), (y))
69045+#endif
69046+
69047 /*
69048 * Apply the given relocation to the (simplified) ELF. Return -error
69049 * or 0.
69050@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
69051 unsigned int relsec,
69052 struct module *me)
69053 {
69054+#ifdef CONFIG_MODULES
69055 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
69056+#endif
69057 return -ENOEXEC;
69058 }
69059 #endif
69060@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
69061 unsigned int relsec,
69062 struct module *me)
69063 {
69064+#ifdef CONFIG_MODULES
69065 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
69066+#endif
69067 return -ENOEXEC;
69068 }
69069 #endif
69070diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
69071index 137b419..fe663ec 100644
69072--- a/include/linux/moduleparam.h
69073+++ b/include/linux/moduleparam.h
69074@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
69075 * @len is usually just sizeof(string).
69076 */
69077 #define module_param_string(name, string, len, perm) \
69078- static const struct kparam_string __param_string_##name \
69079+ static const struct kparam_string __param_string_##name __used \
69080 = { len, string }; \
69081 __module_param_call(MODULE_PARAM_PREFIX, name, \
69082 &param_ops_string, \
69083@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
69084 */
69085 #define module_param_array_named(name, array, type, nump, perm) \
69086 param_check_##type(name, &(array)[0]); \
69087- static const struct kparam_array __param_arr_##name \
69088+ static const struct kparam_array __param_arr_##name __used \
69089 = { .max = ARRAY_SIZE(array), .num = nump, \
69090 .ops = &param_ops_##type, \
69091 .elemsize = sizeof(array[0]), .elem = array }; \
69092diff --git a/include/linux/namei.h b/include/linux/namei.h
69093index 5a5ff57..5ae5070 100644
69094--- a/include/linux/namei.h
69095+++ b/include/linux/namei.h
69096@@ -19,7 +19,7 @@ struct nameidata {
69097 unsigned seq;
69098 int last_type;
69099 unsigned depth;
69100- char *saved_names[MAX_NESTED_LINKS + 1];
69101+ const char *saved_names[MAX_NESTED_LINKS + 1];
69102 };
69103
69104 /*
69105@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
69106
69107 extern void nd_jump_link(struct nameidata *nd, struct path *path);
69108
69109-static inline void nd_set_link(struct nameidata *nd, char *path)
69110+static inline void nd_set_link(struct nameidata *nd, const char *path)
69111 {
69112 nd->saved_names[nd->depth] = path;
69113 }
69114
69115-static inline char *nd_get_link(struct nameidata *nd)
69116+static inline const char *nd_get_link(const struct nameidata *nd)
69117 {
69118 return nd->saved_names[nd->depth];
69119 }
69120diff --git a/include/linux/net.h b/include/linux/net.h
69121index aa16731..514b875 100644
69122--- a/include/linux/net.h
69123+++ b/include/linux/net.h
69124@@ -183,7 +183,7 @@ struct net_proto_family {
69125 int (*create)(struct net *net, struct socket *sock,
69126 int protocol, int kern);
69127 struct module *owner;
69128-};
69129+} __do_const;
69130
69131 struct iovec;
69132 struct kvec;
69133diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
69134index 9ef07d0..130a5d9 100644
69135--- a/include/linux/netdevice.h
69136+++ b/include/linux/netdevice.h
69137@@ -1012,6 +1012,7 @@ struct net_device_ops {
69138 u32 pid, u32 seq,
69139 struct net_device *dev);
69140 };
69141+typedef struct net_device_ops __no_const net_device_ops_no_const;
69142
69143 /*
69144 * The DEVICE structure.
69145@@ -1078,7 +1079,7 @@ struct net_device {
69146 int iflink;
69147
69148 struct net_device_stats stats;
69149- atomic_long_t rx_dropped; /* dropped packets by core network
69150+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
69151 * Do not use this in drivers.
69152 */
69153
69154diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
69155index ee14284..bc65d63 100644
69156--- a/include/linux/netfilter.h
69157+++ b/include/linux/netfilter.h
69158@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
69159 #endif
69160 /* Use the module struct to lock set/get code in place */
69161 struct module *owner;
69162-};
69163+} __do_const;
69164
69165 /* Function to register/unregister hook points. */
69166 int nf_register_hook(struct nf_hook_ops *reg);
69167diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
69168index 7958e84..ed74d7a 100644
69169--- a/include/linux/netfilter/ipset/ip_set.h
69170+++ b/include/linux/netfilter/ipset/ip_set.h
69171@@ -98,7 +98,7 @@ struct ip_set_type_variant {
69172 /* Return true if "b" set is the same as "a"
69173 * according to the create set parameters */
69174 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
69175-};
69176+} __do_const;
69177
69178 /* The core set type structure */
69179 struct ip_set_type {
69180diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
69181index 4966dde..7d8ce06 100644
69182--- a/include/linux/netfilter/nfnetlink.h
69183+++ b/include/linux/netfilter/nfnetlink.h
69184@@ -16,7 +16,7 @@ struct nfnl_callback {
69185 const struct nlattr * const cda[]);
69186 const struct nla_policy *policy; /* netlink attribute policy */
69187 const u_int16_t attr_count; /* number of nlattr's */
69188-};
69189+} __do_const;
69190
69191 struct nfnetlink_subsystem {
69192 const char *name;
69193diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
69194new file mode 100644
69195index 0000000..33f4af8
69196--- /dev/null
69197+++ b/include/linux/netfilter/xt_gradm.h
69198@@ -0,0 +1,9 @@
69199+#ifndef _LINUX_NETFILTER_XT_GRADM_H
69200+#define _LINUX_NETFILTER_XT_GRADM_H 1
69201+
69202+struct xt_gradm_mtinfo {
69203+ __u16 flags;
69204+ __u16 invflags;
69205+};
69206+
69207+#endif
69208diff --git a/include/linux/nls.h b/include/linux/nls.h
69209index 5dc635f..35f5e11 100644
69210--- a/include/linux/nls.h
69211+++ b/include/linux/nls.h
69212@@ -31,7 +31,7 @@ struct nls_table {
69213 const unsigned char *charset2upper;
69214 struct module *owner;
69215 struct nls_table *next;
69216-};
69217+} __do_const;
69218
69219 /* this value hold the maximum octet of charset */
69220 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
69221diff --git a/include/linux/notifier.h b/include/linux/notifier.h
69222index d65746e..62e72c2 100644
69223--- a/include/linux/notifier.h
69224+++ b/include/linux/notifier.h
69225@@ -51,7 +51,8 @@ struct notifier_block {
69226 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
69227 struct notifier_block __rcu *next;
69228 int priority;
69229-};
69230+} __do_const;
69231+typedef struct notifier_block __no_const notifier_block_no_const;
69232
69233 struct atomic_notifier_head {
69234 spinlock_t lock;
69235diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
69236index a4c5624..79d6d88 100644
69237--- a/include/linux/oprofile.h
69238+++ b/include/linux/oprofile.h
69239@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
69240 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
69241 char const * name, ulong * val);
69242
69243-/** Create a file for read-only access to an atomic_t. */
69244+/** Create a file for read-only access to an atomic_unchecked_t. */
69245 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
69246- char const * name, atomic_t * val);
69247+ char const * name, atomic_unchecked_t * val);
69248
69249 /** create a directory */
69250 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
69251diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
69252index 45fc162..01a4068 100644
69253--- a/include/linux/pci_hotplug.h
69254+++ b/include/linux/pci_hotplug.h
69255@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
69256 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
69257 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
69258 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
69259-};
69260+} __do_const;
69261+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
69262
69263 /**
69264 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
69265diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
69266index 6bfb2faa..e5bc5e5 100644
69267--- a/include/linux/perf_event.h
69268+++ b/include/linux/perf_event.h
69269@@ -328,8 +328,8 @@ struct perf_event {
69270
69271 enum perf_event_active_state state;
69272 unsigned int attach_state;
69273- local64_t count;
69274- atomic64_t child_count;
69275+ local64_t count; /* PaX: fix it one day */
69276+ atomic64_unchecked_t child_count;
69277
69278 /*
69279 * These are the total time in nanoseconds that the event
69280@@ -380,8 +380,8 @@ struct perf_event {
69281 * These accumulate total time (in nanoseconds) that children
69282 * events have been enabled and running, respectively.
69283 */
69284- atomic64_t child_total_time_enabled;
69285- atomic64_t child_total_time_running;
69286+ atomic64_unchecked_t child_total_time_enabled;
69287+ atomic64_unchecked_t child_total_time_running;
69288
69289 /*
69290 * Protect attach/detach and child_list:
69291@@ -801,7 +801,7 @@ static inline void perf_event_task_tick(void) { }
69292 */
69293 #define perf_cpu_notifier(fn) \
69294 do { \
69295- static struct notifier_block fn##_nb __cpuinitdata = \
69296+ static struct notifier_block fn##_nb = \
69297 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
69298 unsigned long cpu = smp_processor_id(); \
69299 unsigned long flags; \
69300diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
69301index ad1a427..6419649 100644
69302--- a/include/linux/pipe_fs_i.h
69303+++ b/include/linux/pipe_fs_i.h
69304@@ -45,9 +45,9 @@ struct pipe_buffer {
69305 struct pipe_inode_info {
69306 wait_queue_head_t wait;
69307 unsigned int nrbufs, curbuf, buffers;
69308- unsigned int readers;
69309- unsigned int writers;
69310- unsigned int waiting_writers;
69311+ atomic_t readers;
69312+ atomic_t writers;
69313+ atomic_t waiting_writers;
69314 unsigned int r_counter;
69315 unsigned int w_counter;
69316 struct page *tmp_page;
69317diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
69318index 5f28cae..3d23723 100644
69319--- a/include/linux/platform_data/usb-ehci-s5p.h
69320+++ b/include/linux/platform_data/usb-ehci-s5p.h
69321@@ -14,7 +14,7 @@
69322 struct s5p_ehci_platdata {
69323 int (*phy_init)(struct platform_device *pdev, int type);
69324 int (*phy_exit)(struct platform_device *pdev, int type);
69325-};
69326+} __no_const;
69327
69328 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
69329
69330diff --git a/include/linux/platform_data/usb-exynos.h b/include/linux/platform_data/usb-exynos.h
69331index c256c59..8ea94c7 100644
69332--- a/include/linux/platform_data/usb-exynos.h
69333+++ b/include/linux/platform_data/usb-exynos.h
69334@@ -14,7 +14,7 @@
69335 struct exynos4_ohci_platdata {
69336 int (*phy_init)(struct platform_device *pdev, int type);
69337 int (*phy_exit)(struct platform_device *pdev, int type);
69338-};
69339+} __no_const;
69340
69341 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
69342
69343diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
69344index 7c1d252..c5c773e 100644
69345--- a/include/linux/pm_domain.h
69346+++ b/include/linux/pm_domain.h
69347@@ -48,7 +48,7 @@ struct gpd_dev_ops {
69348
69349 struct gpd_cpu_data {
69350 unsigned int saved_exit_latency;
69351- struct cpuidle_state *idle_state;
69352+ cpuidle_state_no_const *idle_state;
69353 };
69354
69355 struct generic_pm_domain {
69356diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
69357index f271860..6b3bec5 100644
69358--- a/include/linux/pm_runtime.h
69359+++ b/include/linux/pm_runtime.h
69360@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
69361
69362 static inline void pm_runtime_mark_last_busy(struct device *dev)
69363 {
69364- ACCESS_ONCE(dev->power.last_busy) = jiffies;
69365+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
69366 }
69367
69368 #else /* !CONFIG_PM_RUNTIME */
69369diff --git a/include/linux/pnp.h b/include/linux/pnp.h
69370index 195aafc..49a7bc2 100644
69371--- a/include/linux/pnp.h
69372+++ b/include/linux/pnp.h
69373@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
69374 struct pnp_fixup {
69375 char id[7];
69376 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
69377-};
69378+} __do_const;
69379
69380 /* config parameters */
69381 #define PNP_CONFIG_NORMAL 0x0001
69382diff --git a/include/linux/poison.h b/include/linux/poison.h
69383index 2110a81..13a11bb 100644
69384--- a/include/linux/poison.h
69385+++ b/include/linux/poison.h
69386@@ -19,8 +19,8 @@
69387 * under normal circumstances, used to verify that nobody uses
69388 * non-initialized list entries.
69389 */
69390-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
69391-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
69392+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
69393+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
69394
69395 /********** include/linux/timer.h **********/
69396 /*
69397diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
69398index c0f44c2..1572583 100644
69399--- a/include/linux/power/smartreflex.h
69400+++ b/include/linux/power/smartreflex.h
69401@@ -238,7 +238,7 @@ struct omap_sr_class_data {
69402 int (*notify)(struct omap_sr *sr, u32 status);
69403 u8 notify_flags;
69404 u8 class_type;
69405-};
69406+} __do_const;
69407
69408 /**
69409 * struct omap_sr_nvalue_table - Smartreflex n-target value info
69410diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
69411index 4ea1d37..80f4b33 100644
69412--- a/include/linux/ppp-comp.h
69413+++ b/include/linux/ppp-comp.h
69414@@ -84,7 +84,7 @@ struct compressor {
69415 struct module *owner;
69416 /* Extra skb space needed by the compressor algorithm */
69417 unsigned int comp_extra;
69418-};
69419+} __do_const;
69420
69421 /*
69422 * The return value from decompress routine is the length of the
69423diff --git a/include/linux/printk.h b/include/linux/printk.h
69424index 9afc01e..92c32e8 100644
69425--- a/include/linux/printk.h
69426+++ b/include/linux/printk.h
69427@@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
69428 extern int printk_needs_cpu(int cpu);
69429 extern void printk_tick(void);
69430
69431+extern int kptr_restrict;
69432+
69433 #ifdef CONFIG_PRINTK
69434 asmlinkage __printf(5, 0)
69435 int vprintk_emit(int facility, int level,
69436@@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
69437
69438 extern int printk_delay_msec;
69439 extern int dmesg_restrict;
69440-extern int kptr_restrict;
69441
69442 void log_buf_kexec_setup(void);
69443 void __init setup_log_buf(int early);
69444diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
69445index 32676b3..8f7a182 100644
69446--- a/include/linux/proc_fs.h
69447+++ b/include/linux/proc_fs.h
69448@@ -159,6 +159,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
69449 return proc_create_data(name, mode, parent, proc_fops, NULL);
69450 }
69451
69452+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
69453+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
69454+{
69455+#ifdef CONFIG_GRKERNSEC_PROC_USER
69456+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
69457+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69458+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
69459+#else
69460+ return proc_create_data(name, mode, parent, proc_fops, NULL);
69461+#endif
69462+}
69463+
69464 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
69465 umode_t mode, struct proc_dir_entry *base,
69466 read_proc_t *read_proc, void * data)
69467diff --git a/include/linux/random.h b/include/linux/random.h
69468index d984608..d6f0042 100644
69469--- a/include/linux/random.h
69470+++ b/include/linux/random.h
69471@@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
69472 u32 prandom_u32_state(struct rnd_state *);
69473 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
69474
69475+static inline unsigned long pax_get_random_long(void)
69476+{
69477+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
69478+}
69479+
69480 /*
69481 * Handle minimum values for seeds
69482 */
69483diff --git a/include/linux/rculist.h b/include/linux/rculist.h
69484index c92dd28..08f4eab 100644
69485--- a/include/linux/rculist.h
69486+++ b/include/linux/rculist.h
69487@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
69488 struct list_head *prev, struct list_head *next);
69489 #endif
69490
69491+extern void __pax_list_add_rcu(struct list_head *new,
69492+ struct list_head *prev, struct list_head *next);
69493+
69494 /**
69495 * list_add_rcu - add a new entry to rcu-protected list
69496 * @new: new entry to be added
69497@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
69498 __list_add_rcu(new, head, head->next);
69499 }
69500
69501+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
69502+{
69503+ __pax_list_add_rcu(new, head, head->next);
69504+}
69505+
69506 /**
69507 * list_add_tail_rcu - add a new entry to rcu-protected list
69508 * @new: new entry to be added
69509@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
69510 __list_add_rcu(new, head->prev, head);
69511 }
69512
69513+static inline void pax_list_add_tail_rcu(struct list_head *new,
69514+ struct list_head *head)
69515+{
69516+ __pax_list_add_rcu(new, head->prev, head);
69517+}
69518+
69519 /**
69520 * list_del_rcu - deletes entry from list without re-initialization
69521 * @entry: the element to delete from the list.
69522@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
69523 entry->prev = LIST_POISON2;
69524 }
69525
69526+extern void pax_list_del_rcu(struct list_head *entry);
69527+
69528 /**
69529 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
69530 * @n: the element to delete from the hash list.
69531diff --git a/include/linux/reboot.h b/include/linux/reboot.h
69532index 23b3630..e1bc12b 100644
69533--- a/include/linux/reboot.h
69534+++ b/include/linux/reboot.h
69535@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
69536 * Architecture-specific implementations of sys_reboot commands.
69537 */
69538
69539-extern void machine_restart(char *cmd);
69540-extern void machine_halt(void);
69541-extern void machine_power_off(void);
69542+extern void machine_restart(char *cmd) __noreturn;
69543+extern void machine_halt(void) __noreturn;
69544+extern void machine_power_off(void) __noreturn;
69545
69546 extern void machine_shutdown(void);
69547 struct pt_regs;
69548@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
69549 */
69550
69551 extern void kernel_restart_prepare(char *cmd);
69552-extern void kernel_restart(char *cmd);
69553-extern void kernel_halt(void);
69554-extern void kernel_power_off(void);
69555+extern void kernel_restart(char *cmd) __noreturn;
69556+extern void kernel_halt(void) __noreturn;
69557+extern void kernel_power_off(void) __noreturn;
69558
69559 extern int C_A_D; /* for sysctl */
69560 void ctrl_alt_del(void);
69561@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
69562 * Emergency restart, callable from an interrupt handler.
69563 */
69564
69565-extern void emergency_restart(void);
69566+extern void emergency_restart(void) __noreturn;
69567 #include <asm/emergency-restart.h>
69568
69569 #endif /* _LINUX_REBOOT_H */
69570diff --git a/include/linux/regset.h b/include/linux/regset.h
69571index 8e0c9fe..ac4d221 100644
69572--- a/include/linux/regset.h
69573+++ b/include/linux/regset.h
69574@@ -161,7 +161,8 @@ struct user_regset {
69575 unsigned int align;
69576 unsigned int bias;
69577 unsigned int core_note_type;
69578-};
69579+} __do_const;
69580+typedef struct user_regset __no_const user_regset_no_const;
69581
69582 /**
69583 * struct user_regset_view - available regsets
69584diff --git a/include/linux/relay.h b/include/linux/relay.h
69585index 91cacc3..b55ff74 100644
69586--- a/include/linux/relay.h
69587+++ b/include/linux/relay.h
69588@@ -160,7 +160,7 @@ struct rchan_callbacks
69589 * The callback should return 0 if successful, negative if not.
69590 */
69591 int (*remove_buf_file)(struct dentry *dentry);
69592-};
69593+} __no_const;
69594
69595 /*
69596 * CONFIG_RELAY kernel API, kernel/relay.c
69597diff --git a/include/linux/rio.h b/include/linux/rio.h
69598index a3e7842..d973ca6 100644
69599--- a/include/linux/rio.h
69600+++ b/include/linux/rio.h
69601@@ -339,7 +339,7 @@ struct rio_ops {
69602 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
69603 u64 rstart, u32 size, u32 flags);
69604 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
69605-};
69606+} __no_const;
69607
69608 #define RIO_RESOURCE_MEM 0x00000100
69609 #define RIO_RESOURCE_DOORBELL 0x00000200
69610diff --git a/include/linux/rmap.h b/include/linux/rmap.h
69611index c20635c..2f5def4 100644
69612--- a/include/linux/rmap.h
69613+++ b/include/linux/rmap.h
69614@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
69615 void anon_vma_init(void); /* create anon_vma_cachep */
69616 int anon_vma_prepare(struct vm_area_struct *);
69617 void unlink_anon_vmas(struct vm_area_struct *);
69618-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
69619-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
69620+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
69621+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
69622
69623 static inline void anon_vma_merge(struct vm_area_struct *vma,
69624 struct vm_area_struct *next)
69625diff --git a/include/linux/sched.h b/include/linux/sched.h
69626index d211247..a5cbf38b 100644
69627--- a/include/linux/sched.h
69628+++ b/include/linux/sched.h
69629@@ -61,6 +61,7 @@ struct bio_list;
69630 struct fs_struct;
69631 struct perf_event_context;
69632 struct blk_plug;
69633+struct linux_binprm;
69634
69635 /*
69636 * List of flags we want to share for kernel threads,
69637@@ -354,10 +355,23 @@ struct user_namespace;
69638 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
69639
69640 extern int sysctl_max_map_count;
69641+extern unsigned long sysctl_heap_stack_gap;
69642
69643 #include <linux/aio.h>
69644
69645 #ifdef CONFIG_MMU
69646+
69647+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
69648+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
69649+#else
69650+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
69651+{
69652+ return 0;
69653+}
69654+#endif
69655+
69656+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
69657+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
69658 extern void arch_pick_mmap_layout(struct mm_struct *mm);
69659 extern unsigned long
69660 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
69661@@ -639,6 +653,17 @@ struct signal_struct {
69662 #ifdef CONFIG_TASKSTATS
69663 struct taskstats *stats;
69664 #endif
69665+
69666+#ifdef CONFIG_GRKERNSEC
69667+ u32 curr_ip;
69668+ u32 saved_ip;
69669+ u32 gr_saddr;
69670+ u32 gr_daddr;
69671+ u16 gr_sport;
69672+ u16 gr_dport;
69673+ u8 used_accept:1;
69674+#endif
69675+
69676 #ifdef CONFIG_AUDIT
69677 unsigned audit_tty;
69678 struct tty_audit_buf *tty_audit_buf;
69679@@ -717,6 +742,11 @@ struct user_struct {
69680 struct key *session_keyring; /* UID's default session keyring */
69681 #endif
69682
69683+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
69684+ unsigned int banned;
69685+ unsigned long ban_expires;
69686+#endif
69687+
69688 /* Hash table maintenance information */
69689 struct hlist_node uidhash_node;
69690 kuid_t uid;
69691@@ -1116,7 +1146,7 @@ struct sched_class {
69692 #ifdef CONFIG_FAIR_GROUP_SCHED
69693 void (*task_move_group) (struct task_struct *p, int on_rq);
69694 #endif
69695-};
69696+} __do_const;
69697
69698 struct load_weight {
69699 unsigned long weight, inv_weight;
69700@@ -1360,8 +1390,8 @@ struct task_struct {
69701 struct list_head thread_group;
69702
69703 struct completion *vfork_done; /* for vfork() */
69704- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
69705- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69706+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
69707+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69708
69709 cputime_t utime, stime, utimescaled, stimescaled;
69710 cputime_t gtime;
69711@@ -1377,11 +1407,6 @@ struct task_struct {
69712 struct task_cputime cputime_expires;
69713 struct list_head cpu_timers[3];
69714
69715-/* process credentials */
69716- const struct cred __rcu *real_cred; /* objective and real subjective task
69717- * credentials (COW) */
69718- const struct cred __rcu *cred; /* effective (overridable) subjective task
69719- * credentials (COW) */
69720 char comm[TASK_COMM_LEN]; /* executable name excluding path
69721 - access with [gs]et_task_comm (which lock
69722 it with task_lock())
69723@@ -1398,6 +1423,10 @@ struct task_struct {
69724 #endif
69725 /* CPU-specific state of this task */
69726 struct thread_struct thread;
69727+/* thread_info moved to task_struct */
69728+#ifdef CONFIG_X86
69729+ struct thread_info tinfo;
69730+#endif
69731 /* filesystem information */
69732 struct fs_struct *fs;
69733 /* open file information */
69734@@ -1471,6 +1500,10 @@ struct task_struct {
69735 gfp_t lockdep_reclaim_gfp;
69736 #endif
69737
69738+/* process credentials */
69739+ const struct cred __rcu *real_cred; /* objective and real subjective task
69740+ * credentials (COW) */
69741+
69742 /* journalling filesystem info */
69743 void *journal_info;
69744
69745@@ -1509,6 +1542,10 @@ struct task_struct {
69746 /* cg_list protected by css_set_lock and tsk->alloc_lock */
69747 struct list_head cg_list;
69748 #endif
69749+
69750+ const struct cred __rcu *cred; /* effective (overridable) subjective task
69751+ * credentials (COW) */
69752+
69753 #ifdef CONFIG_FUTEX
69754 struct robust_list_head __user *robust_list;
69755 #ifdef CONFIG_COMPAT
69756@@ -1605,8 +1642,74 @@ struct task_struct {
69757 #ifdef CONFIG_UPROBES
69758 struct uprobe_task *utask;
69759 #endif
69760+
69761+#ifdef CONFIG_GRKERNSEC
69762+ /* grsecurity */
69763+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69764+ u64 exec_id;
69765+#endif
69766+#ifdef CONFIG_GRKERNSEC_SETXID
69767+ const struct cred *delayed_cred;
69768+#endif
69769+ struct dentry *gr_chroot_dentry;
69770+ struct acl_subject_label *acl;
69771+ struct acl_role_label *role;
69772+ struct file *exec_file;
69773+ unsigned long brute_expires;
69774+ u16 acl_role_id;
69775+ /* is this the task that authenticated to the special role */
69776+ u8 acl_sp_role;
69777+ u8 is_writable;
69778+ u8 brute;
69779+ u8 gr_is_chrooted;
69780+#endif
69781+
69782 };
69783
69784+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
69785+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
69786+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
69787+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
69788+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
69789+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
69790+
69791+#ifdef CONFIG_PAX_SOFTMODE
69792+extern int pax_softmode;
69793+#endif
69794+
69795+extern int pax_check_flags(unsigned long *);
69796+
69797+/* if tsk != current then task_lock must be held on it */
69798+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69799+static inline unsigned long pax_get_flags(struct task_struct *tsk)
69800+{
69801+ if (likely(tsk->mm))
69802+ return tsk->mm->pax_flags;
69803+ else
69804+ return 0UL;
69805+}
69806+
69807+/* if tsk != current then task_lock must be held on it */
69808+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
69809+{
69810+ if (likely(tsk->mm)) {
69811+ tsk->mm->pax_flags = flags;
69812+ return 0;
69813+ }
69814+ return -EINVAL;
69815+}
69816+#endif
69817+
69818+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
69819+extern void pax_set_initial_flags(struct linux_binprm *bprm);
69820+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
69821+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
69822+#endif
69823+
69824+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
69825+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
69826+extern void pax_report_refcount_overflow(struct pt_regs *regs);
69827+
69828 /* Future-safe accessor for struct task_struct's cpus_allowed. */
69829 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
69830
69831@@ -1696,7 +1799,7 @@ struct pid_namespace;
69832 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
69833 struct pid_namespace *ns);
69834
69835-static inline pid_t task_pid_nr(struct task_struct *tsk)
69836+static inline pid_t task_pid_nr(const struct task_struct *tsk)
69837 {
69838 return tsk->pid;
69839 }
69840@@ -2155,7 +2258,9 @@ void yield(void);
69841 extern struct exec_domain default_exec_domain;
69842
69843 union thread_union {
69844+#ifndef CONFIG_X86
69845 struct thread_info thread_info;
69846+#endif
69847 unsigned long stack[THREAD_SIZE/sizeof(long)];
69848 };
69849
69850@@ -2188,6 +2293,7 @@ extern struct pid_namespace init_pid_ns;
69851 */
69852
69853 extern struct task_struct *find_task_by_vpid(pid_t nr);
69854+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
69855 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
69856 struct pid_namespace *ns);
69857
69858@@ -2344,7 +2450,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
69859 extern void exit_itimers(struct signal_struct *);
69860 extern void flush_itimer_signals(void);
69861
69862-extern void do_group_exit(int);
69863+extern __noreturn void do_group_exit(int);
69864
69865 extern int allow_signal(int);
69866 extern int disallow_signal(int);
69867@@ -2545,9 +2651,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
69868
69869 #endif
69870
69871-static inline int object_is_on_stack(void *obj)
69872+static inline int object_starts_on_stack(void *obj)
69873 {
69874- void *stack = task_stack_page(current);
69875+ const void *stack = task_stack_page(current);
69876
69877 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
69878 }
69879diff --git a/include/linux/security.h b/include/linux/security.h
69880index eee7478..290f7ba 100644
69881--- a/include/linux/security.h
69882+++ b/include/linux/security.h
69883@@ -26,6 +26,7 @@
69884 #include <linux/capability.h>
69885 #include <linux/slab.h>
69886 #include <linux/err.h>
69887+#include <linux/grsecurity.h>
69888
69889 struct linux_binprm;
69890 struct cred;
69891diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69892index 68a04a3..866e6a1 100644
69893--- a/include/linux/seq_file.h
69894+++ b/include/linux/seq_file.h
69895@@ -26,6 +26,9 @@ struct seq_file {
69896 struct mutex lock;
69897 const struct seq_operations *op;
69898 int poll_event;
69899+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69900+ u64 exec_id;
69901+#endif
69902 #ifdef CONFIG_USER_NS
69903 struct user_namespace *user_ns;
69904 #endif
69905@@ -38,6 +41,7 @@ struct seq_operations {
69906 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69907 int (*show) (struct seq_file *m, void *v);
69908 };
69909+typedef struct seq_operations __no_const seq_operations_no_const;
69910
69911 #define SEQ_SKIP 1
69912
69913diff --git a/include/linux/shm.h b/include/linux/shm.h
69914index 429c199..4d42e38 100644
69915--- a/include/linux/shm.h
69916+++ b/include/linux/shm.h
69917@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
69918
69919 /* The task created the shm object. NULL if the task is dead. */
69920 struct task_struct *shm_creator;
69921+#ifdef CONFIG_GRKERNSEC
69922+ time_t shm_createtime;
69923+ pid_t shm_lapid;
69924+#endif
69925 };
69926
69927 /* shm_mode upper byte flags */
69928diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69929index 320e976..fd52553 100644
69930--- a/include/linux/skbuff.h
69931+++ b/include/linux/skbuff.h
69932@@ -590,7 +590,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
69933 extern struct sk_buff *__alloc_skb(unsigned int size,
69934 gfp_t priority, int flags, int node);
69935 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
69936-static inline struct sk_buff *alloc_skb(unsigned int size,
69937+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
69938 gfp_t priority)
69939 {
69940 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
69941@@ -700,7 +700,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
69942 */
69943 static inline int skb_queue_empty(const struct sk_buff_head *list)
69944 {
69945- return list->next == (struct sk_buff *)list;
69946+ return list->next == (const struct sk_buff *)list;
69947 }
69948
69949 /**
69950@@ -713,7 +713,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69951 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69952 const struct sk_buff *skb)
69953 {
69954- return skb->next == (struct sk_buff *)list;
69955+ return skb->next == (const struct sk_buff *)list;
69956 }
69957
69958 /**
69959@@ -726,7 +726,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69960 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69961 const struct sk_buff *skb)
69962 {
69963- return skb->prev == (struct sk_buff *)list;
69964+ return skb->prev == (const struct sk_buff *)list;
69965 }
69966
69967 /**
69968@@ -1722,7 +1722,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
69969 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
69970 */
69971 #ifndef NET_SKB_PAD
69972-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
69973+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
69974 #endif
69975
69976 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69977@@ -2300,7 +2300,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
69978 int noblock, int *err);
69979 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
69980 struct poll_table_struct *wait);
69981-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
69982+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
69983 int offset, struct iovec *to,
69984 int size);
69985 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
69986diff --git a/include/linux/slab.h b/include/linux/slab.h
69987index 5d168d7..720bff3 100644
69988--- a/include/linux/slab.h
69989+++ b/include/linux/slab.h
69990@@ -12,13 +12,20 @@
69991 #include <linux/gfp.h>
69992 #include <linux/types.h>
69993 #include <linux/workqueue.h>
69994-
69995+#include <linux/err.h>
69996
69997 /*
69998 * Flags to pass to kmem_cache_create().
69999 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
70000 */
70001 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
70002+
70003+#ifdef CONFIG_PAX_USERCOPY_SLABS
70004+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
70005+#else
70006+#define SLAB_USERCOPY 0x00000000UL
70007+#endif
70008+
70009 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
70010 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
70011 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
70012@@ -89,10 +96,13 @@
70013 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
70014 * Both make kfree a no-op.
70015 */
70016-#define ZERO_SIZE_PTR ((void *)16)
70017+#define ZERO_SIZE_PTR \
70018+({ \
70019+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
70020+ (void *)(-MAX_ERRNO-1L); \
70021+})
70022
70023-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
70024- (unsigned long)ZERO_SIZE_PTR)
70025+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
70026
70027 /*
70028 * Common fields provided in kmem_cache by all slab allocators
70029@@ -112,7 +122,7 @@ struct kmem_cache {
70030 unsigned int align; /* Alignment as calculated */
70031 unsigned long flags; /* Active flags on the slab */
70032 const char *name; /* Slab name for sysfs */
70033- int refcount; /* Use counter */
70034+ atomic_t refcount; /* Use counter */
70035 void (*ctor)(void *); /* Called on object slot creation */
70036 struct list_head list; /* List of all slab caches on the system */
70037 };
70038@@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
70039 void kfree(const void *);
70040 void kzfree(const void *);
70041 size_t ksize(const void *);
70042+const char *check_heap_object(const void *ptr, unsigned long n);
70043+bool is_usercopy_object(const void *ptr);
70044
70045 /*
70046 * Allocator specific definitions. These are mainly used to establish optimized
70047@@ -311,6 +323,7 @@ size_t ksize(const void *);
70048 * for general use, and so are not documented here. For a full list of
70049 * potential flags, always refer to linux/gfp.h.
70050 */
70051+
70052 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
70053 {
70054 if (size != 0 && n > SIZE_MAX / size)
70055@@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
70056 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
70057 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
70058 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
70059-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
70060+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
70061 #define kmalloc_track_caller(size, flags) \
70062 __kmalloc_track_caller(size, flags, _RET_IP_)
70063 #else
70064@@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
70065 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
70066 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
70067 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
70068-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
70069+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
70070 #define kmalloc_node_track_caller(size, flags, node) \
70071 __kmalloc_node_track_caller(size, flags, node, \
70072 _RET_IP_)
70073diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
70074index 8bb6e0e..8eb0dbe 100644
70075--- a/include/linux/slab_def.h
70076+++ b/include/linux/slab_def.h
70077@@ -52,7 +52,7 @@ struct kmem_cache {
70078 /* 4) cache creation/removal */
70079 const char *name;
70080 struct list_head list;
70081- int refcount;
70082+ atomic_t refcount;
70083 int object_size;
70084 int align;
70085
70086@@ -68,10 +68,10 @@ struct kmem_cache {
70087 unsigned long node_allocs;
70088 unsigned long node_frees;
70089 unsigned long node_overflow;
70090- atomic_t allochit;
70091- atomic_t allocmiss;
70092- atomic_t freehit;
70093- atomic_t freemiss;
70094+ atomic_unchecked_t allochit;
70095+ atomic_unchecked_t allocmiss;
70096+ atomic_unchecked_t freehit;
70097+ atomic_unchecked_t freemiss;
70098
70099 /*
70100 * If debugging is enabled, then the allocator can add additional
70101@@ -111,11 +111,16 @@ struct cache_sizes {
70102 #ifdef CONFIG_ZONE_DMA
70103 struct kmem_cache *cs_dmacachep;
70104 #endif
70105+
70106+#ifdef CONFIG_PAX_USERCOPY_SLABS
70107+ struct kmem_cache *cs_usercopycachep;
70108+#endif
70109+
70110 };
70111 extern struct cache_sizes malloc_sizes[];
70112
70113 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
70114-void *__kmalloc(size_t size, gfp_t flags);
70115+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
70116
70117 #ifdef CONFIG_TRACING
70118 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
70119@@ -152,6 +157,13 @@ found:
70120 cachep = malloc_sizes[i].cs_dmacachep;
70121 else
70122 #endif
70123+
70124+#ifdef CONFIG_PAX_USERCOPY_SLABS
70125+ if (flags & GFP_USERCOPY)
70126+ cachep = malloc_sizes[i].cs_usercopycachep;
70127+ else
70128+#endif
70129+
70130 cachep = malloc_sizes[i].cs_cachep;
70131
70132 ret = kmem_cache_alloc_trace(cachep, flags, size);
70133@@ -162,7 +174,7 @@ found:
70134 }
70135
70136 #ifdef CONFIG_NUMA
70137-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
70138+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
70139 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
70140
70141 #ifdef CONFIG_TRACING
70142@@ -205,6 +217,13 @@ found:
70143 cachep = malloc_sizes[i].cs_dmacachep;
70144 else
70145 #endif
70146+
70147+#ifdef CONFIG_PAX_USERCOPY_SLABS
70148+ if (flags & GFP_USERCOPY)
70149+ cachep = malloc_sizes[i].cs_usercopycachep;
70150+ else
70151+#endif
70152+
70153 cachep = malloc_sizes[i].cs_cachep;
70154
70155 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
70156diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
70157index f28e14a..7831211 100644
70158--- a/include/linux/slob_def.h
70159+++ b/include/linux/slob_def.h
70160@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
70161 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
70162 }
70163
70164-void *__kmalloc_node(size_t size, gfp_t flags, int node);
70165+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
70166
70167 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
70168 {
70169@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
70170 return __kmalloc_node(size, flags, NUMA_NO_NODE);
70171 }
70172
70173-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
70174+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
70175 {
70176 return kmalloc(size, flags);
70177 }
70178diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
70179index 9db4825..ed42fb5 100644
70180--- a/include/linux/slub_def.h
70181+++ b/include/linux/slub_def.h
70182@@ -91,7 +91,7 @@ struct kmem_cache {
70183 struct kmem_cache_order_objects max;
70184 struct kmem_cache_order_objects min;
70185 gfp_t allocflags; /* gfp flags to use on each alloc */
70186- int refcount; /* Refcount for slab cache destroy */
70187+ atomic_t refcount; /* Refcount for slab cache destroy */
70188 void (*ctor)(void *);
70189 int inuse; /* Offset to metadata */
70190 int align; /* Alignment */
70191@@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
70192 * Sorry that the following has to be that ugly but some versions of GCC
70193 * have trouble with constant propagation and loops.
70194 */
70195-static __always_inline int kmalloc_index(size_t size)
70196+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
70197 {
70198 if (!size)
70199 return 0;
70200@@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
70201 }
70202
70203 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
70204-void *__kmalloc(size_t size, gfp_t flags);
70205+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
70206
70207 static __always_inline void *
70208 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
70209@@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
70210 }
70211 #endif
70212
70213-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
70214+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
70215 {
70216 unsigned int order = get_order(size);
70217 return kmalloc_order_trace(size, flags, order);
70218@@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
70219 }
70220
70221 #ifdef CONFIG_NUMA
70222-void *__kmalloc_node(size_t size, gfp_t flags, int node);
70223+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
70224 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
70225
70226 #ifdef CONFIG_TRACING
70227diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
70228index e8d702e..0a56eb4 100644
70229--- a/include/linux/sock_diag.h
70230+++ b/include/linux/sock_diag.h
70231@@ -10,7 +10,7 @@ struct sock;
70232 struct sock_diag_handler {
70233 __u8 family;
70234 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
70235-};
70236+} __do_const;
70237
70238 int sock_diag_register(const struct sock_diag_handler *h);
70239 void sock_diag_unregister(const struct sock_diag_handler *h);
70240diff --git a/include/linux/sonet.h b/include/linux/sonet.h
70241index 680f9a3..f13aeb0 100644
70242--- a/include/linux/sonet.h
70243+++ b/include/linux/sonet.h
70244@@ -7,7 +7,7 @@
70245 #include <uapi/linux/sonet.h>
70246
70247 struct k_sonet_stats {
70248-#define __HANDLE_ITEM(i) atomic_t i
70249+#define __HANDLE_ITEM(i) atomic_unchecked_t i
70250 __SONET_ITEMS
70251 #undef __HANDLE_ITEM
70252 };
70253diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
70254index 34206b8..3db7f1c 100644
70255--- a/include/linux/sunrpc/clnt.h
70256+++ b/include/linux/sunrpc/clnt.h
70257@@ -96,7 +96,7 @@ struct rpc_procinfo {
70258 unsigned int p_timer; /* Which RTT timer to use */
70259 u32 p_statidx; /* Which procedure to account */
70260 const char * p_name; /* name of procedure */
70261-};
70262+} __do_const;
70263
70264 #ifdef __KERNEL__
70265
70266@@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
70267 {
70268 switch (sap->sa_family) {
70269 case AF_INET:
70270- return ntohs(((struct sockaddr_in *)sap)->sin_port);
70271+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
70272 case AF_INET6:
70273- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
70274+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
70275 }
70276 return 0;
70277 }
70278@@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
70279 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
70280 const struct sockaddr *src)
70281 {
70282- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
70283+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
70284 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
70285
70286 dsin->sin_family = ssin->sin_family;
70287@@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
70288 if (sa->sa_family != AF_INET6)
70289 return 0;
70290
70291- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
70292+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
70293 }
70294
70295 #endif /* __KERNEL__ */
70296diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
70297index 676ddf5..4c519a1 100644
70298--- a/include/linux/sunrpc/svc.h
70299+++ b/include/linux/sunrpc/svc.h
70300@@ -410,7 +410,7 @@ struct svc_procedure {
70301 unsigned int pc_count; /* call count */
70302 unsigned int pc_cachetype; /* cache info (NFS) */
70303 unsigned int pc_xdrressize; /* maximum size of XDR reply */
70304-};
70305+} __do_const;
70306
70307 /*
70308 * Function prototypes.
70309diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
70310index 0b8e3e6..33e0a01 100644
70311--- a/include/linux/sunrpc/svc_rdma.h
70312+++ b/include/linux/sunrpc/svc_rdma.h
70313@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
70314 extern unsigned int svcrdma_max_requests;
70315 extern unsigned int svcrdma_max_req_size;
70316
70317-extern atomic_t rdma_stat_recv;
70318-extern atomic_t rdma_stat_read;
70319-extern atomic_t rdma_stat_write;
70320-extern atomic_t rdma_stat_sq_starve;
70321-extern atomic_t rdma_stat_rq_starve;
70322-extern atomic_t rdma_stat_rq_poll;
70323-extern atomic_t rdma_stat_rq_prod;
70324-extern atomic_t rdma_stat_sq_poll;
70325-extern atomic_t rdma_stat_sq_prod;
70326+extern atomic_unchecked_t rdma_stat_recv;
70327+extern atomic_unchecked_t rdma_stat_read;
70328+extern atomic_unchecked_t rdma_stat_write;
70329+extern atomic_unchecked_t rdma_stat_sq_starve;
70330+extern atomic_unchecked_t rdma_stat_rq_starve;
70331+extern atomic_unchecked_t rdma_stat_rq_poll;
70332+extern atomic_unchecked_t rdma_stat_rq_prod;
70333+extern atomic_unchecked_t rdma_stat_sq_poll;
70334+extern atomic_unchecked_t rdma_stat_sq_prod;
70335
70336 #define RPCRDMA_VERSION 1
70337
70338diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
70339index dd74084a..7f509d5 100644
70340--- a/include/linux/sunrpc/svcauth.h
70341+++ b/include/linux/sunrpc/svcauth.h
70342@@ -109,7 +109,7 @@ struct auth_ops {
70343 int (*release)(struct svc_rqst *rq);
70344 void (*domain_release)(struct auth_domain *);
70345 int (*set_client)(struct svc_rqst *rq);
70346-};
70347+} __do_const;
70348
70349 #define SVC_GARBAGE 1
70350 #define SVC_SYSERR 2
70351diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
70352index 071d62c..4ccc7ac 100644
70353--- a/include/linux/swiotlb.h
70354+++ b/include/linux/swiotlb.h
70355@@ -59,7 +59,8 @@ extern void
70356
70357 extern void
70358 swiotlb_free_coherent(struct device *hwdev, size_t size,
70359- void *vaddr, dma_addr_t dma_handle);
70360+ void *vaddr, dma_addr_t dma_handle,
70361+ struct dma_attrs *attrs);
70362
70363 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
70364 unsigned long offset, size_t size,
70365diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
70366index 27b3b0b..e093dd9 100644
70367--- a/include/linux/syscore_ops.h
70368+++ b/include/linux/syscore_ops.h
70369@@ -16,7 +16,7 @@ struct syscore_ops {
70370 int (*suspend)(void);
70371 void (*resume)(void);
70372 void (*shutdown)(void);
70373-};
70374+} __do_const;
70375
70376 extern void register_syscore_ops(struct syscore_ops *ops);
70377 extern void unregister_syscore_ops(struct syscore_ops *ops);
70378diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
70379index 14a8ff2..af52bad 100644
70380--- a/include/linux/sysctl.h
70381+++ b/include/linux/sysctl.h
70382@@ -34,13 +34,13 @@ struct ctl_table_root;
70383 struct ctl_table_header;
70384 struct ctl_dir;
70385
70386-typedef struct ctl_table ctl_table;
70387-
70388 typedef int proc_handler (struct ctl_table *ctl, int write,
70389 void __user *buffer, size_t *lenp, loff_t *ppos);
70390
70391 extern int proc_dostring(struct ctl_table *, int,
70392 void __user *, size_t *, loff_t *);
70393+extern int proc_dostring_modpriv(struct ctl_table *, int,
70394+ void __user *, size_t *, loff_t *);
70395 extern int proc_dointvec(struct ctl_table *, int,
70396 void __user *, size_t *, loff_t *);
70397 extern int proc_dointvec_minmax(struct ctl_table *, int,
70398@@ -115,7 +115,9 @@ struct ctl_table
70399 struct ctl_table_poll *poll;
70400 void *extra1;
70401 void *extra2;
70402-};
70403+} __do_const;
70404+typedef struct ctl_table __no_const ctl_table_no_const;
70405+typedef struct ctl_table ctl_table;
70406
70407 struct ctl_node {
70408 struct rb_node node;
70409diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
70410index 381f06d..dc16cc7 100644
70411--- a/include/linux/sysfs.h
70412+++ b/include/linux/sysfs.h
70413@@ -31,7 +31,8 @@ struct attribute {
70414 struct lock_class_key *key;
70415 struct lock_class_key skey;
70416 #endif
70417-};
70418+} __do_const;
70419+typedef struct attribute __no_const attribute_no_const;
70420
70421 /**
70422 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
70423@@ -59,8 +60,8 @@ struct attribute_group {
70424 umode_t (*is_visible)(struct kobject *,
70425 struct attribute *, int);
70426 struct attribute **attrs;
70427-};
70428-
70429+} __do_const;
70430+typedef struct attribute_group __no_const attribute_group_no_const;
70431
70432
70433 /**
70434@@ -107,7 +108,8 @@ struct bin_attribute {
70435 char *, loff_t, size_t);
70436 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
70437 struct vm_area_struct *vma);
70438-};
70439+} __do_const;
70440+typedef struct bin_attribute __no_const bin_attribute_no_const;
70441
70442 /**
70443 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
70444diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
70445index 7faf933..4657127 100644
70446--- a/include/linux/sysrq.h
70447+++ b/include/linux/sysrq.h
70448@@ -15,7 +15,9 @@
70449 #define _LINUX_SYSRQ_H
70450
70451 #include <linux/errno.h>
70452+#include <linux/compiler.h>
70453 #include <linux/types.h>
70454+#include <linux/compiler.h>
70455
70456 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
70457 #define SYSRQ_DEFAULT_ENABLE 1
70458@@ -36,7 +38,7 @@ struct sysrq_key_op {
70459 char *help_msg;
70460 char *action_msg;
70461 int enable_mask;
70462-};
70463+} __do_const;
70464
70465 #ifdef CONFIG_MAGIC_SYSRQ
70466
70467diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
70468index e7e0473..39b7b52 100644
70469--- a/include/linux/thread_info.h
70470+++ b/include/linux/thread_info.h
70471@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
70472 #error "no set_restore_sigmask() provided and default one won't work"
70473 #endif
70474
70475+extern void __check_object_size(const void *ptr, unsigned long n, bool to);
70476+static inline void check_object_size(const void *ptr, unsigned long n, bool to)
70477+{
70478+#ifndef CONFIG_PAX_USERCOPY_DEBUG
70479+ if (!__builtin_constant_p(n))
70480+#endif
70481+ __check_object_size(ptr, n, to);
70482+}
70483+
70484 #endif /* __KERNEL__ */
70485
70486 #endif /* _LINUX_THREAD_INFO_H */
70487diff --git a/include/linux/tty.h b/include/linux/tty.h
70488index 8db1b56..c16a040 100644
70489--- a/include/linux/tty.h
70490+++ b/include/linux/tty.h
70491@@ -194,7 +194,7 @@ struct tty_port {
70492 const struct tty_port_operations *ops; /* Port operations */
70493 spinlock_t lock; /* Lock protecting tty field */
70494 int blocked_open; /* Waiting to open */
70495- int count; /* Usage count */
70496+ atomic_t count; /* Usage count */
70497 wait_queue_head_t open_wait; /* Open waiters */
70498 wait_queue_head_t close_wait; /* Close waiters */
70499 wait_queue_head_t delta_msr_wait; /* Modem status change */
70500@@ -490,7 +490,7 @@ extern int tty_port_open(struct tty_port *port,
70501 struct tty_struct *tty, struct file *filp);
70502 static inline int tty_port_users(struct tty_port *port)
70503 {
70504- return port->count + port->blocked_open;
70505+ return atomic_read(&port->count) + port->blocked_open;
70506 }
70507
70508 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
70509diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
70510index dd976cf..e272742 100644
70511--- a/include/linux/tty_driver.h
70512+++ b/include/linux/tty_driver.h
70513@@ -284,7 +284,7 @@ struct tty_operations {
70514 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
70515 #endif
70516 const struct file_operations *proc_fops;
70517-};
70518+} __do_const;
70519
70520 struct tty_driver {
70521 int magic; /* magic number for this structure */
70522diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
70523index fb79dd8d..07d4773 100644
70524--- a/include/linux/tty_ldisc.h
70525+++ b/include/linux/tty_ldisc.h
70526@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
70527
70528 struct module *owner;
70529
70530- int refcount;
70531+ atomic_t refcount;
70532 };
70533
70534 struct tty_ldisc {
70535diff --git a/include/linux/types.h b/include/linux/types.h
70536index 4d118ba..c3ee9bf 100644
70537--- a/include/linux/types.h
70538+++ b/include/linux/types.h
70539@@ -176,10 +176,26 @@ typedef struct {
70540 int counter;
70541 } atomic_t;
70542
70543+#ifdef CONFIG_PAX_REFCOUNT
70544+typedef struct {
70545+ int counter;
70546+} atomic_unchecked_t;
70547+#else
70548+typedef atomic_t atomic_unchecked_t;
70549+#endif
70550+
70551 #ifdef CONFIG_64BIT
70552 typedef struct {
70553 long counter;
70554 } atomic64_t;
70555+
70556+#ifdef CONFIG_PAX_REFCOUNT
70557+typedef struct {
70558+ long counter;
70559+} atomic64_unchecked_t;
70560+#else
70561+typedef atomic64_t atomic64_unchecked_t;
70562+#endif
70563 #endif
70564
70565 struct list_head {
70566diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
70567index 5ca0951..ab496a5 100644
70568--- a/include/linux/uaccess.h
70569+++ b/include/linux/uaccess.h
70570@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70571 long ret; \
70572 mm_segment_t old_fs = get_fs(); \
70573 \
70574- set_fs(KERNEL_DS); \
70575 pagefault_disable(); \
70576- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
70577- pagefault_enable(); \
70578+ set_fs(KERNEL_DS); \
70579+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
70580 set_fs(old_fs); \
70581+ pagefault_enable(); \
70582 ret; \
70583 })
70584
70585diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
70586index 8e522cbc..aa8572d 100644
70587--- a/include/linux/uidgid.h
70588+++ b/include/linux/uidgid.h
70589@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
70590
70591 #endif /* CONFIG_USER_NS */
70592
70593+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
70594+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
70595+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
70596+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
70597+
70598 #endif /* _LINUX_UIDGID_H */
70599diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
70600index 99c1b4d..bb94261 100644
70601--- a/include/linux/unaligned/access_ok.h
70602+++ b/include/linux/unaligned/access_ok.h
70603@@ -6,32 +6,32 @@
70604
70605 static inline u16 get_unaligned_le16(const void *p)
70606 {
70607- return le16_to_cpup((__le16 *)p);
70608+ return le16_to_cpup((const __le16 *)p);
70609 }
70610
70611 static inline u32 get_unaligned_le32(const void *p)
70612 {
70613- return le32_to_cpup((__le32 *)p);
70614+ return le32_to_cpup((const __le32 *)p);
70615 }
70616
70617 static inline u64 get_unaligned_le64(const void *p)
70618 {
70619- return le64_to_cpup((__le64 *)p);
70620+ return le64_to_cpup((const __le64 *)p);
70621 }
70622
70623 static inline u16 get_unaligned_be16(const void *p)
70624 {
70625- return be16_to_cpup((__be16 *)p);
70626+ return be16_to_cpup((const __be16 *)p);
70627 }
70628
70629 static inline u32 get_unaligned_be32(const void *p)
70630 {
70631- return be32_to_cpup((__be32 *)p);
70632+ return be32_to_cpup((const __be32 *)p);
70633 }
70634
70635 static inline u64 get_unaligned_be64(const void *p)
70636 {
70637- return be64_to_cpup((__be64 *)p);
70638+ return be64_to_cpup((const __be64 *)p);
70639 }
70640
70641 static inline void put_unaligned_le16(u16 val, void *p)
70642diff --git a/include/linux/usb.h b/include/linux/usb.h
70643index 4d22d0f..ac43c2f 100644
70644--- a/include/linux/usb.h
70645+++ b/include/linux/usb.h
70646@@ -554,7 +554,7 @@ struct usb_device {
70647 int maxchild;
70648
70649 u32 quirks;
70650- atomic_t urbnum;
70651+ atomic_unchecked_t urbnum;
70652
70653 unsigned long active_duration;
70654
70655diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
70656index c5d36c6..108f4f9 100644
70657--- a/include/linux/usb/renesas_usbhs.h
70658+++ b/include/linux/usb/renesas_usbhs.h
70659@@ -39,7 +39,7 @@ enum {
70660 */
70661 struct renesas_usbhs_driver_callback {
70662 int (*notify_hotplug)(struct platform_device *pdev);
70663-};
70664+} __no_const;
70665
70666 /*
70667 * callback functions for platform
70668diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
70669index b9bd2e6..4ce0093 100644
70670--- a/include/linux/user_namespace.h
70671+++ b/include/linux/user_namespace.h
70672@@ -21,7 +21,7 @@ struct user_namespace {
70673 struct uid_gid_map uid_map;
70674 struct uid_gid_map gid_map;
70675 struct uid_gid_map projid_map;
70676- struct kref kref;
70677+ atomic_t count;
70678 struct user_namespace *parent;
70679 kuid_t owner;
70680 kgid_t group;
70681@@ -35,18 +35,18 @@ extern struct user_namespace init_user_ns;
70682 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
70683 {
70684 if (ns)
70685- kref_get(&ns->kref);
70686+ atomic_inc(&ns->count);
70687 return ns;
70688 }
70689
70690 extern int create_user_ns(struct cred *new);
70691 extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
70692-extern void free_user_ns(struct kref *kref);
70693+extern void free_user_ns(struct user_namespace *ns);
70694
70695 static inline void put_user_ns(struct user_namespace *ns)
70696 {
70697- if (ns)
70698- kref_put(&ns->kref, free_user_ns);
70699+ if (ns && atomic_dec_and_test(&ns->count))
70700+ free_user_ns(ns);
70701 }
70702
70703 struct seq_operations;
70704diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
70705index 6f8fbcf..8259001 100644
70706--- a/include/linux/vermagic.h
70707+++ b/include/linux/vermagic.h
70708@@ -25,9 +25,35 @@
70709 #define MODULE_ARCH_VERMAGIC ""
70710 #endif
70711
70712+#ifdef CONFIG_PAX_REFCOUNT
70713+#define MODULE_PAX_REFCOUNT "REFCOUNT "
70714+#else
70715+#define MODULE_PAX_REFCOUNT ""
70716+#endif
70717+
70718+#ifdef CONSTIFY_PLUGIN
70719+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
70720+#else
70721+#define MODULE_CONSTIFY_PLUGIN ""
70722+#endif
70723+
70724+#ifdef STACKLEAK_PLUGIN
70725+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
70726+#else
70727+#define MODULE_STACKLEAK_PLUGIN ""
70728+#endif
70729+
70730+#ifdef CONFIG_GRKERNSEC
70731+#define MODULE_GRSEC "GRSEC "
70732+#else
70733+#define MODULE_GRSEC ""
70734+#endif
70735+
70736 #define VERMAGIC_STRING \
70737 UTS_RELEASE " " \
70738 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
70739 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
70740- MODULE_ARCH_VERMAGIC
70741+ MODULE_ARCH_VERMAGIC \
70742+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
70743+ MODULE_GRSEC
70744
70745diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
70746index 6071e91..ca6a489 100644
70747--- a/include/linux/vmalloc.h
70748+++ b/include/linux/vmalloc.h
70749@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
70750 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
70751 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
70752 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
70753+
70754+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70755+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
70756+#endif
70757+
70758 /* bits [20..32] reserved for arch specific ioremap internals */
70759
70760 /*
70761@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
70762 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
70763 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
70764 unsigned long start, unsigned long end, gfp_t gfp_mask,
70765- pgprot_t prot, int node, const void *caller);
70766+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
70767 extern void vfree(const void *addr);
70768
70769 extern void *vmap(struct page **pages, unsigned int count,
70770@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
70771 extern void free_vm_area(struct vm_struct *area);
70772
70773 /* for /dev/kmem */
70774-extern long vread(char *buf, char *addr, unsigned long count);
70775-extern long vwrite(char *buf, char *addr, unsigned long count);
70776+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
70777+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
70778
70779 /*
70780 * Internals. Dont't use..
70781diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
70782index a13291f..af51fa3 100644
70783--- a/include/linux/vmstat.h
70784+++ b/include/linux/vmstat.h
70785@@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
70786 /*
70787 * Zone based page accounting with per cpu differentials.
70788 */
70789-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70790+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70791
70792 static inline void zone_page_state_add(long x, struct zone *zone,
70793 enum zone_stat_item item)
70794 {
70795- atomic_long_add(x, &zone->vm_stat[item]);
70796- atomic_long_add(x, &vm_stat[item]);
70797+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
70798+ atomic_long_add_unchecked(x, &vm_stat[item]);
70799 }
70800
70801 static inline unsigned long global_page_state(enum zone_stat_item item)
70802 {
70803- long x = atomic_long_read(&vm_stat[item]);
70804+ long x = atomic_long_read_unchecked(&vm_stat[item]);
70805 #ifdef CONFIG_SMP
70806 if (x < 0)
70807 x = 0;
70808@@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
70809 static inline unsigned long zone_page_state(struct zone *zone,
70810 enum zone_stat_item item)
70811 {
70812- long x = atomic_long_read(&zone->vm_stat[item]);
70813+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70814 #ifdef CONFIG_SMP
70815 if (x < 0)
70816 x = 0;
70817@@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
70818 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
70819 enum zone_stat_item item)
70820 {
70821- long x = atomic_long_read(&zone->vm_stat[item]);
70822+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70823
70824 #ifdef CONFIG_SMP
70825 int cpu;
70826@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
70827
70828 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
70829 {
70830- atomic_long_inc(&zone->vm_stat[item]);
70831- atomic_long_inc(&vm_stat[item]);
70832+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
70833+ atomic_long_inc_unchecked(&vm_stat[item]);
70834 }
70835
70836 static inline void __inc_zone_page_state(struct page *page,
70837@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
70838
70839 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
70840 {
70841- atomic_long_dec(&zone->vm_stat[item]);
70842- atomic_long_dec(&vm_stat[item]);
70843+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
70844+ atomic_long_dec_unchecked(&vm_stat[item]);
70845 }
70846
70847 static inline void __dec_zone_page_state(struct page *page,
70848diff --git a/include/linux/xattr.h b/include/linux/xattr.h
70849index fdbafc6..b7ffd47 100644
70850--- a/include/linux/xattr.h
70851+++ b/include/linux/xattr.h
70852@@ -28,7 +28,7 @@ struct xattr_handler {
70853 size_t size, int handler_flags);
70854 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
70855 size_t size, int flags, int handler_flags);
70856-};
70857+} __do_const;
70858
70859 struct xattr {
70860 char *name;
70861diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
70862index 95d1c91..6798cca 100644
70863--- a/include/media/v4l2-dev.h
70864+++ b/include/media/v4l2-dev.h
70865@@ -76,7 +76,7 @@ struct v4l2_file_operations {
70866 int (*mmap) (struct file *, struct vm_area_struct *);
70867 int (*open) (struct file *);
70868 int (*release) (struct file *);
70869-};
70870+} __do_const;
70871
70872 /*
70873 * Newer version of video_device, handled by videodev2.c
70874diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70875index 4118ad1..cb7e25f 100644
70876--- a/include/media/v4l2-ioctl.h
70877+++ b/include/media/v4l2-ioctl.h
70878@@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
70879 bool valid_prio, int cmd, void *arg);
70880 };
70881
70882-
70883 /* v4l debugging and diagnostics */
70884
70885 /* Debug bitmask flags to be used on V4L2 */
70886diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
70887index adcbb20..62c2559 100644
70888--- a/include/net/9p/transport.h
70889+++ b/include/net/9p/transport.h
70890@@ -57,7 +57,7 @@ struct p9_trans_module {
70891 int (*cancel) (struct p9_client *, struct p9_req_t *req);
70892 int (*zc_request)(struct p9_client *, struct p9_req_t *,
70893 char *, char *, int , int, int, int);
70894-};
70895+} __do_const;
70896
70897 void v9fs_register_trans(struct p9_trans_module *m);
70898 void v9fs_unregister_trans(struct p9_trans_module *m);
70899diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
70900index 7588ef4..e62d35f 100644
70901--- a/include/net/bluetooth/l2cap.h
70902+++ b/include/net/bluetooth/l2cap.h
70903@@ -552,7 +552,7 @@ struct l2cap_ops {
70904 void (*defer) (struct l2cap_chan *chan);
70905 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
70906 unsigned long len, int nb);
70907-};
70908+} __do_const;
70909
70910 struct l2cap_conn {
70911 struct hci_conn *hcon;
70912diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
70913index 9e5425b..8136ffc 100644
70914--- a/include/net/caif/cfctrl.h
70915+++ b/include/net/caif/cfctrl.h
70916@@ -52,7 +52,7 @@ struct cfctrl_rsp {
70917 void (*radioset_rsp)(void);
70918 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
70919 struct cflayer *client_layer);
70920-};
70921+} __no_const;
70922
70923 /* Link Setup Parameters for CAIF-Links. */
70924 struct cfctrl_link_param {
70925@@ -101,8 +101,8 @@ struct cfctrl_request_info {
70926 struct cfctrl {
70927 struct cfsrvl serv;
70928 struct cfctrl_rsp res;
70929- atomic_t req_seq_no;
70930- atomic_t rsp_seq_no;
70931+ atomic_unchecked_t req_seq_no;
70932+ atomic_unchecked_t rsp_seq_no;
70933 struct list_head list;
70934 /* Protects from simultaneous access to first_req list */
70935 spinlock_t info_list_lock;
70936diff --git a/include/net/flow.h b/include/net/flow.h
70937index 628e11b..4c475df 100644
70938--- a/include/net/flow.h
70939+++ b/include/net/flow.h
70940@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
70941
70942 extern void flow_cache_flush(void);
70943 extern void flow_cache_flush_deferred(void);
70944-extern atomic_t flow_cache_genid;
70945+extern atomic_unchecked_t flow_cache_genid;
70946
70947 #endif
70948diff --git a/include/net/genetlink.h b/include/net/genetlink.h
70949index bdfbe68..4402ebe 100644
70950--- a/include/net/genetlink.h
70951+++ b/include/net/genetlink.h
70952@@ -118,7 +118,7 @@ struct genl_ops {
70953 struct netlink_callback *cb);
70954 int (*done)(struct netlink_callback *cb);
70955 struct list_head ops_list;
70956-};
70957+} __do_const;
70958
70959 extern int genl_register_family(struct genl_family *family);
70960 extern int genl_register_family_with_ops(struct genl_family *family,
70961diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
70962index e5062c9..48a9a4b 100644
70963--- a/include/net/gro_cells.h
70964+++ b/include/net/gro_cells.h
70965@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
70966 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
70967
70968 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
70969- atomic_long_inc(&dev->rx_dropped);
70970+ atomic_long_inc_unchecked(&dev->rx_dropped);
70971 kfree_skb(skb);
70972 return;
70973 }
70974@@ -73,8 +73,8 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
70975 int i;
70976
70977 gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
70978- gcells->cells = kcalloc(sizeof(struct gro_cell),
70979- gcells->gro_cells_mask + 1,
70980+ gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
70981+ sizeof(struct gro_cell),
70982 GFP_KERNEL);
70983 if (!gcells->cells)
70984 return -ENOMEM;
70985diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
70986index 1832927..ce39aea 100644
70987--- a/include/net/inet_connection_sock.h
70988+++ b/include/net/inet_connection_sock.h
70989@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
70990 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
70991 int (*bind_conflict)(const struct sock *sk,
70992 const struct inet_bind_bucket *tb, bool relax);
70993-};
70994+} __do_const;
70995
70996 /** inet_connection_sock - INET connection oriented sock
70997 *
70998diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
70999index 53f464d..ba76aaa 100644
71000--- a/include/net/inetpeer.h
71001+++ b/include/net/inetpeer.h
71002@@ -47,8 +47,8 @@ struct inet_peer {
71003 */
71004 union {
71005 struct {
71006- atomic_t rid; /* Frag reception counter */
71007- atomic_t ip_id_count; /* IP ID for the next packet */
71008+ atomic_unchecked_t rid; /* Frag reception counter */
71009+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
71010 };
71011 struct rcu_head rcu;
71012 struct inet_peer *gc_next;
71013@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
71014 more++;
71015 inet_peer_refcheck(p);
71016 do {
71017- old = atomic_read(&p->ip_id_count);
71018+ old = atomic_read_unchecked(&p->ip_id_count);
71019 new = old + more;
71020 if (!new)
71021 new = 1;
71022- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
71023+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
71024 return new;
71025 }
71026
71027diff --git a/include/net/ip.h b/include/net/ip.h
71028index a68f838..74518ab 100644
71029--- a/include/net/ip.h
71030+++ b/include/net/ip.h
71031@@ -202,7 +202,7 @@ extern struct local_ports {
71032 } sysctl_local_ports;
71033 extern void inet_get_local_port_range(int *low, int *high);
71034
71035-extern unsigned long *sysctl_local_reserved_ports;
71036+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
71037 static inline int inet_is_reserved_local_port(int port)
71038 {
71039 return test_bit(port, sysctl_local_reserved_ports);
71040diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
71041index 9497be1..5a4fafe 100644
71042--- a/include/net/ip_fib.h
71043+++ b/include/net/ip_fib.h
71044@@ -169,7 +169,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
71045
71046 #define FIB_RES_SADDR(net, res) \
71047 ((FIB_RES_NH(res).nh_saddr_genid == \
71048- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
71049+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
71050 FIB_RES_NH(res).nh_saddr : \
71051 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
71052 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
71053diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
71054index 68c69d5..bdab192 100644
71055--- a/include/net/ip_vs.h
71056+++ b/include/net/ip_vs.h
71057@@ -599,7 +599,7 @@ struct ip_vs_conn {
71058 struct ip_vs_conn *control; /* Master control connection */
71059 atomic_t n_control; /* Number of controlled ones */
71060 struct ip_vs_dest *dest; /* real server */
71061- atomic_t in_pkts; /* incoming packet counter */
71062+ atomic_unchecked_t in_pkts; /* incoming packet counter */
71063
71064 /* packet transmitter for different forwarding methods. If it
71065 mangles the packet, it must return NF_DROP or better NF_STOLEN,
71066@@ -737,7 +737,7 @@ struct ip_vs_dest {
71067 __be16 port; /* port number of the server */
71068 union nf_inet_addr addr; /* IP address of the server */
71069 volatile unsigned int flags; /* dest status flags */
71070- atomic_t conn_flags; /* flags to copy to conn */
71071+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
71072 atomic_t weight; /* server weight */
71073
71074 atomic_t refcnt; /* reference counter */
71075@@ -980,11 +980,11 @@ struct netns_ipvs {
71076 /* ip_vs_lblc */
71077 int sysctl_lblc_expiration;
71078 struct ctl_table_header *lblc_ctl_header;
71079- struct ctl_table *lblc_ctl_table;
71080+ ctl_table_no_const *lblc_ctl_table;
71081 /* ip_vs_lblcr */
71082 int sysctl_lblcr_expiration;
71083 struct ctl_table_header *lblcr_ctl_header;
71084- struct ctl_table *lblcr_ctl_table;
71085+ ctl_table_no_const *lblcr_ctl_table;
71086 /* ip_vs_est */
71087 struct list_head est_list; /* estimator list */
71088 spinlock_t est_lock;
71089diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
71090index 80ffde3..968b0f4 100644
71091--- a/include/net/irda/ircomm_tty.h
71092+++ b/include/net/irda/ircomm_tty.h
71093@@ -35,6 +35,7 @@
71094 #include <linux/termios.h>
71095 #include <linux/timer.h>
71096 #include <linux/tty.h> /* struct tty_struct */
71097+#include <asm/local.h>
71098
71099 #include <net/irda/irias_object.h>
71100 #include <net/irda/ircomm_core.h>
71101diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
71102index cc7c197..9f2da2a 100644
71103--- a/include/net/iucv/af_iucv.h
71104+++ b/include/net/iucv/af_iucv.h
71105@@ -141,7 +141,7 @@ struct iucv_sock {
71106 struct iucv_sock_list {
71107 struct hlist_head head;
71108 rwlock_t lock;
71109- atomic_t autobind_name;
71110+ atomic_unchecked_t autobind_name;
71111 };
71112
71113 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
71114diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
71115index df83f69..9b640b8 100644
71116--- a/include/net/llc_c_ac.h
71117+++ b/include/net/llc_c_ac.h
71118@@ -87,7 +87,7 @@
71119 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
71120 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
71121
71122-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
71123+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
71124
71125 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
71126 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
71127diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
71128index 6ca3113..f8026dd 100644
71129--- a/include/net/llc_c_ev.h
71130+++ b/include/net/llc_c_ev.h
71131@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
71132 return (struct llc_conn_state_ev *)skb->cb;
71133 }
71134
71135-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
71136-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
71137+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
71138+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
71139
71140 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
71141 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
71142diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
71143index 0e79cfb..f46db31 100644
71144--- a/include/net/llc_c_st.h
71145+++ b/include/net/llc_c_st.h
71146@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
71147 u8 next_state;
71148 llc_conn_ev_qfyr_t *ev_qualifiers;
71149 llc_conn_action_t *ev_actions;
71150-};
71151+} __do_const;
71152
71153 struct llc_conn_state {
71154 u8 current_state;
71155diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
71156index 37a3bbd..55a4241 100644
71157--- a/include/net/llc_s_ac.h
71158+++ b/include/net/llc_s_ac.h
71159@@ -23,7 +23,7 @@
71160 #define SAP_ACT_TEST_IND 9
71161
71162 /* All action functions must look like this */
71163-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
71164+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
71165
71166 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
71167 struct sk_buff *skb);
71168diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
71169index 567c681..cd73ac0 100644
71170--- a/include/net/llc_s_st.h
71171+++ b/include/net/llc_s_st.h
71172@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
71173 llc_sap_ev_t ev;
71174 u8 next_state;
71175 llc_sap_action_t *ev_actions;
71176-};
71177+} __do_const;
71178
71179 struct llc_sap_state {
71180 u8 curr_state;
71181diff --git a/include/net/mac80211.h b/include/net/mac80211.h
71182index ee50c5e..1bc3b1a 100644
71183--- a/include/net/mac80211.h
71184+++ b/include/net/mac80211.h
71185@@ -3996,7 +3996,7 @@ struct rate_control_ops {
71186 void (*add_sta_debugfs)(void *priv, void *priv_sta,
71187 struct dentry *dir);
71188 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
71189-};
71190+} __do_const;
71191
71192 static inline int rate_supported(struct ieee80211_sta *sta,
71193 enum ieee80211_band band,
71194diff --git a/include/net/neighbour.h b/include/net/neighbour.h
71195index 0dab173..1b76af0 100644
71196--- a/include/net/neighbour.h
71197+++ b/include/net/neighbour.h
71198@@ -123,7 +123,7 @@ struct neigh_ops {
71199 void (*error_report)(struct neighbour *, struct sk_buff *);
71200 int (*output)(struct neighbour *, struct sk_buff *);
71201 int (*connected_output)(struct neighbour *, struct sk_buff *);
71202-};
71203+} __do_const;
71204
71205 struct pneigh_entry {
71206 struct pneigh_entry *next;
71207diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
71208index de644bc..351fd4e 100644
71209--- a/include/net/net_namespace.h
71210+++ b/include/net/net_namespace.h
71211@@ -115,7 +115,7 @@ struct net {
71212 #endif
71213 struct netns_ipvs *ipvs;
71214 struct sock *diag_nlsk;
71215- atomic_t rt_genid;
71216+ atomic_unchecked_t rt_genid;
71217 };
71218
71219 /*
71220@@ -282,7 +282,7 @@ struct pernet_operations {
71221 void (*exit_batch)(struct list_head *net_exit_list);
71222 int *id;
71223 size_t size;
71224-};
71225+} __do_const;
71226
71227 /*
71228 * Use these carefully. If you implement a network device and it
71229@@ -330,12 +330,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
71230
71231 static inline int rt_genid(struct net *net)
71232 {
71233- return atomic_read(&net->rt_genid);
71234+ return atomic_read_unchecked(&net->rt_genid);
71235 }
71236
71237 static inline void rt_genid_bump(struct net *net)
71238 {
71239- atomic_inc(&net->rt_genid);
71240+ atomic_inc_unchecked(&net->rt_genid);
71241 }
71242
71243 #endif /* __NET_NET_NAMESPACE_H */
71244diff --git a/include/net/netdma.h b/include/net/netdma.h
71245index 8ba8ce2..99b7fff 100644
71246--- a/include/net/netdma.h
71247+++ b/include/net/netdma.h
71248@@ -24,7 +24,7 @@
71249 #include <linux/dmaengine.h>
71250 #include <linux/skbuff.h>
71251
71252-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
71253+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
71254 struct sk_buff *skb, int offset, struct iovec *to,
71255 size_t len, struct dma_pinned_list *pinned_list);
71256
71257diff --git a/include/net/netlink.h b/include/net/netlink.h
71258index 9690b0f..87aded7 100644
71259--- a/include/net/netlink.h
71260+++ b/include/net/netlink.h
71261@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
71262 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
71263 {
71264 if (mark)
71265- skb_trim(skb, (unsigned char *) mark - skb->data);
71266+ skb_trim(skb, (const unsigned char *) mark - skb->data);
71267 }
71268
71269 /**
71270diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
71271index 923cb20..deae816 100644
71272--- a/include/net/netns/conntrack.h
71273+++ b/include/net/netns/conntrack.h
71274@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
71275 struct nf_proto_net {
71276 #ifdef CONFIG_SYSCTL
71277 struct ctl_table_header *ctl_table_header;
71278- struct ctl_table *ctl_table;
71279+ ctl_table_no_const *ctl_table;
71280 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
71281 struct ctl_table_header *ctl_compat_header;
71282- struct ctl_table *ctl_compat_table;
71283+ ctl_table_no_const *ctl_compat_table;
71284 #endif
71285 #endif
71286 unsigned int users;
71287@@ -58,7 +58,7 @@ struct nf_ip_net {
71288 struct nf_icmp_net icmpv6;
71289 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
71290 struct ctl_table_header *ctl_table_header;
71291- struct ctl_table *ctl_table;
71292+ ctl_table_no_const *ctl_table;
71293 #endif
71294 };
71295
71296diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
71297index 2ae2b83..dbdc85e 100644
71298--- a/include/net/netns/ipv4.h
71299+++ b/include/net/netns/ipv4.h
71300@@ -64,7 +64,7 @@ struct netns_ipv4 {
71301 kgid_t sysctl_ping_group_range[2];
71302 long sysctl_tcp_mem[3];
71303
71304- atomic_t dev_addr_genid;
71305+ atomic_unchecked_t dev_addr_genid;
71306
71307 #ifdef CONFIG_IP_MROUTE
71308 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
71309diff --git a/include/net/protocol.h b/include/net/protocol.h
71310index 047c047..b9dad15 100644
71311--- a/include/net/protocol.h
71312+++ b/include/net/protocol.h
71313@@ -44,7 +44,7 @@ struct net_protocol {
71314 void (*err_handler)(struct sk_buff *skb, u32 info);
71315 unsigned int no_policy:1,
71316 netns_ok:1;
71317-};
71318+} __do_const;
71319
71320 #if IS_ENABLED(CONFIG_IPV6)
71321 struct inet6_protocol {
71322@@ -57,7 +57,7 @@ struct inet6_protocol {
71323 u8 type, u8 code, int offset,
71324 __be32 info);
71325 unsigned int flags; /* INET6_PROTO_xxx */
71326-};
71327+} __do_const;
71328
71329 #define INET6_PROTO_NOPOLICY 0x1
71330 #define INET6_PROTO_FINAL 0x2
71331diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
71332index 5a15fab..d799ea7 100644
71333--- a/include/net/rtnetlink.h
71334+++ b/include/net/rtnetlink.h
71335@@ -81,7 +81,7 @@ struct rtnl_link_ops {
71336 const struct net_device *dev);
71337 unsigned int (*get_num_tx_queues)(void);
71338 unsigned int (*get_num_rx_queues)(void);
71339-};
71340+} __do_const;
71341
71342 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
71343 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
71344diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
71345index 7fdf298..197e9f7 100644
71346--- a/include/net/sctp/sctp.h
71347+++ b/include/net/sctp/sctp.h
71348@@ -330,9 +330,9 @@ do { \
71349
71350 #else /* SCTP_DEBUG */
71351
71352-#define SCTP_DEBUG_PRINTK(whatever...)
71353-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
71354-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
71355+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
71356+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
71357+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
71358 #define SCTP_ENABLE_DEBUG
71359 #define SCTP_DISABLE_DEBUG
71360 #define SCTP_ASSERT(expr, str, func)
71361diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
71362index 2a82d13..62a31c2 100644
71363--- a/include/net/sctp/sm.h
71364+++ b/include/net/sctp/sm.h
71365@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
71366 typedef struct {
71367 sctp_state_fn_t *fn;
71368 const char *name;
71369-} sctp_sm_table_entry_t;
71370+} __do_const sctp_sm_table_entry_t;
71371
71372 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
71373 * currently in use.
71374@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
71375 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
71376
71377 /* Extern declarations for major data structures. */
71378-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
71379+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
71380
71381
71382 /* Get the size of a DATA chunk payload. */
71383diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
71384index fdeb85a..1329d95 100644
71385--- a/include/net/sctp/structs.h
71386+++ b/include/net/sctp/structs.h
71387@@ -517,7 +517,7 @@ struct sctp_pf {
71388 struct sctp_association *asoc);
71389 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
71390 struct sctp_af *af;
71391-};
71392+} __do_const;
71393
71394
71395 /* Structure to track chunk fragments that have been acked, but peer
71396diff --git a/include/net/sock.h b/include/net/sock.h
71397index 25afaa0..8bb0070 100644
71398--- a/include/net/sock.h
71399+++ b/include/net/sock.h
71400@@ -322,7 +322,7 @@ struct sock {
71401 #ifdef CONFIG_RPS
71402 __u32 sk_rxhash;
71403 #endif
71404- atomic_t sk_drops;
71405+ atomic_unchecked_t sk_drops;
71406 int sk_rcvbuf;
71407
71408 struct sk_filter __rcu *sk_filter;
71409@@ -1781,7 +1781,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
71410 }
71411
71412 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
71413- char __user *from, char *to,
71414+ char __user *from, unsigned char *to,
71415 int copy, int offset)
71416 {
71417 if (skb->ip_summed == CHECKSUM_NONE) {
71418@@ -2040,7 +2040,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
71419 }
71420 }
71421
71422-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
71423+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
71424
71425 /**
71426 * sk_page_frag - return an appropriate page_frag
71427diff --git a/include/net/tcp.h b/include/net/tcp.h
71428index aed42c7..43890c6 100644
71429--- a/include/net/tcp.h
71430+++ b/include/net/tcp.h
71431@@ -530,7 +530,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
71432 extern void tcp_xmit_retransmit_queue(struct sock *);
71433 extern void tcp_simple_retransmit(struct sock *);
71434 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
71435-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
71436+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
71437
71438 extern void tcp_send_probe0(struct sock *);
71439 extern void tcp_send_partial(struct sock *);
71440@@ -701,8 +701,8 @@ struct tcp_skb_cb {
71441 struct inet6_skb_parm h6;
71442 #endif
71443 } header; /* For incoming frames */
71444- __u32 seq; /* Starting sequence number */
71445- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
71446+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
71447+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
71448 __u32 when; /* used to compute rtt's */
71449 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
71450
71451@@ -716,7 +716,7 @@ struct tcp_skb_cb {
71452
71453 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
71454 /* 1 byte hole */
71455- __u32 ack_seq; /* Sequence number ACK'd */
71456+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
71457 };
71458
71459 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
71460diff --git a/include/net/xfrm.h b/include/net/xfrm.h
71461index 63445ed..d6fc34f 100644
71462--- a/include/net/xfrm.h
71463+++ b/include/net/xfrm.h
71464@@ -304,7 +304,7 @@ struct xfrm_policy_afinfo {
71465 struct net_device *dev,
71466 const struct flowi *fl);
71467 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
71468-};
71469+} __do_const;
71470
71471 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
71472 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
71473@@ -340,7 +340,7 @@ struct xfrm_state_afinfo {
71474 struct sk_buff *skb);
71475 int (*transport_finish)(struct sk_buff *skb,
71476 int async);
71477-};
71478+} __do_const;
71479
71480 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
71481 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
71482@@ -423,7 +423,7 @@ struct xfrm_mode {
71483 struct module *owner;
71484 unsigned int encap;
71485 int flags;
71486-};
71487+} __do_const;
71488
71489 /* Flags for xfrm_mode. */
71490 enum {
71491@@ -514,7 +514,7 @@ struct xfrm_policy {
71492 struct timer_list timer;
71493
71494 struct flow_cache_object flo;
71495- atomic_t genid;
71496+ atomic_unchecked_t genid;
71497 u32 priority;
71498 u32 index;
71499 struct xfrm_mark mark;
71500diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
71501index 1a046b1..ee0bef0 100644
71502--- a/include/rdma/iw_cm.h
71503+++ b/include/rdma/iw_cm.h
71504@@ -122,7 +122,7 @@ struct iw_cm_verbs {
71505 int backlog);
71506
71507 int (*destroy_listen)(struct iw_cm_id *cm_id);
71508-};
71509+} __no_const;
71510
71511 /**
71512 * iw_create_cm_id - Create an IW CM identifier.
71513diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
71514index 399162b..b337f1a 100644
71515--- a/include/scsi/libfc.h
71516+++ b/include/scsi/libfc.h
71517@@ -762,6 +762,7 @@ struct libfc_function_template {
71518 */
71519 void (*disc_stop_final) (struct fc_lport *);
71520 };
71521+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
71522
71523 /**
71524 * struct fc_disc - Discovery context
71525@@ -866,7 +867,7 @@ struct fc_lport {
71526 struct fc_vport *vport;
71527
71528 /* Operational Information */
71529- struct libfc_function_template tt;
71530+ libfc_function_template_no_const tt;
71531 u8 link_up;
71532 u8 qfull;
71533 enum fc_lport_state state;
71534diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
71535index e65c62e..aa2e5a2 100644
71536--- a/include/scsi/scsi_device.h
71537+++ b/include/scsi/scsi_device.h
71538@@ -170,9 +170,9 @@ struct scsi_device {
71539 unsigned int max_device_blocked; /* what device_blocked counts down from */
71540 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
71541
71542- atomic_t iorequest_cnt;
71543- atomic_t iodone_cnt;
71544- atomic_t ioerr_cnt;
71545+ atomic_unchecked_t iorequest_cnt;
71546+ atomic_unchecked_t iodone_cnt;
71547+ atomic_unchecked_t ioerr_cnt;
71548
71549 struct device sdev_gendev,
71550 sdev_dev;
71551diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
71552index b797e8f..8e2c3aa 100644
71553--- a/include/scsi/scsi_transport_fc.h
71554+++ b/include/scsi/scsi_transport_fc.h
71555@@ -751,7 +751,8 @@ struct fc_function_template {
71556 unsigned long show_host_system_hostname:1;
71557
71558 unsigned long disable_target_scan:1;
71559-};
71560+} __do_const;
71561+typedef struct fc_function_template __no_const fc_function_template_no_const;
71562
71563
71564 /**
71565diff --git a/include/sound/soc.h b/include/sound/soc.h
71566index bc56738..a4be132 100644
71567--- a/include/sound/soc.h
71568+++ b/include/sound/soc.h
71569@@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
71570 /* probe ordering - for components with runtime dependencies */
71571 int probe_order;
71572 int remove_order;
71573-};
71574+} __do_const;
71575
71576 /* SoC platform interface */
71577 struct snd_soc_platform_driver {
71578@@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
71579 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
71580 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
71581 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
71582-};
71583+} __do_const;
71584
71585 struct snd_soc_platform {
71586 const char *name;
71587diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
71588index 663e34a..91b306a 100644
71589--- a/include/target/target_core_base.h
71590+++ b/include/target/target_core_base.h
71591@@ -654,7 +654,7 @@ struct se_device {
71592 spinlock_t stats_lock;
71593 /* Active commands on this virtual SE device */
71594 atomic_t simple_cmds;
71595- atomic_t dev_ordered_id;
71596+ atomic_unchecked_t dev_ordered_id;
71597 atomic_t dev_ordered_sync;
71598 atomic_t dev_qf_count;
71599 int export_count;
71600diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
71601new file mode 100644
71602index 0000000..fb634b7
71603--- /dev/null
71604+++ b/include/trace/events/fs.h
71605@@ -0,0 +1,53 @@
71606+#undef TRACE_SYSTEM
71607+#define TRACE_SYSTEM fs
71608+
71609+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
71610+#define _TRACE_FS_H
71611+
71612+#include <linux/fs.h>
71613+#include <linux/tracepoint.h>
71614+
71615+TRACE_EVENT(do_sys_open,
71616+
71617+ TP_PROTO(const char *filename, int flags, int mode),
71618+
71619+ TP_ARGS(filename, flags, mode),
71620+
71621+ TP_STRUCT__entry(
71622+ __string( filename, filename )
71623+ __field( int, flags )
71624+ __field( int, mode )
71625+ ),
71626+
71627+ TP_fast_assign(
71628+ __assign_str(filename, filename);
71629+ __entry->flags = flags;
71630+ __entry->mode = mode;
71631+ ),
71632+
71633+ TP_printk("\"%s\" %x %o",
71634+ __get_str(filename), __entry->flags, __entry->mode)
71635+);
71636+
71637+TRACE_EVENT(open_exec,
71638+
71639+ TP_PROTO(const char *filename),
71640+
71641+ TP_ARGS(filename),
71642+
71643+ TP_STRUCT__entry(
71644+ __string( filename, filename )
71645+ ),
71646+
71647+ TP_fast_assign(
71648+ __assign_str(filename, filename);
71649+ ),
71650+
71651+ TP_printk("\"%s\"",
71652+ __get_str(filename))
71653+);
71654+
71655+#endif /* _TRACE_FS_H */
71656+
71657+/* This part must be outside protection */
71658+#include <trace/define_trace.h>
71659diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
71660index 1c09820..7f5ec79 100644
71661--- a/include/trace/events/irq.h
71662+++ b/include/trace/events/irq.h
71663@@ -36,7 +36,7 @@ struct softirq_action;
71664 */
71665 TRACE_EVENT(irq_handler_entry,
71666
71667- TP_PROTO(int irq, struct irqaction *action),
71668+ TP_PROTO(int irq, const struct irqaction *action),
71669
71670 TP_ARGS(irq, action),
71671
71672@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
71673 */
71674 TRACE_EVENT(irq_handler_exit,
71675
71676- TP_PROTO(int irq, struct irqaction *action, int ret),
71677+ TP_PROTO(int irq, const struct irqaction *action, int ret),
71678
71679 TP_ARGS(irq, action, ret),
71680
71681diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
71682index 7caf44c..23c6f27 100644
71683--- a/include/uapi/linux/a.out.h
71684+++ b/include/uapi/linux/a.out.h
71685@@ -39,6 +39,14 @@ enum machine_type {
71686 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
71687 };
71688
71689+/* Constants for the N_FLAGS field */
71690+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
71691+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
71692+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
71693+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
71694+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
71695+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
71696+
71697 #if !defined (N_MAGIC)
71698 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
71699 #endif
71700diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
71701index d876736..b36014e 100644
71702--- a/include/uapi/linux/byteorder/little_endian.h
71703+++ b/include/uapi/linux/byteorder/little_endian.h
71704@@ -42,51 +42,51 @@
71705
71706 static inline __le64 __cpu_to_le64p(const __u64 *p)
71707 {
71708- return (__force __le64)*p;
71709+ return (__force const __le64)*p;
71710 }
71711 static inline __u64 __le64_to_cpup(const __le64 *p)
71712 {
71713- return (__force __u64)*p;
71714+ return (__force const __u64)*p;
71715 }
71716 static inline __le32 __cpu_to_le32p(const __u32 *p)
71717 {
71718- return (__force __le32)*p;
71719+ return (__force const __le32)*p;
71720 }
71721 static inline __u32 __le32_to_cpup(const __le32 *p)
71722 {
71723- return (__force __u32)*p;
71724+ return (__force const __u32)*p;
71725 }
71726 static inline __le16 __cpu_to_le16p(const __u16 *p)
71727 {
71728- return (__force __le16)*p;
71729+ return (__force const __le16)*p;
71730 }
71731 static inline __u16 __le16_to_cpup(const __le16 *p)
71732 {
71733- return (__force __u16)*p;
71734+ return (__force const __u16)*p;
71735 }
71736 static inline __be64 __cpu_to_be64p(const __u64 *p)
71737 {
71738- return (__force __be64)__swab64p(p);
71739+ return (__force const __be64)__swab64p(p);
71740 }
71741 static inline __u64 __be64_to_cpup(const __be64 *p)
71742 {
71743- return __swab64p((__u64 *)p);
71744+ return __swab64p((const __u64 *)p);
71745 }
71746 static inline __be32 __cpu_to_be32p(const __u32 *p)
71747 {
71748- return (__force __be32)__swab32p(p);
71749+ return (__force const __be32)__swab32p(p);
71750 }
71751 static inline __u32 __be32_to_cpup(const __be32 *p)
71752 {
71753- return __swab32p((__u32 *)p);
71754+ return __swab32p((const __u32 *)p);
71755 }
71756 static inline __be16 __cpu_to_be16p(const __u16 *p)
71757 {
71758- return (__force __be16)__swab16p(p);
71759+ return (__force const __be16)__swab16p(p);
71760 }
71761 static inline __u16 __be16_to_cpup(const __be16 *p)
71762 {
71763- return __swab16p((__u16 *)p);
71764+ return __swab16p((const __u16 *)p);
71765 }
71766 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
71767 #define __le64_to_cpus(x) do { (void)(x); } while (0)
71768diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
71769index 126a817..d522bd1 100644
71770--- a/include/uapi/linux/elf.h
71771+++ b/include/uapi/linux/elf.h
71772@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
71773 #define PT_GNU_EH_FRAME 0x6474e550
71774
71775 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
71776+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
71777+
71778+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
71779+
71780+/* Constants for the e_flags field */
71781+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
71782+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
71783+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
71784+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
71785+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
71786+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
71787
71788 /*
71789 * Extended Numbering
71790@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
71791 #define DT_DEBUG 21
71792 #define DT_TEXTREL 22
71793 #define DT_JMPREL 23
71794+#define DT_FLAGS 30
71795+ #define DF_TEXTREL 0x00000004
71796 #define DT_ENCODING 32
71797 #define OLD_DT_LOOS 0x60000000
71798 #define DT_LOOS 0x6000000d
71799@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
71800 #define PF_W 0x2
71801 #define PF_X 0x1
71802
71803+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
71804+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
71805+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
71806+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
71807+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
71808+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
71809+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
71810+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
71811+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
71812+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
71813+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
71814+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
71815+
71816 typedef struct elf32_phdr{
71817 Elf32_Word p_type;
71818 Elf32_Off p_offset;
71819@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
71820 #define EI_OSABI 7
71821 #define EI_PAD 8
71822
71823+#define EI_PAX 14
71824+
71825 #define ELFMAG0 0x7f /* EI_MAG */
71826 #define ELFMAG1 'E'
71827 #define ELFMAG2 'L'
71828diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
71829index aa169c4..6a2771d 100644
71830--- a/include/uapi/linux/personality.h
71831+++ b/include/uapi/linux/personality.h
71832@@ -30,6 +30,7 @@ enum {
71833 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
71834 ADDR_NO_RANDOMIZE | \
71835 ADDR_COMPAT_LAYOUT | \
71836+ ADDR_LIMIT_3GB | \
71837 MMAP_PAGE_ZERO)
71838
71839 /*
71840diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
71841index 7530e74..e714828 100644
71842--- a/include/uapi/linux/screen_info.h
71843+++ b/include/uapi/linux/screen_info.h
71844@@ -43,7 +43,8 @@ struct screen_info {
71845 __u16 pages; /* 0x32 */
71846 __u16 vesa_attributes; /* 0x34 */
71847 __u32 capabilities; /* 0x36 */
71848- __u8 _reserved[6]; /* 0x3a */
71849+ __u16 vesapm_size; /* 0x3a */
71850+ __u8 _reserved[4]; /* 0x3c */
71851 } __attribute__((packed));
71852
71853 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
71854diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
71855index 6d67213..8dab561 100644
71856--- a/include/uapi/linux/sysctl.h
71857+++ b/include/uapi/linux/sysctl.h
71858@@ -155,7 +155,11 @@ enum
71859 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
71860 };
71861
71862-
71863+#ifdef CONFIG_PAX_SOFTMODE
71864+enum {
71865+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
71866+};
71867+#endif
71868
71869 /* CTL_VM names: */
71870 enum
71871diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
71872index 26607bd..588b65f 100644
71873--- a/include/uapi/linux/xattr.h
71874+++ b/include/uapi/linux/xattr.h
71875@@ -60,5 +60,9 @@
71876 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
71877 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
71878
71879+/* User namespace */
71880+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
71881+#define XATTR_PAX_FLAGS_SUFFIX "flags"
71882+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
71883
71884 #endif /* _UAPI_LINUX_XATTR_H */
71885diff --git a/include/video/udlfb.h b/include/video/udlfb.h
71886index f9466fa..f4e2b81 100644
71887--- a/include/video/udlfb.h
71888+++ b/include/video/udlfb.h
71889@@ -53,10 +53,10 @@ struct dlfb_data {
71890 u32 pseudo_palette[256];
71891 int blank_mode; /*one of FB_BLANK_ */
71892 /* blit-only rendering path metrics, exposed through sysfs */
71893- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
71894- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
71895- atomic_t bytes_sent; /* to usb, after compression including overhead */
71896- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
71897+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
71898+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
71899+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
71900+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
71901 };
71902
71903 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
71904diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
71905index 0993a22..32ba2fe 100644
71906--- a/include/video/uvesafb.h
71907+++ b/include/video/uvesafb.h
71908@@ -177,6 +177,7 @@ struct uvesafb_par {
71909 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
71910 u8 pmi_setpal; /* PMI for palette changes */
71911 u16 *pmi_base; /* protected mode interface location */
71912+ u8 *pmi_code; /* protected mode code location */
71913 void *pmi_start;
71914 void *pmi_pal;
71915 u8 *vbe_state_orig; /*
71916diff --git a/init/Kconfig b/init/Kconfig
71917index be8b7f5..1eeca9b 100644
71918--- a/init/Kconfig
71919+++ b/init/Kconfig
71920@@ -990,6 +990,7 @@ endif # CGROUPS
71921
71922 config CHECKPOINT_RESTORE
71923 bool "Checkpoint/restore support" if EXPERT
71924+ depends on !GRKERNSEC
71925 default n
71926 help
71927 Enables additional kernel features in a sake of checkpoint/restore.
71928@@ -1468,7 +1469,7 @@ config SLUB_DEBUG
71929
71930 config COMPAT_BRK
71931 bool "Disable heap randomization"
71932- default y
71933+ default n
71934 help
71935 Randomizing heap placement makes heap exploits harder, but it
71936 also breaks ancient binaries (including anything libc5 based).
71937@@ -1711,7 +1712,7 @@ config INIT_ALL_POSSIBLE
71938 config STOP_MACHINE
71939 bool
71940 default y
71941- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
71942+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
71943 help
71944 Need stop_machine() primitive.
71945
71946diff --git a/init/Makefile b/init/Makefile
71947index 7bc47ee..6da2dc7 100644
71948--- a/init/Makefile
71949+++ b/init/Makefile
71950@@ -2,6 +2,9 @@
71951 # Makefile for the linux kernel.
71952 #
71953
71954+ccflags-y := $(GCC_PLUGINS_CFLAGS)
71955+asflags-y := $(GCC_PLUGINS_AFLAGS)
71956+
71957 obj-y := main.o version.o mounts.o
71958 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
71959 obj-y += noinitramfs.o
71960diff --git a/init/do_mounts.c b/init/do_mounts.c
71961index 1d1b634..a1c810f 100644
71962--- a/init/do_mounts.c
71963+++ b/init/do_mounts.c
71964@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
71965 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
71966 {
71967 struct super_block *s;
71968- int err = sys_mount(name, "/root", fs, flags, data);
71969+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
71970 if (err)
71971 return err;
71972
71973- sys_chdir("/root");
71974+ sys_chdir((const char __force_user *)"/root");
71975 s = current->fs->pwd.dentry->d_sb;
71976 ROOT_DEV = s->s_dev;
71977 printk(KERN_INFO
71978@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
71979 va_start(args, fmt);
71980 vsprintf(buf, fmt, args);
71981 va_end(args);
71982- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
71983+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
71984 if (fd >= 0) {
71985 sys_ioctl(fd, FDEJECT, 0);
71986 sys_close(fd);
71987 }
71988 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
71989- fd = sys_open("/dev/console", O_RDWR, 0);
71990+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
71991 if (fd >= 0) {
71992 sys_ioctl(fd, TCGETS, (long)&termios);
71993 termios.c_lflag &= ~ICANON;
71994 sys_ioctl(fd, TCSETSF, (long)&termios);
71995- sys_read(fd, &c, 1);
71996+ sys_read(fd, (char __user *)&c, 1);
71997 termios.c_lflag |= ICANON;
71998 sys_ioctl(fd, TCSETSF, (long)&termios);
71999 sys_close(fd);
72000@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
72001 mount_root();
72002 out:
72003 devtmpfs_mount("dev");
72004- sys_mount(".", "/", NULL, MS_MOVE, NULL);
72005- sys_chroot(".");
72006+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
72007+ sys_chroot((const char __force_user *)".");
72008 }
72009diff --git a/init/do_mounts.h b/init/do_mounts.h
72010index f5b978a..69dbfe8 100644
72011--- a/init/do_mounts.h
72012+++ b/init/do_mounts.h
72013@@ -15,15 +15,15 @@ extern int root_mountflags;
72014
72015 static inline int create_dev(char *name, dev_t dev)
72016 {
72017- sys_unlink(name);
72018- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
72019+ sys_unlink((char __force_user *)name);
72020+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
72021 }
72022
72023 #if BITS_PER_LONG == 32
72024 static inline u32 bstat(char *name)
72025 {
72026 struct stat64 stat;
72027- if (sys_stat64(name, &stat) != 0)
72028+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
72029 return 0;
72030 if (!S_ISBLK(stat.st_mode))
72031 return 0;
72032@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
72033 static inline u32 bstat(char *name)
72034 {
72035 struct stat stat;
72036- if (sys_newstat(name, &stat) != 0)
72037+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
72038 return 0;
72039 if (!S_ISBLK(stat.st_mode))
72040 return 0;
72041diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
72042index f9acf71..1e19144 100644
72043--- a/init/do_mounts_initrd.c
72044+++ b/init/do_mounts_initrd.c
72045@@ -58,8 +58,8 @@ static void __init handle_initrd(void)
72046 create_dev("/dev/root.old", Root_RAM0);
72047 /* mount initrd on rootfs' /root */
72048 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
72049- sys_mkdir("/old", 0700);
72050- sys_chdir("/old");
72051+ sys_mkdir((const char __force_user *)"/old", 0700);
72052+ sys_chdir((const char __force_user *)"/old");
72053
72054 /*
72055 * In case that a resume from disk is carried out by linuxrc or one of
72056@@ -73,31 +73,31 @@ static void __init handle_initrd(void)
72057 current->flags &= ~PF_FREEZER_SKIP;
72058
72059 /* move initrd to rootfs' /old */
72060- sys_mount("..", ".", NULL, MS_MOVE, NULL);
72061+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
72062 /* switch root and cwd back to / of rootfs */
72063- sys_chroot("..");
72064+ sys_chroot((const char __force_user *)"..");
72065
72066 if (new_decode_dev(real_root_dev) == Root_RAM0) {
72067- sys_chdir("/old");
72068+ sys_chdir((const char __force_user *)"/old");
72069 return;
72070 }
72071
72072- sys_chdir("/");
72073+ sys_chdir((const char __force_user *)"/");
72074 ROOT_DEV = new_decode_dev(real_root_dev);
72075 mount_root();
72076
72077 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
72078- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
72079+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
72080 if (!error)
72081 printk("okay\n");
72082 else {
72083- int fd = sys_open("/dev/root.old", O_RDWR, 0);
72084+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
72085 if (error == -ENOENT)
72086 printk("/initrd does not exist. Ignored.\n");
72087 else
72088 printk("failed\n");
72089 printk(KERN_NOTICE "Unmounting old root\n");
72090- sys_umount("/old", MNT_DETACH);
72091+ sys_umount((char __force_user *)"/old", MNT_DETACH);
72092 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
72093 if (fd < 0) {
72094 error = fd;
72095@@ -120,11 +120,11 @@ int __init initrd_load(void)
72096 * mounted in the normal path.
72097 */
72098 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
72099- sys_unlink("/initrd.image");
72100+ sys_unlink((const char __force_user *)"/initrd.image");
72101 handle_initrd();
72102 return 1;
72103 }
72104 }
72105- sys_unlink("/initrd.image");
72106+ sys_unlink((const char __force_user *)"/initrd.image");
72107 return 0;
72108 }
72109diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
72110index 8cb6db5..d729f50 100644
72111--- a/init/do_mounts_md.c
72112+++ b/init/do_mounts_md.c
72113@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
72114 partitioned ? "_d" : "", minor,
72115 md_setup_args[ent].device_names);
72116
72117- fd = sys_open(name, 0, 0);
72118+ fd = sys_open((char __force_user *)name, 0, 0);
72119 if (fd < 0) {
72120 printk(KERN_ERR "md: open failed - cannot start "
72121 "array %s\n", name);
72122@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
72123 * array without it
72124 */
72125 sys_close(fd);
72126- fd = sys_open(name, 0, 0);
72127+ fd = sys_open((char __force_user *)name, 0, 0);
72128 sys_ioctl(fd, BLKRRPART, 0);
72129 }
72130 sys_close(fd);
72131@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
72132
72133 wait_for_device_probe();
72134
72135- fd = sys_open("/dev/md0", 0, 0);
72136+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
72137 if (fd >= 0) {
72138 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
72139 sys_close(fd);
72140diff --git a/init/init_task.c b/init/init_task.c
72141index 8b2f399..f0797c9 100644
72142--- a/init/init_task.c
72143+++ b/init/init_task.c
72144@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
72145 * Initial thread structure. Alignment of this is handled by a special
72146 * linker map entry.
72147 */
72148+#ifdef CONFIG_X86
72149+union thread_union init_thread_union __init_task_data;
72150+#else
72151 union thread_union init_thread_union __init_task_data =
72152 { INIT_THREAD_INFO(init_task) };
72153+#endif
72154diff --git a/init/initramfs.c b/init/initramfs.c
72155index 84c6bf1..8899338 100644
72156--- a/init/initramfs.c
72157+++ b/init/initramfs.c
72158@@ -84,7 +84,7 @@ static void __init free_hash(void)
72159 }
72160 }
72161
72162-static long __init do_utime(char *filename, time_t mtime)
72163+static long __init do_utime(char __force_user *filename, time_t mtime)
72164 {
72165 struct timespec t[2];
72166
72167@@ -119,7 +119,7 @@ static void __init dir_utime(void)
72168 struct dir_entry *de, *tmp;
72169 list_for_each_entry_safe(de, tmp, &dir_list, list) {
72170 list_del(&de->list);
72171- do_utime(de->name, de->mtime);
72172+ do_utime((char __force_user *)de->name, de->mtime);
72173 kfree(de->name);
72174 kfree(de);
72175 }
72176@@ -281,7 +281,7 @@ static int __init maybe_link(void)
72177 if (nlink >= 2) {
72178 char *old = find_link(major, minor, ino, mode, collected);
72179 if (old)
72180- return (sys_link(old, collected) < 0) ? -1 : 1;
72181+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
72182 }
72183 return 0;
72184 }
72185@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
72186 {
72187 struct stat st;
72188
72189- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
72190+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
72191 if (S_ISDIR(st.st_mode))
72192- sys_rmdir(path);
72193+ sys_rmdir((char __force_user *)path);
72194 else
72195- sys_unlink(path);
72196+ sys_unlink((char __force_user *)path);
72197 }
72198 }
72199
72200@@ -315,7 +315,7 @@ static int __init do_name(void)
72201 int openflags = O_WRONLY|O_CREAT;
72202 if (ml != 1)
72203 openflags |= O_TRUNC;
72204- wfd = sys_open(collected, openflags, mode);
72205+ wfd = sys_open((char __force_user *)collected, openflags, mode);
72206
72207 if (wfd >= 0) {
72208 sys_fchown(wfd, uid, gid);
72209@@ -327,17 +327,17 @@ static int __init do_name(void)
72210 }
72211 }
72212 } else if (S_ISDIR(mode)) {
72213- sys_mkdir(collected, mode);
72214- sys_chown(collected, uid, gid);
72215- sys_chmod(collected, mode);
72216+ sys_mkdir((char __force_user *)collected, mode);
72217+ sys_chown((char __force_user *)collected, uid, gid);
72218+ sys_chmod((char __force_user *)collected, mode);
72219 dir_add(collected, mtime);
72220 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
72221 S_ISFIFO(mode) || S_ISSOCK(mode)) {
72222 if (maybe_link() == 0) {
72223- sys_mknod(collected, mode, rdev);
72224- sys_chown(collected, uid, gid);
72225- sys_chmod(collected, mode);
72226- do_utime(collected, mtime);
72227+ sys_mknod((char __force_user *)collected, mode, rdev);
72228+ sys_chown((char __force_user *)collected, uid, gid);
72229+ sys_chmod((char __force_user *)collected, mode);
72230+ do_utime((char __force_user *)collected, mtime);
72231 }
72232 }
72233 return 0;
72234@@ -346,15 +346,15 @@ static int __init do_name(void)
72235 static int __init do_copy(void)
72236 {
72237 if (count >= body_len) {
72238- sys_write(wfd, victim, body_len);
72239+ sys_write(wfd, (char __force_user *)victim, body_len);
72240 sys_close(wfd);
72241- do_utime(vcollected, mtime);
72242+ do_utime((char __force_user *)vcollected, mtime);
72243 kfree(vcollected);
72244 eat(body_len);
72245 state = SkipIt;
72246 return 0;
72247 } else {
72248- sys_write(wfd, victim, count);
72249+ sys_write(wfd, (char __force_user *)victim, count);
72250 body_len -= count;
72251 eat(count);
72252 return 1;
72253@@ -365,9 +365,9 @@ static int __init do_symlink(void)
72254 {
72255 collected[N_ALIGN(name_len) + body_len] = '\0';
72256 clean_path(collected, 0);
72257- sys_symlink(collected + N_ALIGN(name_len), collected);
72258- sys_lchown(collected, uid, gid);
72259- do_utime(collected, mtime);
72260+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
72261+ sys_lchown((char __force_user *)collected, uid, gid);
72262+ do_utime((char __force_user *)collected, mtime);
72263 state = SkipIt;
72264 next_state = Reset;
72265 return 0;
72266diff --git a/init/main.c b/init/main.c
72267index cee4b5c..9c267d9 100644
72268--- a/init/main.c
72269+++ b/init/main.c
72270@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
72271 extern void tc_init(void);
72272 #endif
72273
72274+extern void grsecurity_init(void);
72275+
72276 /*
72277 * Debug helper: via this flag we know that we are in 'early bootup code'
72278 * where only the boot processor is running with IRQ disabled. This means
72279@@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
72280
72281 __setup("reset_devices", set_reset_devices);
72282
72283+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72284+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
72285+static int __init setup_grsec_proc_gid(char *str)
72286+{
72287+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
72288+ return 1;
72289+}
72290+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
72291+#endif
72292+
72293+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
72294+extern char pax_enter_kernel_user[];
72295+extern char pax_exit_kernel_user[];
72296+extern pgdval_t clone_pgd_mask;
72297+#endif
72298+
72299+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
72300+static int __init setup_pax_nouderef(char *str)
72301+{
72302+#ifdef CONFIG_X86_32
72303+ unsigned int cpu;
72304+ struct desc_struct *gdt;
72305+
72306+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
72307+ gdt = get_cpu_gdt_table(cpu);
72308+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
72309+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
72310+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
72311+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
72312+ }
72313+ loadsegment(ds, __KERNEL_DS);
72314+ loadsegment(es, __KERNEL_DS);
72315+ loadsegment(ss, __KERNEL_DS);
72316+#else
72317+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
72318+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
72319+ clone_pgd_mask = ~(pgdval_t)0UL;
72320+#endif
72321+
72322+ return 0;
72323+}
72324+early_param("pax_nouderef", setup_pax_nouderef);
72325+#endif
72326+
72327+#ifdef CONFIG_PAX_SOFTMODE
72328+int pax_softmode;
72329+
72330+static int __init setup_pax_softmode(char *str)
72331+{
72332+ get_option(&str, &pax_softmode);
72333+ return 1;
72334+}
72335+__setup("pax_softmode=", setup_pax_softmode);
72336+#endif
72337+
72338 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
72339 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
72340 static const char *panic_later, *panic_param;
72341@@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
72342 {
72343 int count = preempt_count();
72344 int ret;
72345+ const char *msg1 = "", *msg2 = "";
72346
72347 if (initcall_debug)
72348 ret = do_one_initcall_debug(fn);
72349@@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
72350 sprintf(msgbuf, "error code %d ", ret);
72351
72352 if (preempt_count() != count) {
72353- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
72354+ msg1 = " preemption imbalance";
72355 preempt_count() = count;
72356 }
72357 if (irqs_disabled()) {
72358- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
72359+ msg2 = " disabled interrupts";
72360 local_irq_enable();
72361 }
72362- if (msgbuf[0]) {
72363- printk("initcall %pF returned with %s\n", fn, msgbuf);
72364+ if (msgbuf[0] || *msg1 || *msg2) {
72365+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
72366 }
72367
72368 return ret;
72369@@ -743,6 +801,10 @@ static char *initcall_level_names[] __initdata = {
72370 "late",
72371 };
72372
72373+#ifdef CONFIG_PAX_LATENT_ENTROPY
72374+u64 latent_entropy;
72375+#endif
72376+
72377 static void __init do_initcall_level(int level)
72378 {
72379 extern const struct kernel_param __start___param[], __stop___param[];
72380@@ -755,8 +817,14 @@ static void __init do_initcall_level(int level)
72381 level, level,
72382 &repair_env_string);
72383
72384- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
72385+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
72386 do_one_initcall(*fn);
72387+
72388+#ifdef CONFIG_PAX_LATENT_ENTROPY
72389+ add_device_randomness(&latent_entropy, sizeof(latent_entropy));
72390+#endif
72391+
72392+ }
72393 }
72394
72395 static void __init do_initcalls(void)
72396@@ -790,8 +858,14 @@ static void __init do_pre_smp_initcalls(void)
72397 {
72398 initcall_t *fn;
72399
72400- for (fn = __initcall_start; fn < __initcall0_start; fn++)
72401+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
72402 do_one_initcall(*fn);
72403+
72404+#ifdef CONFIG_PAX_LATENT_ENTROPY
72405+ add_device_randomness(&latent_entropy, sizeof(latent_entropy));
72406+#endif
72407+
72408+ }
72409 }
72410
72411 static int run_init_process(const char *init_filename)
72412@@ -877,7 +951,7 @@ static noinline void __init kernel_init_freeable(void)
72413 do_basic_setup();
72414
72415 /* Open the /dev/console on the rootfs, this should never fail */
72416- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
72417+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
72418 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
72419
72420 (void) sys_dup(0);
72421@@ -890,11 +964,13 @@ static noinline void __init kernel_init_freeable(void)
72422 if (!ramdisk_execute_command)
72423 ramdisk_execute_command = "/init";
72424
72425- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
72426+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
72427 ramdisk_execute_command = NULL;
72428 prepare_namespace();
72429 }
72430
72431+ grsecurity_init();
72432+
72433 /*
72434 * Ok, we have completed the initial bootup, and
72435 * we're essentially up and running. Get rid of the
72436diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
72437index 130dfec..cc88451 100644
72438--- a/ipc/ipc_sysctl.c
72439+++ b/ipc/ipc_sysctl.c
72440@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
72441 static int proc_ipc_dointvec(ctl_table *table, int write,
72442 void __user *buffer, size_t *lenp, loff_t *ppos)
72443 {
72444- struct ctl_table ipc_table;
72445+ ctl_table_no_const ipc_table;
72446
72447 memcpy(&ipc_table, table, sizeof(ipc_table));
72448 ipc_table.data = get_ipc(table);
72449@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
72450 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
72451 void __user *buffer, size_t *lenp, loff_t *ppos)
72452 {
72453- struct ctl_table ipc_table;
72454+ ctl_table_no_const ipc_table;
72455
72456 memcpy(&ipc_table, table, sizeof(ipc_table));
72457 ipc_table.data = get_ipc(table);
72458@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
72459 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
72460 void __user *buffer, size_t *lenp, loff_t *ppos)
72461 {
72462- struct ctl_table ipc_table;
72463+ ctl_table_no_const ipc_table;
72464 size_t lenp_bef = *lenp;
72465 int rc;
72466
72467@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
72468 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
72469 void __user *buffer, size_t *lenp, loff_t *ppos)
72470 {
72471- struct ctl_table ipc_table;
72472+ ctl_table_no_const ipc_table;
72473 memcpy(&ipc_table, table, sizeof(ipc_table));
72474 ipc_table.data = get_ipc(table);
72475
72476@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
72477 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
72478 void __user *buffer, size_t *lenp, loff_t *ppos)
72479 {
72480- struct ctl_table ipc_table;
72481+ ctl_table_no_const ipc_table;
72482 size_t lenp_bef = *lenp;
72483 int oldval;
72484 int rc;
72485diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
72486index 383d638..943fdbb 100644
72487--- a/ipc/mq_sysctl.c
72488+++ b/ipc/mq_sysctl.c
72489@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
72490 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
72491 void __user *buffer, size_t *lenp, loff_t *ppos)
72492 {
72493- struct ctl_table mq_table;
72494+ ctl_table_no_const mq_table;
72495 memcpy(&mq_table, table, sizeof(mq_table));
72496 mq_table.data = get_mq(table);
72497
72498diff --git a/ipc/mqueue.c b/ipc/mqueue.c
72499index 71a3ca1..cc330ee 100644
72500--- a/ipc/mqueue.c
72501+++ b/ipc/mqueue.c
72502@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
72503 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
72504 info->attr.mq_msgsize);
72505
72506+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
72507 spin_lock(&mq_lock);
72508 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
72509 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
72510diff --git a/ipc/msg.c b/ipc/msg.c
72511index 950572f..266c15f 100644
72512--- a/ipc/msg.c
72513+++ b/ipc/msg.c
72514@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
72515 return security_msg_queue_associate(msq, msgflg);
72516 }
72517
72518+static struct ipc_ops msg_ops = {
72519+ .getnew = newque,
72520+ .associate = msg_security,
72521+ .more_checks = NULL
72522+};
72523+
72524 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
72525 {
72526 struct ipc_namespace *ns;
72527- struct ipc_ops msg_ops;
72528 struct ipc_params msg_params;
72529
72530 ns = current->nsproxy->ipc_ns;
72531
72532- msg_ops.getnew = newque;
72533- msg_ops.associate = msg_security;
72534- msg_ops.more_checks = NULL;
72535-
72536 msg_params.key = key;
72537 msg_params.flg = msgflg;
72538
72539diff --git a/ipc/sem.c b/ipc/sem.c
72540index 58d31f1..cce7a55 100644
72541--- a/ipc/sem.c
72542+++ b/ipc/sem.c
72543@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
72544 return 0;
72545 }
72546
72547+static struct ipc_ops sem_ops = {
72548+ .getnew = newary,
72549+ .associate = sem_security,
72550+ .more_checks = sem_more_checks
72551+};
72552+
72553 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
72554 {
72555 struct ipc_namespace *ns;
72556- struct ipc_ops sem_ops;
72557 struct ipc_params sem_params;
72558
72559 ns = current->nsproxy->ipc_ns;
72560@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
72561 if (nsems < 0 || nsems > ns->sc_semmsl)
72562 return -EINVAL;
72563
72564- sem_ops.getnew = newary;
72565- sem_ops.associate = sem_security;
72566- sem_ops.more_checks = sem_more_checks;
72567-
72568 sem_params.key = key;
72569 sem_params.flg = semflg;
72570 sem_params.u.nsems = nsems;
72571diff --git a/ipc/shm.c b/ipc/shm.c
72572index 4fa6d8f..55cff14 100644
72573--- a/ipc/shm.c
72574+++ b/ipc/shm.c
72575@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
72576 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
72577 #endif
72578
72579+#ifdef CONFIG_GRKERNSEC
72580+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72581+ const time_t shm_createtime, const kuid_t cuid,
72582+ const int shmid);
72583+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72584+ const time_t shm_createtime);
72585+#endif
72586+
72587 void shm_init_ns(struct ipc_namespace *ns)
72588 {
72589 ns->shm_ctlmax = SHMMAX;
72590@@ -521,6 +529,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
72591 shp->shm_lprid = 0;
72592 shp->shm_atim = shp->shm_dtim = 0;
72593 shp->shm_ctim = get_seconds();
72594+#ifdef CONFIG_GRKERNSEC
72595+ {
72596+ struct timespec timeval;
72597+ do_posix_clock_monotonic_gettime(&timeval);
72598+
72599+ shp->shm_createtime = timeval.tv_sec;
72600+ }
72601+#endif
72602 shp->shm_segsz = size;
72603 shp->shm_nattch = 0;
72604 shp->shm_file = file;
72605@@ -572,18 +588,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
72606 return 0;
72607 }
72608
72609+static struct ipc_ops shm_ops = {
72610+ .getnew = newseg,
72611+ .associate = shm_security,
72612+ .more_checks = shm_more_checks
72613+};
72614+
72615 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
72616 {
72617 struct ipc_namespace *ns;
72618- struct ipc_ops shm_ops;
72619 struct ipc_params shm_params;
72620
72621 ns = current->nsproxy->ipc_ns;
72622
72623- shm_ops.getnew = newseg;
72624- shm_ops.associate = shm_security;
72625- shm_ops.more_checks = shm_more_checks;
72626-
72627 shm_params.key = key;
72628 shm_params.flg = shmflg;
72629 shm_params.u.size = size;
72630@@ -1004,6 +1021,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
72631 f_mode = FMODE_READ | FMODE_WRITE;
72632 }
72633 if (shmflg & SHM_EXEC) {
72634+
72635+#ifdef CONFIG_PAX_MPROTECT
72636+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
72637+ goto out;
72638+#endif
72639+
72640 prot |= PROT_EXEC;
72641 acc_mode |= S_IXUGO;
72642 }
72643@@ -1027,9 +1050,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
72644 if (err)
72645 goto out_unlock;
72646
72647+#ifdef CONFIG_GRKERNSEC
72648+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
72649+ shp->shm_perm.cuid, shmid) ||
72650+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
72651+ err = -EACCES;
72652+ goto out_unlock;
72653+ }
72654+#endif
72655+
72656 path = shp->shm_file->f_path;
72657 path_get(&path);
72658 shp->shm_nattch++;
72659+#ifdef CONFIG_GRKERNSEC
72660+ shp->shm_lapid = current->pid;
72661+#endif
72662 size = i_size_read(path.dentry->d_inode);
72663 shm_unlock(shp);
72664
72665diff --git a/kernel/acct.c b/kernel/acct.c
72666index 051e071..15e0920 100644
72667--- a/kernel/acct.c
72668+++ b/kernel/acct.c
72669@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
72670 */
72671 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
72672 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
72673- file->f_op->write(file, (char *)&ac,
72674+ file->f_op->write(file, (char __force_user *)&ac,
72675 sizeof(acct_t), &file->f_pos);
72676 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
72677 set_fs(fs);
72678diff --git a/kernel/audit.c b/kernel/audit.c
72679index d596e53..dbef3c3 100644
72680--- a/kernel/audit.c
72681+++ b/kernel/audit.c
72682@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
72683 3) suppressed due to audit_rate_limit
72684 4) suppressed due to audit_backlog_limit
72685 */
72686-static atomic_t audit_lost = ATOMIC_INIT(0);
72687+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
72688
72689 /* The netlink socket. */
72690 static struct sock *audit_sock;
72691@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
72692 unsigned long now;
72693 int print;
72694
72695- atomic_inc(&audit_lost);
72696+ atomic_inc_unchecked(&audit_lost);
72697
72698 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
72699
72700@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
72701 printk(KERN_WARNING
72702 "audit: audit_lost=%d audit_rate_limit=%d "
72703 "audit_backlog_limit=%d\n",
72704- atomic_read(&audit_lost),
72705+ atomic_read_unchecked(&audit_lost),
72706 audit_rate_limit,
72707 audit_backlog_limit);
72708 audit_panic(message);
72709@@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
72710 status_set.pid = audit_pid;
72711 status_set.rate_limit = audit_rate_limit;
72712 status_set.backlog_limit = audit_backlog_limit;
72713- status_set.lost = atomic_read(&audit_lost);
72714+ status_set.lost = atomic_read_unchecked(&audit_lost);
72715 status_set.backlog = skb_queue_len(&audit_skb_queue);
72716 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
72717 &status_set, sizeof(status_set));
72718diff --git a/kernel/auditsc.c b/kernel/auditsc.c
72719index a371f85..da826c1 100644
72720--- a/kernel/auditsc.c
72721+++ b/kernel/auditsc.c
72722@@ -2292,7 +2292,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
72723 }
72724
72725 /* global counter which is incremented every time something logs in */
72726-static atomic_t session_id = ATOMIC_INIT(0);
72727+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
72728
72729 /**
72730 * audit_set_loginuid - set current task's audit_context loginuid
72731@@ -2316,7 +2316,7 @@ int audit_set_loginuid(kuid_t loginuid)
72732 return -EPERM;
72733 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
72734
72735- sessionid = atomic_inc_return(&session_id);
72736+ sessionid = atomic_inc_return_unchecked(&session_id);
72737 if (context && context->in_syscall) {
72738 struct audit_buffer *ab;
72739
72740diff --git a/kernel/capability.c b/kernel/capability.c
72741index 493d972..f87dfbd 100644
72742--- a/kernel/capability.c
72743+++ b/kernel/capability.c
72744@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
72745 * before modification is attempted and the application
72746 * fails.
72747 */
72748+ if (tocopy > ARRAY_SIZE(kdata))
72749+ return -EFAULT;
72750+
72751 if (copy_to_user(dataptr, kdata, tocopy
72752 * sizeof(struct __user_cap_data_struct))) {
72753 return -EFAULT;
72754@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
72755 int ret;
72756
72757 rcu_read_lock();
72758- ret = security_capable(__task_cred(t), ns, cap);
72759+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
72760+ gr_task_is_capable(t, __task_cred(t), cap);
72761 rcu_read_unlock();
72762
72763- return (ret == 0);
72764+ return ret;
72765 }
72766
72767 /**
72768@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
72769 int ret;
72770
72771 rcu_read_lock();
72772- ret = security_capable_noaudit(__task_cred(t), ns, cap);
72773+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
72774 rcu_read_unlock();
72775
72776- return (ret == 0);
72777+ return ret;
72778 }
72779
72780 /**
72781@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
72782 BUG();
72783 }
72784
72785- if (security_capable(current_cred(), ns, cap) == 0) {
72786+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
72787 current->flags |= PF_SUPERPRIV;
72788 return true;
72789 }
72790@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
72791 }
72792 EXPORT_SYMBOL(ns_capable);
72793
72794+bool ns_capable_nolog(struct user_namespace *ns, int cap)
72795+{
72796+ if (unlikely(!cap_valid(cap))) {
72797+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
72798+ BUG();
72799+ }
72800+
72801+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
72802+ current->flags |= PF_SUPERPRIV;
72803+ return true;
72804+ }
72805+ return false;
72806+}
72807+EXPORT_SYMBOL(ns_capable_nolog);
72808+
72809 /**
72810 * capable - Determine if the current task has a superior capability in effect
72811 * @cap: The capability to be tested for
72812@@ -408,6 +427,12 @@ bool capable(int cap)
72813 }
72814 EXPORT_SYMBOL(capable);
72815
72816+bool capable_nolog(int cap)
72817+{
72818+ return ns_capable_nolog(&init_user_ns, cap);
72819+}
72820+EXPORT_SYMBOL(capable_nolog);
72821+
72822 /**
72823 * nsown_capable - Check superior capability to one's own user_ns
72824 * @cap: The capability in question
72825@@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
72826
72827 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
72828 }
72829+
72830+bool inode_capable_nolog(const struct inode *inode, int cap)
72831+{
72832+ struct user_namespace *ns = current_user_ns();
72833+
72834+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
72835+}
72836diff --git a/kernel/cgroup.c b/kernel/cgroup.c
72837index 1e23664..570a83d 100644
72838--- a/kernel/cgroup.c
72839+++ b/kernel/cgroup.c
72840@@ -5543,7 +5543,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
72841 struct css_set *cg = link->cg;
72842 struct task_struct *task;
72843 int count = 0;
72844- seq_printf(seq, "css_set %p\n", cg);
72845+ seq_printf(seq, "css_set %pK\n", cg);
72846 list_for_each_entry(task, &cg->tasks, cg_list) {
72847 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
72848 seq_puts(seq, " ...\n");
72849diff --git a/kernel/compat.c b/kernel/compat.c
72850index 36700e9..73d770c 100644
72851--- a/kernel/compat.c
72852+++ b/kernel/compat.c
72853@@ -13,6 +13,7 @@
72854
72855 #include <linux/linkage.h>
72856 #include <linux/compat.h>
72857+#include <linux/module.h>
72858 #include <linux/errno.h>
72859 #include <linux/time.h>
72860 #include <linux/signal.h>
72861@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
72862 mm_segment_t oldfs;
72863 long ret;
72864
72865- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
72866+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
72867 oldfs = get_fs();
72868 set_fs(KERNEL_DS);
72869 ret = hrtimer_nanosleep_restart(restart);
72870@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
72871 oldfs = get_fs();
72872 set_fs(KERNEL_DS);
72873 ret = hrtimer_nanosleep(&tu,
72874- rmtp ? (struct timespec __user *)&rmt : NULL,
72875+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
72876 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
72877 set_fs(oldfs);
72878
72879@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
72880 mm_segment_t old_fs = get_fs();
72881
72882 set_fs(KERNEL_DS);
72883- ret = sys_sigpending((old_sigset_t __user *) &s);
72884+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
72885 set_fs(old_fs);
72886 if (ret == 0)
72887 ret = put_user(s, set);
72888@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
72889 mm_segment_t old_fs = get_fs();
72890
72891 set_fs(KERNEL_DS);
72892- ret = sys_old_getrlimit(resource, &r);
72893+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
72894 set_fs(old_fs);
72895
72896 if (!ret) {
72897@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
72898 mm_segment_t old_fs = get_fs();
72899
72900 set_fs(KERNEL_DS);
72901- ret = sys_getrusage(who, (struct rusage __user *) &r);
72902+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
72903 set_fs(old_fs);
72904
72905 if (ret)
72906@@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
72907 set_fs (KERNEL_DS);
72908 ret = sys_wait4(pid,
72909 (stat_addr ?
72910- (unsigned int __user *) &status : NULL),
72911- options, (struct rusage __user *) &r);
72912+ (unsigned int __force_user *) &status : NULL),
72913+ options, (struct rusage __force_user *) &r);
72914 set_fs (old_fs);
72915
72916 if (ret > 0) {
72917@@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
72918 memset(&info, 0, sizeof(info));
72919
72920 set_fs(KERNEL_DS);
72921- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
72922- uru ? (struct rusage __user *)&ru : NULL);
72923+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
72924+ uru ? (struct rusage __force_user *)&ru : NULL);
72925 set_fs(old_fs);
72926
72927 if ((ret < 0) || (info.si_signo == 0))
72928@@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
72929 oldfs = get_fs();
72930 set_fs(KERNEL_DS);
72931 err = sys_timer_settime(timer_id, flags,
72932- (struct itimerspec __user *) &newts,
72933- (struct itimerspec __user *) &oldts);
72934+ (struct itimerspec __force_user *) &newts,
72935+ (struct itimerspec __force_user *) &oldts);
72936 set_fs(oldfs);
72937 if (!err && old && put_compat_itimerspec(old, &oldts))
72938 return -EFAULT;
72939@@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
72940 oldfs = get_fs();
72941 set_fs(KERNEL_DS);
72942 err = sys_timer_gettime(timer_id,
72943- (struct itimerspec __user *) &ts);
72944+ (struct itimerspec __force_user *) &ts);
72945 set_fs(oldfs);
72946 if (!err && put_compat_itimerspec(setting, &ts))
72947 return -EFAULT;
72948@@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
72949 oldfs = get_fs();
72950 set_fs(KERNEL_DS);
72951 err = sys_clock_settime(which_clock,
72952- (struct timespec __user *) &ts);
72953+ (struct timespec __force_user *) &ts);
72954 set_fs(oldfs);
72955 return err;
72956 }
72957@@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
72958 oldfs = get_fs();
72959 set_fs(KERNEL_DS);
72960 err = sys_clock_gettime(which_clock,
72961- (struct timespec __user *) &ts);
72962+ (struct timespec __force_user *) &ts);
72963 set_fs(oldfs);
72964 if (!err && put_compat_timespec(&ts, tp))
72965 return -EFAULT;
72966@@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
72967
72968 oldfs = get_fs();
72969 set_fs(KERNEL_DS);
72970- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
72971+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
72972 set_fs(oldfs);
72973
72974 err = compat_put_timex(utp, &txc);
72975@@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
72976 oldfs = get_fs();
72977 set_fs(KERNEL_DS);
72978 err = sys_clock_getres(which_clock,
72979- (struct timespec __user *) &ts);
72980+ (struct timespec __force_user *) &ts);
72981 set_fs(oldfs);
72982 if (!err && tp && put_compat_timespec(&ts, tp))
72983 return -EFAULT;
72984@@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
72985 long err;
72986 mm_segment_t oldfs;
72987 struct timespec tu;
72988- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
72989+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
72990
72991- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
72992+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
72993 oldfs = get_fs();
72994 set_fs(KERNEL_DS);
72995 err = clock_nanosleep_restart(restart);
72996@@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
72997 oldfs = get_fs();
72998 set_fs(KERNEL_DS);
72999 err = sys_clock_nanosleep(which_clock, flags,
73000- (struct timespec __user *) &in,
73001- (struct timespec __user *) &out);
73002+ (struct timespec __force_user *) &in,
73003+ (struct timespec __force_user *) &out);
73004 set_fs(oldfs);
73005
73006 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
73007diff --git a/kernel/configs.c b/kernel/configs.c
73008index 42e8fa0..9e7406b 100644
73009--- a/kernel/configs.c
73010+++ b/kernel/configs.c
73011@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
73012 struct proc_dir_entry *entry;
73013
73014 /* create the current config file */
73015+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
73016+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
73017+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
73018+ &ikconfig_file_ops);
73019+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73020+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
73021+ &ikconfig_file_ops);
73022+#endif
73023+#else
73024 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
73025 &ikconfig_file_ops);
73026+#endif
73027+
73028 if (!entry)
73029 return -ENOMEM;
73030
73031diff --git a/kernel/cred.c b/kernel/cred.c
73032index e0573a4..3874e41 100644
73033--- a/kernel/cred.c
73034+++ b/kernel/cred.c
73035@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
73036 validate_creds(cred);
73037 alter_cred_subscribers(cred, -1);
73038 put_cred(cred);
73039+
73040+#ifdef CONFIG_GRKERNSEC_SETXID
73041+ cred = (struct cred *) tsk->delayed_cred;
73042+ if (cred != NULL) {
73043+ tsk->delayed_cred = NULL;
73044+ validate_creds(cred);
73045+ alter_cred_subscribers(cred, -1);
73046+ put_cred(cred);
73047+ }
73048+#endif
73049 }
73050
73051 /**
73052@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
73053 * Always returns 0 thus allowing this function to be tail-called at the end
73054 * of, say, sys_setgid().
73055 */
73056-int commit_creds(struct cred *new)
73057+static int __commit_creds(struct cred *new)
73058 {
73059 struct task_struct *task = current;
73060 const struct cred *old = task->real_cred;
73061@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
73062
73063 get_cred(new); /* we will require a ref for the subj creds too */
73064
73065+ gr_set_role_label(task, new->uid, new->gid);
73066+
73067 /* dumpability changes */
73068 if (!uid_eq(old->euid, new->euid) ||
73069 !gid_eq(old->egid, new->egid) ||
73070@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
73071 put_cred(old);
73072 return 0;
73073 }
73074+#ifdef CONFIG_GRKERNSEC_SETXID
73075+extern int set_user(struct cred *new);
73076+
73077+void gr_delayed_cred_worker(void)
73078+{
73079+ const struct cred *new = current->delayed_cred;
73080+ struct cred *ncred;
73081+
73082+ current->delayed_cred = NULL;
73083+
73084+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
73085+ // from doing get_cred on it when queueing this
73086+ put_cred(new);
73087+ return;
73088+ } else if (new == NULL)
73089+ return;
73090+
73091+ ncred = prepare_creds();
73092+ if (!ncred)
73093+ goto die;
73094+ // uids
73095+ ncred->uid = new->uid;
73096+ ncred->euid = new->euid;
73097+ ncred->suid = new->suid;
73098+ ncred->fsuid = new->fsuid;
73099+ // gids
73100+ ncred->gid = new->gid;
73101+ ncred->egid = new->egid;
73102+ ncred->sgid = new->sgid;
73103+ ncred->fsgid = new->fsgid;
73104+ // groups
73105+ if (set_groups(ncred, new->group_info) < 0) {
73106+ abort_creds(ncred);
73107+ goto die;
73108+ }
73109+ // caps
73110+ ncred->securebits = new->securebits;
73111+ ncred->cap_inheritable = new->cap_inheritable;
73112+ ncred->cap_permitted = new->cap_permitted;
73113+ ncred->cap_effective = new->cap_effective;
73114+ ncred->cap_bset = new->cap_bset;
73115+
73116+ if (set_user(ncred)) {
73117+ abort_creds(ncred);
73118+ goto die;
73119+ }
73120+
73121+ // from doing get_cred on it when queueing this
73122+ put_cred(new);
73123+
73124+ __commit_creds(ncred);
73125+ return;
73126+die:
73127+ // from doing get_cred on it when queueing this
73128+ put_cred(new);
73129+ do_group_exit(SIGKILL);
73130+}
73131+#endif
73132+
73133+int commit_creds(struct cred *new)
73134+{
73135+#ifdef CONFIG_GRKERNSEC_SETXID
73136+ int ret;
73137+ int schedule_it = 0;
73138+ struct task_struct *t;
73139+
73140+ /* we won't get called with tasklist_lock held for writing
73141+ and interrupts disabled as the cred struct in that case is
73142+ init_cred
73143+ */
73144+ if (grsec_enable_setxid && !current_is_single_threaded() &&
73145+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
73146+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
73147+ schedule_it = 1;
73148+ }
73149+ ret = __commit_creds(new);
73150+ if (schedule_it) {
73151+ rcu_read_lock();
73152+ read_lock(&tasklist_lock);
73153+ for (t = next_thread(current); t != current;
73154+ t = next_thread(t)) {
73155+ if (t->delayed_cred == NULL) {
73156+ t->delayed_cred = get_cred(new);
73157+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
73158+ set_tsk_need_resched(t);
73159+ }
73160+ }
73161+ read_unlock(&tasklist_lock);
73162+ rcu_read_unlock();
73163+ }
73164+ return ret;
73165+#else
73166+ return __commit_creds(new);
73167+#endif
73168+}
73169+
73170 EXPORT_SYMBOL(commit_creds);
73171
73172 /**
73173diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
73174index 9a61738..c5c8f3a 100644
73175--- a/kernel/debug/debug_core.c
73176+++ b/kernel/debug/debug_core.c
73177@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
73178 */
73179 static atomic_t masters_in_kgdb;
73180 static atomic_t slaves_in_kgdb;
73181-static atomic_t kgdb_break_tasklet_var;
73182+static atomic_unchecked_t kgdb_break_tasklet_var;
73183 atomic_t kgdb_setting_breakpoint;
73184
73185 struct task_struct *kgdb_usethread;
73186@@ -132,7 +132,7 @@ int kgdb_single_step;
73187 static pid_t kgdb_sstep_pid;
73188
73189 /* to keep track of the CPU which is doing the single stepping*/
73190-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73191+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73192
73193 /*
73194 * If you are debugging a problem where roundup (the collection of
73195@@ -540,7 +540,7 @@ return_normal:
73196 * kernel will only try for the value of sstep_tries before
73197 * giving up and continuing on.
73198 */
73199- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
73200+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
73201 (kgdb_info[cpu].task &&
73202 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
73203 atomic_set(&kgdb_active, -1);
73204@@ -634,8 +634,8 @@ cpu_master_loop:
73205 }
73206
73207 kgdb_restore:
73208- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
73209- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
73210+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
73211+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
73212 if (kgdb_info[sstep_cpu].task)
73213 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
73214 else
73215@@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
73216 static void kgdb_tasklet_bpt(unsigned long ing)
73217 {
73218 kgdb_breakpoint();
73219- atomic_set(&kgdb_break_tasklet_var, 0);
73220+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
73221 }
73222
73223 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
73224
73225 void kgdb_schedule_breakpoint(void)
73226 {
73227- if (atomic_read(&kgdb_break_tasklet_var) ||
73228+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
73229 atomic_read(&kgdb_active) != -1 ||
73230 atomic_read(&kgdb_setting_breakpoint))
73231 return;
73232- atomic_inc(&kgdb_break_tasklet_var);
73233+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
73234 tasklet_schedule(&kgdb_tasklet_breakpoint);
73235 }
73236 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
73237diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
73238index 8875254..7cf4928 100644
73239--- a/kernel/debug/kdb/kdb_main.c
73240+++ b/kernel/debug/kdb/kdb_main.c
73241@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
73242 continue;
73243
73244 kdb_printf("%-20s%8u 0x%p ", mod->name,
73245- mod->core_size, (void *)mod);
73246+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
73247 #ifdef CONFIG_MODULE_UNLOAD
73248 kdb_printf("%4ld ", module_refcount(mod));
73249 #endif
73250@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
73251 kdb_printf(" (Loading)");
73252 else
73253 kdb_printf(" (Live)");
73254- kdb_printf(" 0x%p", mod->module_core);
73255+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
73256
73257 #ifdef CONFIG_MODULE_UNLOAD
73258 {
73259diff --git a/kernel/events/core.c b/kernel/events/core.c
73260index 7b6646a..3cb1135 100644
73261--- a/kernel/events/core.c
73262+++ b/kernel/events/core.c
73263@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
73264 return 0;
73265 }
73266
73267-static atomic64_t perf_event_id;
73268+static atomic64_unchecked_t perf_event_id;
73269
73270 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
73271 enum event_type_t event_type);
73272@@ -2677,7 +2677,7 @@ static void __perf_event_read(void *info)
73273
73274 static inline u64 perf_event_count(struct perf_event *event)
73275 {
73276- return local64_read(&event->count) + atomic64_read(&event->child_count);
73277+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
73278 }
73279
73280 static u64 perf_event_read(struct perf_event *event)
73281@@ -3007,9 +3007,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
73282 mutex_lock(&event->child_mutex);
73283 total += perf_event_read(event);
73284 *enabled += event->total_time_enabled +
73285- atomic64_read(&event->child_total_time_enabled);
73286+ atomic64_read_unchecked(&event->child_total_time_enabled);
73287 *running += event->total_time_running +
73288- atomic64_read(&event->child_total_time_running);
73289+ atomic64_read_unchecked(&event->child_total_time_running);
73290
73291 list_for_each_entry(child, &event->child_list, child_list) {
73292 total += perf_event_read(child);
73293@@ -3412,10 +3412,10 @@ void perf_event_update_userpage(struct perf_event *event)
73294 userpg->offset -= local64_read(&event->hw.prev_count);
73295
73296 userpg->time_enabled = enabled +
73297- atomic64_read(&event->child_total_time_enabled);
73298+ atomic64_read_unchecked(&event->child_total_time_enabled);
73299
73300 userpg->time_running = running +
73301- atomic64_read(&event->child_total_time_running);
73302+ atomic64_read_unchecked(&event->child_total_time_running);
73303
73304 arch_perf_update_userpage(userpg, now);
73305
73306@@ -3974,11 +3974,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
73307 values[n++] = perf_event_count(event);
73308 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73309 values[n++] = enabled +
73310- atomic64_read(&event->child_total_time_enabled);
73311+ atomic64_read_unchecked(&event->child_total_time_enabled);
73312 }
73313 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73314 values[n++] = running +
73315- atomic64_read(&event->child_total_time_running);
73316+ atomic64_read_unchecked(&event->child_total_time_running);
73317 }
73318 if (read_format & PERF_FORMAT_ID)
73319 values[n++] = primary_event_id(event);
73320@@ -4721,12 +4721,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
73321 * need to add enough zero bytes after the string to handle
73322 * the 64bit alignment we do later.
73323 */
73324- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
73325+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
73326 if (!buf) {
73327 name = strncpy(tmp, "//enomem", sizeof(tmp));
73328 goto got_name;
73329 }
73330- name = d_path(&file->f_path, buf, PATH_MAX);
73331+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
73332 if (IS_ERR(name)) {
73333 name = strncpy(tmp, "//toolong", sizeof(tmp));
73334 goto got_name;
73335@@ -6165,7 +6165,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
73336 event->parent = parent_event;
73337
73338 event->ns = get_pid_ns(task_active_pid_ns(current));
73339- event->id = atomic64_inc_return(&perf_event_id);
73340+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
73341
73342 event->state = PERF_EVENT_STATE_INACTIVE;
73343
73344@@ -6790,10 +6790,10 @@ static void sync_child_event(struct perf_event *child_event,
73345 /*
73346 * Add back the child's count to the parent's count:
73347 */
73348- atomic64_add(child_val, &parent_event->child_count);
73349- atomic64_add(child_event->total_time_enabled,
73350+ atomic64_add_unchecked(child_val, &parent_event->child_count);
73351+ atomic64_add_unchecked(child_event->total_time_enabled,
73352 &parent_event->child_total_time_enabled);
73353- atomic64_add(child_event->total_time_running,
73354+ atomic64_add_unchecked(child_event->total_time_running,
73355 &parent_event->child_total_time_running);
73356
73357 /*
73358diff --git a/kernel/exit.c b/kernel/exit.c
73359index b4df219..f13c02d 100644
73360--- a/kernel/exit.c
73361+++ b/kernel/exit.c
73362@@ -170,6 +170,10 @@ void release_task(struct task_struct * p)
73363 struct task_struct *leader;
73364 int zap_leader;
73365 repeat:
73366+#ifdef CONFIG_NET
73367+ gr_del_task_from_ip_table(p);
73368+#endif
73369+
73370 /* don't need to get the RCU readlock here - the process is dead and
73371 * can't be modifying its own credentials. But shut RCU-lockdep up */
73372 rcu_read_lock();
73373@@ -338,7 +342,7 @@ int allow_signal(int sig)
73374 * know it'll be handled, so that they don't get converted to
73375 * SIGKILL or just silently dropped.
73376 */
73377- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
73378+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
73379 recalc_sigpending();
73380 spin_unlock_irq(&current->sighand->siglock);
73381 return 0;
73382@@ -708,6 +712,8 @@ void do_exit(long code)
73383 struct task_struct *tsk = current;
73384 int group_dead;
73385
73386+ set_fs(USER_DS);
73387+
73388 profile_task_exit(tsk);
73389
73390 WARN_ON(blk_needs_flush_plug(tsk));
73391@@ -724,7 +730,6 @@ void do_exit(long code)
73392 * mm_release()->clear_child_tid() from writing to a user-controlled
73393 * kernel address.
73394 */
73395- set_fs(USER_DS);
73396
73397 ptrace_event(PTRACE_EVENT_EXIT, code);
73398
73399@@ -783,6 +788,9 @@ void do_exit(long code)
73400 tsk->exit_code = code;
73401 taskstats_exit(tsk, group_dead);
73402
73403+ gr_acl_handle_psacct(tsk, code);
73404+ gr_acl_handle_exit();
73405+
73406 exit_mm(tsk);
73407
73408 if (group_dead)
73409@@ -903,7 +911,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
73410 * Take down every thread in the group. This is called by fatal signals
73411 * as well as by sys_exit_group (below).
73412 */
73413-void
73414+__noreturn void
73415 do_group_exit(int exit_code)
73416 {
73417 struct signal_struct *sig = current->signal;
73418diff --git a/kernel/fork.c b/kernel/fork.c
73419index c535f33..1d768f9 100644
73420--- a/kernel/fork.c
73421+++ b/kernel/fork.c
73422@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
73423 *stackend = STACK_END_MAGIC; /* for overflow detection */
73424
73425 #ifdef CONFIG_CC_STACKPROTECTOR
73426- tsk->stack_canary = get_random_int();
73427+ tsk->stack_canary = pax_get_random_long();
73428 #endif
73429
73430 /*
73431@@ -344,13 +344,81 @@ free_tsk:
73432 }
73433
73434 #ifdef CONFIG_MMU
73435+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
73436+{
73437+ struct vm_area_struct *tmp;
73438+ unsigned long charge;
73439+ struct mempolicy *pol;
73440+ struct file *file;
73441+
73442+ charge = 0;
73443+ if (mpnt->vm_flags & VM_ACCOUNT) {
73444+ unsigned long len = vma_pages(mpnt);
73445+
73446+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
73447+ goto fail_nomem;
73448+ charge = len;
73449+ }
73450+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73451+ if (!tmp)
73452+ goto fail_nomem;
73453+ *tmp = *mpnt;
73454+ tmp->vm_mm = mm;
73455+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
73456+ pol = mpol_dup(vma_policy(mpnt));
73457+ if (IS_ERR(pol))
73458+ goto fail_nomem_policy;
73459+ vma_set_policy(tmp, pol);
73460+ if (anon_vma_fork(tmp, mpnt))
73461+ goto fail_nomem_anon_vma_fork;
73462+ tmp->vm_flags &= ~VM_LOCKED;
73463+ tmp->vm_next = tmp->vm_prev = NULL;
73464+ tmp->vm_mirror = NULL;
73465+ file = tmp->vm_file;
73466+ if (file) {
73467+ struct inode *inode = file->f_path.dentry->d_inode;
73468+ struct address_space *mapping = file->f_mapping;
73469+
73470+ get_file(file);
73471+ if (tmp->vm_flags & VM_DENYWRITE)
73472+ atomic_dec(&inode->i_writecount);
73473+ mutex_lock(&mapping->i_mmap_mutex);
73474+ if (tmp->vm_flags & VM_SHARED)
73475+ mapping->i_mmap_writable++;
73476+ flush_dcache_mmap_lock(mapping);
73477+ /* insert tmp into the share list, just after mpnt */
73478+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
73479+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
73480+ else
73481+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
73482+ flush_dcache_mmap_unlock(mapping);
73483+ mutex_unlock(&mapping->i_mmap_mutex);
73484+ }
73485+
73486+ /*
73487+ * Clear hugetlb-related page reserves for children. This only
73488+ * affects MAP_PRIVATE mappings. Faults generated by the child
73489+ * are not guaranteed to succeed, even if read-only
73490+ */
73491+ if (is_vm_hugetlb_page(tmp))
73492+ reset_vma_resv_huge_pages(tmp);
73493+
73494+ return tmp;
73495+
73496+fail_nomem_anon_vma_fork:
73497+ mpol_put(pol);
73498+fail_nomem_policy:
73499+ kmem_cache_free(vm_area_cachep, tmp);
73500+fail_nomem:
73501+ vm_unacct_memory(charge);
73502+ return NULL;
73503+}
73504+
73505 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73506 {
73507 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
73508 struct rb_node **rb_link, *rb_parent;
73509 int retval;
73510- unsigned long charge;
73511- struct mempolicy *pol;
73512
73513 uprobe_start_dup_mmap();
73514 down_write(&oldmm->mmap_sem);
73515@@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73516 mm->locked_vm = 0;
73517 mm->mmap = NULL;
73518 mm->mmap_cache = NULL;
73519- mm->free_area_cache = oldmm->mmap_base;
73520- mm->cached_hole_size = ~0UL;
73521+ mm->free_area_cache = oldmm->free_area_cache;
73522+ mm->cached_hole_size = oldmm->cached_hole_size;
73523 mm->map_count = 0;
73524 cpumask_clear(mm_cpumask(mm));
73525 mm->mm_rb = RB_ROOT;
73526@@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73527
73528 prev = NULL;
73529 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
73530- struct file *file;
73531-
73532 if (mpnt->vm_flags & VM_DONTCOPY) {
73533 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
73534 -vma_pages(mpnt));
73535 continue;
73536 }
73537- charge = 0;
73538- if (mpnt->vm_flags & VM_ACCOUNT) {
73539- unsigned long len = vma_pages(mpnt);
73540-
73541- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
73542- goto fail_nomem;
73543- charge = len;
73544- }
73545- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73546- if (!tmp)
73547- goto fail_nomem;
73548- *tmp = *mpnt;
73549- INIT_LIST_HEAD(&tmp->anon_vma_chain);
73550- pol = mpol_dup(vma_policy(mpnt));
73551- retval = PTR_ERR(pol);
73552- if (IS_ERR(pol))
73553- goto fail_nomem_policy;
73554- vma_set_policy(tmp, pol);
73555- tmp->vm_mm = mm;
73556- if (anon_vma_fork(tmp, mpnt))
73557- goto fail_nomem_anon_vma_fork;
73558- tmp->vm_flags &= ~VM_LOCKED;
73559- tmp->vm_next = tmp->vm_prev = NULL;
73560- file = tmp->vm_file;
73561- if (file) {
73562- struct inode *inode = file->f_path.dentry->d_inode;
73563- struct address_space *mapping = file->f_mapping;
73564-
73565- get_file(file);
73566- if (tmp->vm_flags & VM_DENYWRITE)
73567- atomic_dec(&inode->i_writecount);
73568- mutex_lock(&mapping->i_mmap_mutex);
73569- if (tmp->vm_flags & VM_SHARED)
73570- mapping->i_mmap_writable++;
73571- flush_dcache_mmap_lock(mapping);
73572- /* insert tmp into the share list, just after mpnt */
73573- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
73574- vma_nonlinear_insert(tmp,
73575- &mapping->i_mmap_nonlinear);
73576- else
73577- vma_interval_tree_insert_after(tmp, mpnt,
73578- &mapping->i_mmap);
73579- flush_dcache_mmap_unlock(mapping);
73580- mutex_unlock(&mapping->i_mmap_mutex);
73581+ tmp = dup_vma(mm, oldmm, mpnt);
73582+ if (!tmp) {
73583+ retval = -ENOMEM;
73584+ goto out;
73585 }
73586
73587 /*
73588@@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73589 if (retval)
73590 goto out;
73591 }
73592+
73593+#ifdef CONFIG_PAX_SEGMEXEC
73594+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
73595+ struct vm_area_struct *mpnt_m;
73596+
73597+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
73598+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
73599+
73600+ if (!mpnt->vm_mirror)
73601+ continue;
73602+
73603+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
73604+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
73605+ mpnt->vm_mirror = mpnt_m;
73606+ } else {
73607+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
73608+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
73609+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
73610+ mpnt->vm_mirror->vm_mirror = mpnt;
73611+ }
73612+ }
73613+ BUG_ON(mpnt_m);
73614+ }
73615+#endif
73616+
73617 /* a new mm has just been created */
73618 arch_dup_mmap(oldmm, mm);
73619 retval = 0;
73620@@ -472,14 +523,6 @@ out:
73621 up_write(&oldmm->mmap_sem);
73622 uprobe_end_dup_mmap();
73623 return retval;
73624-fail_nomem_anon_vma_fork:
73625- mpol_put(pol);
73626-fail_nomem_policy:
73627- kmem_cache_free(vm_area_cachep, tmp);
73628-fail_nomem:
73629- retval = -ENOMEM;
73630- vm_unacct_memory(charge);
73631- goto out;
73632 }
73633
73634 static inline int mm_alloc_pgd(struct mm_struct *mm)
73635@@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
73636 return ERR_PTR(err);
73637
73638 mm = get_task_mm(task);
73639- if (mm && mm != current->mm &&
73640- !ptrace_may_access(task, mode)) {
73641+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
73642+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
73643 mmput(mm);
73644 mm = ERR_PTR(-EACCES);
73645 }
73646@@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
73647 spin_unlock(&fs->lock);
73648 return -EAGAIN;
73649 }
73650- fs->users++;
73651+ atomic_inc(&fs->users);
73652 spin_unlock(&fs->lock);
73653 return 0;
73654 }
73655 tsk->fs = copy_fs_struct(fs);
73656 if (!tsk->fs)
73657 return -ENOMEM;
73658+ /* Carry through gr_chroot_dentry and is_chrooted instead
73659+ of recomputing it here. Already copied when the task struct
73660+ is duplicated. This allows pivot_root to not be treated as
73661+ a chroot
73662+ */
73663+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
73664+
73665 return 0;
73666 }
73667
73668@@ -1193,6 +1243,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
73669 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
73670 #endif
73671 retval = -EAGAIN;
73672+
73673+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
73674+
73675 if (atomic_read(&p->real_cred->user->processes) >=
73676 task_rlimit(p, RLIMIT_NPROC)) {
73677 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
73678@@ -1432,6 +1485,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
73679 goto bad_fork_free_pid;
73680 }
73681
73682+ /* synchronizes with gr_set_acls()
73683+ we need to call this past the point of no return for fork()
73684+ */
73685+ gr_copy_label(p);
73686+
73687 if (clone_flags & CLONE_THREAD) {
73688 current->signal->nr_threads++;
73689 atomic_inc(&current->signal->live);
73690@@ -1515,6 +1573,8 @@ bad_fork_cleanup_count:
73691 bad_fork_free:
73692 free_task(p);
73693 fork_out:
73694+ gr_log_forkfail(retval);
73695+
73696 return ERR_PTR(retval);
73697 }
73698
73699@@ -1565,6 +1625,23 @@ long do_fork(unsigned long clone_flags,
73700 return -EINVAL;
73701 }
73702
73703+#ifdef CONFIG_GRKERNSEC
73704+ if (clone_flags & CLONE_NEWUSER) {
73705+ /*
73706+ * This doesn't really inspire confidence:
73707+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
73708+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
73709+ * Increases kernel attack surface in areas developers
73710+ * previously cared little about ("low importance due
73711+ * to requiring "root" capability")
73712+ * To be removed when this code receives *proper* review
73713+ */
73714+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
73715+ !capable(CAP_SETGID))
73716+ return -EPERM;
73717+ }
73718+#endif
73719+
73720 /*
73721 * Determine whether and which event to report to ptracer. When
73722 * called from kernel_thread or CLONE_UNTRACED is explicitly
73723@@ -1599,6 +1676,8 @@ long do_fork(unsigned long clone_flags,
73724 if (clone_flags & CLONE_PARENT_SETTID)
73725 put_user(nr, parent_tidptr);
73726
73727+ gr_handle_brute_check();
73728+
73729 if (clone_flags & CLONE_VFORK) {
73730 p->vfork_done = &vfork;
73731 init_completion(&vfork);
73732@@ -1752,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
73733 return 0;
73734
73735 /* don't need lock here; in the worst case we'll do useless copy */
73736- if (fs->users == 1)
73737+ if (atomic_read(&fs->users) == 1)
73738 return 0;
73739
73740 *new_fsp = copy_fs_struct(fs);
73741@@ -1866,7 +1945,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
73742 fs = current->fs;
73743 spin_lock(&fs->lock);
73744 current->fs = new_fs;
73745- if (--fs->users)
73746+ gr_set_chroot_entries(current, &current->fs->root);
73747+ if (atomic_dec_return(&fs->users))
73748 new_fs = NULL;
73749 else
73750 new_fs = fs;
73751diff --git a/kernel/futex.c b/kernel/futex.c
73752index 8879430..31696f1 100644
73753--- a/kernel/futex.c
73754+++ b/kernel/futex.c
73755@@ -54,6 +54,7 @@
73756 #include <linux/mount.h>
73757 #include <linux/pagemap.h>
73758 #include <linux/syscalls.h>
73759+#include <linux/ptrace.h>
73760 #include <linux/signal.h>
73761 #include <linux/export.h>
73762 #include <linux/magic.h>
73763@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
73764 struct page *page, *page_head;
73765 int err, ro = 0;
73766
73767+#ifdef CONFIG_PAX_SEGMEXEC
73768+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
73769+ return -EFAULT;
73770+#endif
73771+
73772 /*
73773 * The futex address must be "naturally" aligned.
73774 */
73775@@ -2731,6 +2737,7 @@ static int __init futex_init(void)
73776 {
73777 u32 curval;
73778 int i;
73779+ mm_segment_t oldfs;
73780
73781 /*
73782 * This will fail and we want it. Some arch implementations do
73783@@ -2742,8 +2749,11 @@ static int __init futex_init(void)
73784 * implementation, the non-functional ones will return
73785 * -ENOSYS.
73786 */
73787+ oldfs = get_fs();
73788+ set_fs(USER_DS);
73789 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
73790 futex_cmpxchg_enabled = 1;
73791+ set_fs(oldfs);
73792
73793 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
73794 plist_head_init(&futex_queues[i].chain);
73795diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
73796index 9b22d03..6295b62 100644
73797--- a/kernel/gcov/base.c
73798+++ b/kernel/gcov/base.c
73799@@ -102,11 +102,6 @@ void gcov_enable_events(void)
73800 }
73801
73802 #ifdef CONFIG_MODULES
73803-static inline int within(void *addr, void *start, unsigned long size)
73804-{
73805- return ((addr >= start) && (addr < start + size));
73806-}
73807-
73808 /* Update list and generate events when modules are unloaded. */
73809 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
73810 void *data)
73811@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
73812 prev = NULL;
73813 /* Remove entries located in module from linked list. */
73814 for (info = gcov_info_head; info; info = info->next) {
73815- if (within(info, mod->module_core, mod->core_size)) {
73816+ if (within_module_core_rw((unsigned long)info, mod)) {
73817 if (prev)
73818 prev->next = info->next;
73819 else
73820diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
73821index cdd5607..c3fc919 100644
73822--- a/kernel/hrtimer.c
73823+++ b/kernel/hrtimer.c
73824@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
73825 local_irq_restore(flags);
73826 }
73827
73828-static void run_hrtimer_softirq(struct softirq_action *h)
73829+static void run_hrtimer_softirq(void)
73830 {
73831 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
73832
73833@@ -1751,7 +1751,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
73834 return NOTIFY_OK;
73835 }
73836
73837-static struct notifier_block __cpuinitdata hrtimers_nb = {
73838+static struct notifier_block hrtimers_nb = {
73839 .notifier_call = hrtimer_cpu_notify,
73840 };
73841
73842diff --git a/kernel/jump_label.c b/kernel/jump_label.c
73843index 60f48fa..7f3a770 100644
73844--- a/kernel/jump_label.c
73845+++ b/kernel/jump_label.c
73846@@ -13,6 +13,7 @@
73847 #include <linux/sort.h>
73848 #include <linux/err.h>
73849 #include <linux/static_key.h>
73850+#include <linux/mm.h>
73851
73852 #ifdef HAVE_JUMP_LABEL
73853
73854@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
73855
73856 size = (((unsigned long)stop - (unsigned long)start)
73857 / sizeof(struct jump_entry));
73858+ pax_open_kernel();
73859 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
73860+ pax_close_kernel();
73861 }
73862
73863 static void jump_label_update(struct static_key *key, int enable);
73864@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
73865 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
73866 struct jump_entry *iter;
73867
73868+ pax_open_kernel();
73869 for (iter = iter_start; iter < iter_stop; iter++) {
73870 if (within_module_init(iter->code, mod))
73871 iter->code = 0;
73872 }
73873+ pax_close_kernel();
73874 }
73875
73876 static int
73877diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
73878index 2169fee..706ccca 100644
73879--- a/kernel/kallsyms.c
73880+++ b/kernel/kallsyms.c
73881@@ -11,6 +11,9 @@
73882 * Changed the compression method from stem compression to "table lookup"
73883 * compression (see scripts/kallsyms.c for a more complete description)
73884 */
73885+#ifdef CONFIG_GRKERNSEC_HIDESYM
73886+#define __INCLUDED_BY_HIDESYM 1
73887+#endif
73888 #include <linux/kallsyms.h>
73889 #include <linux/module.h>
73890 #include <linux/init.h>
73891@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
73892
73893 static inline int is_kernel_inittext(unsigned long addr)
73894 {
73895+ if (system_state != SYSTEM_BOOTING)
73896+ return 0;
73897+
73898 if (addr >= (unsigned long)_sinittext
73899 && addr <= (unsigned long)_einittext)
73900 return 1;
73901 return 0;
73902 }
73903
73904+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73905+#ifdef CONFIG_MODULES
73906+static inline int is_module_text(unsigned long addr)
73907+{
73908+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
73909+ return 1;
73910+
73911+ addr = ktla_ktva(addr);
73912+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
73913+}
73914+#else
73915+static inline int is_module_text(unsigned long addr)
73916+{
73917+ return 0;
73918+}
73919+#endif
73920+#endif
73921+
73922 static inline int is_kernel_text(unsigned long addr)
73923 {
73924 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
73925@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
73926
73927 static inline int is_kernel(unsigned long addr)
73928 {
73929+
73930+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73931+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
73932+ return 1;
73933+
73934+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
73935+#else
73936 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
73937+#endif
73938+
73939 return 1;
73940 return in_gate_area_no_mm(addr);
73941 }
73942
73943 static int is_ksym_addr(unsigned long addr)
73944 {
73945+
73946+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73947+ if (is_module_text(addr))
73948+ return 0;
73949+#endif
73950+
73951 if (all_var)
73952 return is_kernel(addr);
73953
73954@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
73955
73956 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
73957 {
73958- iter->name[0] = '\0';
73959 iter->nameoff = get_symbol_offset(new_pos);
73960 iter->pos = new_pos;
73961 }
73962@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
73963 {
73964 struct kallsym_iter *iter = m->private;
73965
73966+#ifdef CONFIG_GRKERNSEC_HIDESYM
73967+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
73968+ return 0;
73969+#endif
73970+
73971 /* Some debugging symbols have no name. Ignore them. */
73972 if (!iter->name[0])
73973 return 0;
73974@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
73975 */
73976 type = iter->exported ? toupper(iter->type) :
73977 tolower(iter->type);
73978+
73979 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
73980 type, iter->name, iter->module_name);
73981 } else
73982@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
73983 struct kallsym_iter *iter;
73984 int ret;
73985
73986- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
73987+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
73988 if (!iter)
73989 return -ENOMEM;
73990 reset_iter(iter, 0);
73991diff --git a/kernel/kcmp.c b/kernel/kcmp.c
73992index e30ac0f..3528cac 100644
73993--- a/kernel/kcmp.c
73994+++ b/kernel/kcmp.c
73995@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
73996 struct task_struct *task1, *task2;
73997 int ret;
73998
73999+#ifdef CONFIG_GRKERNSEC
74000+ return -ENOSYS;
74001+#endif
74002+
74003 rcu_read_lock();
74004
74005 /*
74006diff --git a/kernel/kexec.c b/kernel/kexec.c
74007index 5e4bd78..00c5b91 100644
74008--- a/kernel/kexec.c
74009+++ b/kernel/kexec.c
74010@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
74011 unsigned long flags)
74012 {
74013 struct compat_kexec_segment in;
74014- struct kexec_segment out, __user *ksegments;
74015+ struct kexec_segment out;
74016+ struct kexec_segment __user *ksegments;
74017 unsigned long i, result;
74018
74019 /* Don't allow clients that don't understand the native
74020diff --git a/kernel/kmod.c b/kernel/kmod.c
74021index 0023a87..9c0c068 100644
74022--- a/kernel/kmod.c
74023+++ b/kernel/kmod.c
74024@@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
74025 kfree(info->argv);
74026 }
74027
74028-static int call_modprobe(char *module_name, int wait)
74029+static int call_modprobe(char *module_name, char *module_param, int wait)
74030 {
74031 static char *envp[] = {
74032 "HOME=/",
74033@@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
74034 NULL
74035 };
74036
74037- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
74038+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
74039 if (!argv)
74040 goto out;
74041
74042@@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
74043 argv[1] = "-q";
74044 argv[2] = "--";
74045 argv[3] = module_name; /* check free_modprobe_argv() */
74046- argv[4] = NULL;
74047+ argv[4] = module_param;
74048+ argv[5] = NULL;
74049
74050 return call_usermodehelper_fns(modprobe_path, argv, envp,
74051 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
74052@@ -120,9 +121,8 @@ out:
74053 * If module auto-loading support is disabled then this function
74054 * becomes a no-operation.
74055 */
74056-int __request_module(bool wait, const char *fmt, ...)
74057+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
74058 {
74059- va_list args;
74060 char module_name[MODULE_NAME_LEN];
74061 unsigned int max_modprobes;
74062 int ret;
74063@@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
74064 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
74065 static int kmod_loop_msg;
74066
74067- va_start(args, fmt);
74068- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
74069- va_end(args);
74070+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
74071 if (ret >= MODULE_NAME_LEN)
74072 return -ENAMETOOLONG;
74073
74074@@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
74075 if (ret)
74076 return ret;
74077
74078+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74079+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
74080+ /* hack to workaround consolekit/udisks stupidity */
74081+ read_lock(&tasklist_lock);
74082+ if (!strcmp(current->comm, "mount") &&
74083+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
74084+ read_unlock(&tasklist_lock);
74085+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
74086+ return -EPERM;
74087+ }
74088+ read_unlock(&tasklist_lock);
74089+ }
74090+#endif
74091+
74092 /* If modprobe needs a service that is in a module, we get a recursive
74093 * loop. Limit the number of running kmod threads to max_threads/2 or
74094 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
74095@@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
74096
74097 trace_module_request(module_name, wait, _RET_IP_);
74098
74099- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
74100+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
74101
74102 atomic_dec(&kmod_concurrent);
74103 return ret;
74104 }
74105+
74106+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
74107+{
74108+ va_list args;
74109+ int ret;
74110+
74111+ va_start(args, fmt);
74112+ ret = ____request_module(wait, module_param, fmt, args);
74113+ va_end(args);
74114+
74115+ return ret;
74116+}
74117+
74118+int __request_module(bool wait, const char *fmt, ...)
74119+{
74120+ va_list args;
74121+ int ret;
74122+
74123+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74124+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
74125+ char module_param[MODULE_NAME_LEN];
74126+
74127+ memset(module_param, 0, sizeof(module_param));
74128+
74129+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
74130+
74131+ va_start(args, fmt);
74132+ ret = ____request_module(wait, module_param, fmt, args);
74133+ va_end(args);
74134+
74135+ return ret;
74136+ }
74137+#endif
74138+
74139+ va_start(args, fmt);
74140+ ret = ____request_module(wait, NULL, fmt, args);
74141+ va_end(args);
74142+
74143+ return ret;
74144+}
74145+
74146 EXPORT_SYMBOL(__request_module);
74147 #endif /* CONFIG_MODULES */
74148
74149@@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
74150 *
74151 * Thus the __user pointer cast is valid here.
74152 */
74153- sys_wait4(pid, (int __user *)&ret, 0, NULL);
74154+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
74155
74156 /*
74157 * If ret is 0, either ____call_usermodehelper failed and the
74158@@ -635,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper_fns);
74159 static int proc_cap_handler(struct ctl_table *table, int write,
74160 void __user *buffer, size_t *lenp, loff_t *ppos)
74161 {
74162- struct ctl_table t;
74163+ ctl_table_no_const t;
74164 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
74165 kernel_cap_t new_cap;
74166 int err, i;
74167diff --git a/kernel/kprobes.c b/kernel/kprobes.c
74168index 098f396..fe85ff1 100644
74169--- a/kernel/kprobes.c
74170+++ b/kernel/kprobes.c
74171@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
74172 * kernel image and loaded module images reside. This is required
74173 * so x86_64 can correctly handle the %rip-relative fixups.
74174 */
74175- kip->insns = module_alloc(PAGE_SIZE);
74176+ kip->insns = module_alloc_exec(PAGE_SIZE);
74177 if (!kip->insns) {
74178 kfree(kip);
74179 return NULL;
74180@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
74181 */
74182 if (!list_is_singular(&kip->list)) {
74183 list_del(&kip->list);
74184- module_free(NULL, kip->insns);
74185+ module_free_exec(NULL, kip->insns);
74186 kfree(kip);
74187 }
74188 return 1;
74189@@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
74190 {
74191 int i, err = 0;
74192 unsigned long offset = 0, size = 0;
74193- char *modname, namebuf[128];
74194+ char *modname, namebuf[KSYM_NAME_LEN];
74195 const char *symbol_name;
74196 void *addr;
74197 struct kprobe_blackpoint *kb;
74198@@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
74199 kprobe_type = "k";
74200
74201 if (sym)
74202- seq_printf(pi, "%p %s %s+0x%x %s ",
74203+ seq_printf(pi, "%pK %s %s+0x%x %s ",
74204 p->addr, kprobe_type, sym, offset,
74205 (modname ? modname : " "));
74206 else
74207- seq_printf(pi, "%p %s %p ",
74208+ seq_printf(pi, "%pK %s %pK ",
74209 p->addr, kprobe_type, p->addr);
74210
74211 if (!pp)
74212@@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
74213 const char *sym = NULL;
74214 unsigned int i = *(loff_t *) v;
74215 unsigned long offset = 0;
74216- char *modname, namebuf[128];
74217+ char *modname, namebuf[KSYM_NAME_LEN];
74218
74219 head = &kprobe_table[i];
74220 preempt_disable();
74221diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
74222index 6ada93c..dce7d5d 100644
74223--- a/kernel/ksysfs.c
74224+++ b/kernel/ksysfs.c
74225@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
74226 {
74227 if (count+1 > UEVENT_HELPER_PATH_LEN)
74228 return -ENOENT;
74229+ if (!capable(CAP_SYS_ADMIN))
74230+ return -EPERM;
74231 memcpy(uevent_helper, buf, count);
74232 uevent_helper[count] = '\0';
74233 if (count && uevent_helper[count-1] == '\n')
74234@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
74235 return count;
74236 }
74237
74238-static struct bin_attribute notes_attr = {
74239+static bin_attribute_no_const notes_attr __read_only = {
74240 .attr = {
74241 .name = "notes",
74242 .mode = S_IRUGO,
74243diff --git a/kernel/lockdep.c b/kernel/lockdep.c
74244index 7981e5b..7f2105c 100644
74245--- a/kernel/lockdep.c
74246+++ b/kernel/lockdep.c
74247@@ -590,6 +590,10 @@ static int static_obj(void *obj)
74248 end = (unsigned long) &_end,
74249 addr = (unsigned long) obj;
74250
74251+#ifdef CONFIG_PAX_KERNEXEC
74252+ start = ktla_ktva(start);
74253+#endif
74254+
74255 /*
74256 * static variable?
74257 */
74258@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
74259 if (!static_obj(lock->key)) {
74260 debug_locks_off();
74261 printk("INFO: trying to register non-static key.\n");
74262+ printk("lock:%pS key:%pS.\n", lock, lock->key);
74263 printk("the code is fine but needs lockdep annotation.\n");
74264 printk("turning off the locking correctness validator.\n");
74265 dump_stack();
74266@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
74267 if (!class)
74268 return 0;
74269 }
74270- atomic_inc((atomic_t *)&class->ops);
74271+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
74272 if (very_verbose(class)) {
74273 printk("\nacquire class [%p] %s", class->key, class->name);
74274 if (class->name_version > 1)
74275diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
74276index b2c71c5..7b88d63 100644
74277--- a/kernel/lockdep_proc.c
74278+++ b/kernel/lockdep_proc.c
74279@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
74280 return 0;
74281 }
74282
74283- seq_printf(m, "%p", class->key);
74284+ seq_printf(m, "%pK", class->key);
74285 #ifdef CONFIG_DEBUG_LOCKDEP
74286 seq_printf(m, " OPS:%8ld", class->ops);
74287 #endif
74288@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
74289
74290 list_for_each_entry(entry, &class->locks_after, entry) {
74291 if (entry->distance == 1) {
74292- seq_printf(m, " -> [%p] ", entry->class->key);
74293+ seq_printf(m, " -> [%pK] ", entry->class->key);
74294 print_name(m, entry->class);
74295 seq_puts(m, "\n");
74296 }
74297@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
74298 if (!class->key)
74299 continue;
74300
74301- seq_printf(m, "[%p] ", class->key);
74302+ seq_printf(m, "[%pK] ", class->key);
74303 print_name(m, class);
74304 seq_puts(m, "\n");
74305 }
74306@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
74307 if (!i)
74308 seq_line(m, '-', 40-namelen, namelen);
74309
74310- snprintf(ip, sizeof(ip), "[<%p>]",
74311+ snprintf(ip, sizeof(ip), "[<%pK>]",
74312 (void *)class->contention_point[i]);
74313 seq_printf(m, "%40s %14lu %29s %pS\n",
74314 name, stats->contention_point[i],
74315@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
74316 if (!i)
74317 seq_line(m, '-', 40-namelen, namelen);
74318
74319- snprintf(ip, sizeof(ip), "[<%p>]",
74320+ snprintf(ip, sizeof(ip), "[<%pK>]",
74321 (void *)class->contending_point[i]);
74322 seq_printf(m, "%40s %14lu %29s %pS\n",
74323 name, stats->contending_point[i],
74324diff --git a/kernel/module.c b/kernel/module.c
74325index eab0827..f488603 100644
74326--- a/kernel/module.c
74327+++ b/kernel/module.c
74328@@ -61,6 +61,7 @@
74329 #include <linux/pfn.h>
74330 #include <linux/bsearch.h>
74331 #include <linux/fips.h>
74332+#include <linux/grsecurity.h>
74333 #include <uapi/linux/module.h>
74334 #include "module-internal.h"
74335
74336@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
74337
74338 /* Bounds of module allocation, for speeding __module_address.
74339 * Protected by module_mutex. */
74340-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
74341+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
74342+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
74343
74344 int register_module_notifier(struct notifier_block * nb)
74345 {
74346@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
74347 return true;
74348
74349 list_for_each_entry_rcu(mod, &modules, list) {
74350- struct symsearch arr[] = {
74351+ struct symsearch modarr[] = {
74352 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
74353 NOT_GPL_ONLY, false },
74354 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
74355@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
74356 if (mod->state == MODULE_STATE_UNFORMED)
74357 continue;
74358
74359- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
74360+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
74361 return true;
74362 }
74363 return false;
74364@@ -484,7 +486,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
74365 static int percpu_modalloc(struct module *mod,
74366 unsigned long size, unsigned long align)
74367 {
74368- if (align > PAGE_SIZE) {
74369+ if (align-1 >= PAGE_SIZE) {
74370 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
74371 mod->name, align, PAGE_SIZE);
74372 align = PAGE_SIZE;
74373@@ -1088,7 +1090,7 @@ struct module_attribute module_uevent =
74374 static ssize_t show_coresize(struct module_attribute *mattr,
74375 struct module_kobject *mk, char *buffer)
74376 {
74377- return sprintf(buffer, "%u\n", mk->mod->core_size);
74378+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
74379 }
74380
74381 static struct module_attribute modinfo_coresize =
74382@@ -1097,7 +1099,7 @@ static struct module_attribute modinfo_coresize =
74383 static ssize_t show_initsize(struct module_attribute *mattr,
74384 struct module_kobject *mk, char *buffer)
74385 {
74386- return sprintf(buffer, "%u\n", mk->mod->init_size);
74387+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
74388 }
74389
74390 static struct module_attribute modinfo_initsize =
74391@@ -1311,7 +1313,7 @@ resolve_symbol_wait(struct module *mod,
74392 */
74393 #ifdef CONFIG_SYSFS
74394
74395-#ifdef CONFIG_KALLSYMS
74396+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74397 static inline bool sect_empty(const Elf_Shdr *sect)
74398 {
74399 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
74400@@ -1451,7 +1453,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
74401 {
74402 unsigned int notes, loaded, i;
74403 struct module_notes_attrs *notes_attrs;
74404- struct bin_attribute *nattr;
74405+ bin_attribute_no_const *nattr;
74406
74407 /* failed to create section attributes, so can't create notes */
74408 if (!mod->sect_attrs)
74409@@ -1563,7 +1565,7 @@ static void del_usage_links(struct module *mod)
74410 static int module_add_modinfo_attrs(struct module *mod)
74411 {
74412 struct module_attribute *attr;
74413- struct module_attribute *temp_attr;
74414+ module_attribute_no_const *temp_attr;
74415 int error = 0;
74416 int i;
74417
74418@@ -1777,21 +1779,21 @@ static void set_section_ro_nx(void *base,
74419
74420 static void unset_module_core_ro_nx(struct module *mod)
74421 {
74422- set_page_attributes(mod->module_core + mod->core_text_size,
74423- mod->module_core + mod->core_size,
74424+ set_page_attributes(mod->module_core_rw,
74425+ mod->module_core_rw + mod->core_size_rw,
74426 set_memory_x);
74427- set_page_attributes(mod->module_core,
74428- mod->module_core + mod->core_ro_size,
74429+ set_page_attributes(mod->module_core_rx,
74430+ mod->module_core_rx + mod->core_size_rx,
74431 set_memory_rw);
74432 }
74433
74434 static void unset_module_init_ro_nx(struct module *mod)
74435 {
74436- set_page_attributes(mod->module_init + mod->init_text_size,
74437- mod->module_init + mod->init_size,
74438+ set_page_attributes(mod->module_init_rw,
74439+ mod->module_init_rw + mod->init_size_rw,
74440 set_memory_x);
74441- set_page_attributes(mod->module_init,
74442- mod->module_init + mod->init_ro_size,
74443+ set_page_attributes(mod->module_init_rx,
74444+ mod->module_init_rx + mod->init_size_rx,
74445 set_memory_rw);
74446 }
74447
74448@@ -1804,14 +1806,14 @@ void set_all_modules_text_rw(void)
74449 list_for_each_entry_rcu(mod, &modules, list) {
74450 if (mod->state == MODULE_STATE_UNFORMED)
74451 continue;
74452- if ((mod->module_core) && (mod->core_text_size)) {
74453- set_page_attributes(mod->module_core,
74454- mod->module_core + mod->core_text_size,
74455+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
74456+ set_page_attributes(mod->module_core_rx,
74457+ mod->module_core_rx + mod->core_size_rx,
74458 set_memory_rw);
74459 }
74460- if ((mod->module_init) && (mod->init_text_size)) {
74461- set_page_attributes(mod->module_init,
74462- mod->module_init + mod->init_text_size,
74463+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
74464+ set_page_attributes(mod->module_init_rx,
74465+ mod->module_init_rx + mod->init_size_rx,
74466 set_memory_rw);
74467 }
74468 }
74469@@ -1827,14 +1829,14 @@ void set_all_modules_text_ro(void)
74470 list_for_each_entry_rcu(mod, &modules, list) {
74471 if (mod->state == MODULE_STATE_UNFORMED)
74472 continue;
74473- if ((mod->module_core) && (mod->core_text_size)) {
74474- set_page_attributes(mod->module_core,
74475- mod->module_core + mod->core_text_size,
74476+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
74477+ set_page_attributes(mod->module_core_rx,
74478+ mod->module_core_rx + mod->core_size_rx,
74479 set_memory_ro);
74480 }
74481- if ((mod->module_init) && (mod->init_text_size)) {
74482- set_page_attributes(mod->module_init,
74483- mod->module_init + mod->init_text_size,
74484+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
74485+ set_page_attributes(mod->module_init_rx,
74486+ mod->module_init_rx + mod->init_size_rx,
74487 set_memory_ro);
74488 }
74489 }
74490@@ -1880,16 +1882,19 @@ static void free_module(struct module *mod)
74491
74492 /* This may be NULL, but that's OK */
74493 unset_module_init_ro_nx(mod);
74494- module_free(mod, mod->module_init);
74495+ module_free(mod, mod->module_init_rw);
74496+ module_free_exec(mod, mod->module_init_rx);
74497 kfree(mod->args);
74498 percpu_modfree(mod);
74499
74500 /* Free lock-classes: */
74501- lockdep_free_key_range(mod->module_core, mod->core_size);
74502+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
74503+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
74504
74505 /* Finally, free the core (containing the module structure) */
74506 unset_module_core_ro_nx(mod);
74507- module_free(mod, mod->module_core);
74508+ module_free_exec(mod, mod->module_core_rx);
74509+ module_free(mod, mod->module_core_rw);
74510
74511 #ifdef CONFIG_MPU
74512 update_protections(current->mm);
74513@@ -1959,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
74514 int ret = 0;
74515 const struct kernel_symbol *ksym;
74516
74517+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74518+ int is_fs_load = 0;
74519+ int register_filesystem_found = 0;
74520+ char *p;
74521+
74522+ p = strstr(mod->args, "grsec_modharden_fs");
74523+ if (p) {
74524+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
74525+ /* copy \0 as well */
74526+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
74527+ is_fs_load = 1;
74528+ }
74529+#endif
74530+
74531 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
74532 const char *name = info->strtab + sym[i].st_name;
74533
74534+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74535+ /* it's a real shame this will never get ripped and copied
74536+ upstream! ;(
74537+ */
74538+ if (is_fs_load && !strcmp(name, "register_filesystem"))
74539+ register_filesystem_found = 1;
74540+#endif
74541+
74542 switch (sym[i].st_shndx) {
74543 case SHN_COMMON:
74544 /* We compiled with -fno-common. These are not
74545@@ -1982,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
74546 ksym = resolve_symbol_wait(mod, info, name);
74547 /* Ok if resolved. */
74548 if (ksym && !IS_ERR(ksym)) {
74549+ pax_open_kernel();
74550 sym[i].st_value = ksym->value;
74551+ pax_close_kernel();
74552 break;
74553 }
74554
74555@@ -2001,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
74556 secbase = (unsigned long)mod_percpu(mod);
74557 else
74558 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
74559+ pax_open_kernel();
74560 sym[i].st_value += secbase;
74561+ pax_close_kernel();
74562 break;
74563 }
74564 }
74565
74566+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74567+ if (is_fs_load && !register_filesystem_found) {
74568+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
74569+ ret = -EPERM;
74570+ }
74571+#endif
74572+
74573 return ret;
74574 }
74575
74576@@ -2089,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
74577 || s->sh_entsize != ~0UL
74578 || strstarts(sname, ".init"))
74579 continue;
74580- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
74581+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
74582+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
74583+ else
74584+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
74585 pr_debug("\t%s\n", sname);
74586 }
74587- switch (m) {
74588- case 0: /* executable */
74589- mod->core_size = debug_align(mod->core_size);
74590- mod->core_text_size = mod->core_size;
74591- break;
74592- case 1: /* RO: text and ro-data */
74593- mod->core_size = debug_align(mod->core_size);
74594- mod->core_ro_size = mod->core_size;
74595- break;
74596- case 3: /* whole core */
74597- mod->core_size = debug_align(mod->core_size);
74598- break;
74599- }
74600 }
74601
74602 pr_debug("Init section allocation order:\n");
74603@@ -2118,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
74604 || s->sh_entsize != ~0UL
74605 || !strstarts(sname, ".init"))
74606 continue;
74607- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
74608- | INIT_OFFSET_MASK);
74609+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
74610+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
74611+ else
74612+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
74613+ s->sh_entsize |= INIT_OFFSET_MASK;
74614 pr_debug("\t%s\n", sname);
74615 }
74616- switch (m) {
74617- case 0: /* executable */
74618- mod->init_size = debug_align(mod->init_size);
74619- mod->init_text_size = mod->init_size;
74620- break;
74621- case 1: /* RO: text and ro-data */
74622- mod->init_size = debug_align(mod->init_size);
74623- mod->init_ro_size = mod->init_size;
74624- break;
74625- case 3: /* whole init */
74626- mod->init_size = debug_align(mod->init_size);
74627- break;
74628- }
74629 }
74630 }
74631
74632@@ -2306,7 +2324,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
74633
74634 /* Put symbol section at end of init part of module. */
74635 symsect->sh_flags |= SHF_ALLOC;
74636- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
74637+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
74638 info->index.sym) | INIT_OFFSET_MASK;
74639 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
74640
74641@@ -2323,13 +2341,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
74642 }
74643
74644 /* Append room for core symbols at end of core part. */
74645- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
74646- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
74647- mod->core_size += strtab_size;
74648+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
74649+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
74650+ mod->core_size_rx += strtab_size;
74651
74652 /* Put string table section at end of init part of module. */
74653 strsect->sh_flags |= SHF_ALLOC;
74654- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
74655+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
74656 info->index.str) | INIT_OFFSET_MASK;
74657 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
74658 }
74659@@ -2347,12 +2365,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
74660 /* Make sure we get permanent strtab: don't use info->strtab. */
74661 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
74662
74663+ pax_open_kernel();
74664+
74665 /* Set types up while we still have access to sections. */
74666 for (i = 0; i < mod->num_symtab; i++)
74667 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
74668
74669- mod->core_symtab = dst = mod->module_core + info->symoffs;
74670- mod->core_strtab = s = mod->module_core + info->stroffs;
74671+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
74672+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
74673 src = mod->symtab;
74674 for (ndst = i = 0; i < mod->num_symtab; i++) {
74675 if (i == 0 ||
74676@@ -2364,6 +2384,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
74677 }
74678 }
74679 mod->core_num_syms = ndst;
74680+
74681+ pax_close_kernel();
74682 }
74683 #else
74684 static inline void layout_symtab(struct module *mod, struct load_info *info)
74685@@ -2397,17 +2419,33 @@ void * __weak module_alloc(unsigned long size)
74686 return vmalloc_exec(size);
74687 }
74688
74689-static void *module_alloc_update_bounds(unsigned long size)
74690+static void *module_alloc_update_bounds_rw(unsigned long size)
74691 {
74692 void *ret = module_alloc(size);
74693
74694 if (ret) {
74695 mutex_lock(&module_mutex);
74696 /* Update module bounds. */
74697- if ((unsigned long)ret < module_addr_min)
74698- module_addr_min = (unsigned long)ret;
74699- if ((unsigned long)ret + size > module_addr_max)
74700- module_addr_max = (unsigned long)ret + size;
74701+ if ((unsigned long)ret < module_addr_min_rw)
74702+ module_addr_min_rw = (unsigned long)ret;
74703+ if ((unsigned long)ret + size > module_addr_max_rw)
74704+ module_addr_max_rw = (unsigned long)ret + size;
74705+ mutex_unlock(&module_mutex);
74706+ }
74707+ return ret;
74708+}
74709+
74710+static void *module_alloc_update_bounds_rx(unsigned long size)
74711+{
74712+ void *ret = module_alloc_exec(size);
74713+
74714+ if (ret) {
74715+ mutex_lock(&module_mutex);
74716+ /* Update module bounds. */
74717+ if ((unsigned long)ret < module_addr_min_rx)
74718+ module_addr_min_rx = (unsigned long)ret;
74719+ if ((unsigned long)ret + size > module_addr_max_rx)
74720+ module_addr_max_rx = (unsigned long)ret + size;
74721 mutex_unlock(&module_mutex);
74722 }
74723 return ret;
74724@@ -2683,8 +2721,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
74725 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
74726 {
74727 const char *modmagic = get_modinfo(info, "vermagic");
74728+ const char *license = get_modinfo(info, "license");
74729 int err;
74730
74731+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
74732+ if (!license || !license_is_gpl_compatible(license))
74733+ return -ENOEXEC;
74734+#endif
74735+
74736 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
74737 modmagic = NULL;
74738
74739@@ -2710,7 +2754,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
74740 }
74741
74742 /* Set up license info based on the info section */
74743- set_license(mod, get_modinfo(info, "license"));
74744+ set_license(mod, license);
74745
74746 return 0;
74747 }
74748@@ -2804,7 +2848,7 @@ static int move_module(struct module *mod, struct load_info *info)
74749 void *ptr;
74750
74751 /* Do the allocs. */
74752- ptr = module_alloc_update_bounds(mod->core_size);
74753+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
74754 /*
74755 * The pointer to this block is stored in the module structure
74756 * which is inside the block. Just mark it as not being a
74757@@ -2814,11 +2858,11 @@ static int move_module(struct module *mod, struct load_info *info)
74758 if (!ptr)
74759 return -ENOMEM;
74760
74761- memset(ptr, 0, mod->core_size);
74762- mod->module_core = ptr;
74763+ memset(ptr, 0, mod->core_size_rw);
74764+ mod->module_core_rw = ptr;
74765
74766- if (mod->init_size) {
74767- ptr = module_alloc_update_bounds(mod->init_size);
74768+ if (mod->init_size_rw) {
74769+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
74770 /*
74771 * The pointer to this block is stored in the module structure
74772 * which is inside the block. This block doesn't need to be
74773@@ -2827,13 +2871,45 @@ static int move_module(struct module *mod, struct load_info *info)
74774 */
74775 kmemleak_ignore(ptr);
74776 if (!ptr) {
74777- module_free(mod, mod->module_core);
74778+ module_free(mod, mod->module_core_rw);
74779 return -ENOMEM;
74780 }
74781- memset(ptr, 0, mod->init_size);
74782- mod->module_init = ptr;
74783+ memset(ptr, 0, mod->init_size_rw);
74784+ mod->module_init_rw = ptr;
74785 } else
74786- mod->module_init = NULL;
74787+ mod->module_init_rw = NULL;
74788+
74789+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
74790+ kmemleak_not_leak(ptr);
74791+ if (!ptr) {
74792+ if (mod->module_init_rw)
74793+ module_free(mod, mod->module_init_rw);
74794+ module_free(mod, mod->module_core_rw);
74795+ return -ENOMEM;
74796+ }
74797+
74798+ pax_open_kernel();
74799+ memset(ptr, 0, mod->core_size_rx);
74800+ pax_close_kernel();
74801+ mod->module_core_rx = ptr;
74802+
74803+ if (mod->init_size_rx) {
74804+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
74805+ kmemleak_ignore(ptr);
74806+ if (!ptr && mod->init_size_rx) {
74807+ module_free_exec(mod, mod->module_core_rx);
74808+ if (mod->module_init_rw)
74809+ module_free(mod, mod->module_init_rw);
74810+ module_free(mod, mod->module_core_rw);
74811+ return -ENOMEM;
74812+ }
74813+
74814+ pax_open_kernel();
74815+ memset(ptr, 0, mod->init_size_rx);
74816+ pax_close_kernel();
74817+ mod->module_init_rx = ptr;
74818+ } else
74819+ mod->module_init_rx = NULL;
74820
74821 /* Transfer each section which specifies SHF_ALLOC */
74822 pr_debug("final section addresses:\n");
74823@@ -2844,16 +2920,45 @@ static int move_module(struct module *mod, struct load_info *info)
74824 if (!(shdr->sh_flags & SHF_ALLOC))
74825 continue;
74826
74827- if (shdr->sh_entsize & INIT_OFFSET_MASK)
74828- dest = mod->module_init
74829- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
74830- else
74831- dest = mod->module_core + shdr->sh_entsize;
74832+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
74833+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
74834+ dest = mod->module_init_rw
74835+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
74836+ else
74837+ dest = mod->module_init_rx
74838+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
74839+ } else {
74840+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
74841+ dest = mod->module_core_rw + shdr->sh_entsize;
74842+ else
74843+ dest = mod->module_core_rx + shdr->sh_entsize;
74844+ }
74845+
74846+ if (shdr->sh_type != SHT_NOBITS) {
74847+
74848+#ifdef CONFIG_PAX_KERNEXEC
74849+#ifdef CONFIG_X86_64
74850+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
74851+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
74852+#endif
74853+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
74854+ pax_open_kernel();
74855+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
74856+ pax_close_kernel();
74857+ } else
74858+#endif
74859
74860- if (shdr->sh_type != SHT_NOBITS)
74861 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
74862+ }
74863 /* Update sh_addr to point to copy in image. */
74864- shdr->sh_addr = (unsigned long)dest;
74865+
74866+#ifdef CONFIG_PAX_KERNEXEC
74867+ if (shdr->sh_flags & SHF_EXECINSTR)
74868+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
74869+ else
74870+#endif
74871+
74872+ shdr->sh_addr = (unsigned long)dest;
74873 pr_debug("\t0x%lx %s\n",
74874 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
74875 }
74876@@ -2908,12 +3013,12 @@ static void flush_module_icache(const struct module *mod)
74877 * Do it before processing of module parameters, so the module
74878 * can provide parameter accessor functions of its own.
74879 */
74880- if (mod->module_init)
74881- flush_icache_range((unsigned long)mod->module_init,
74882- (unsigned long)mod->module_init
74883- + mod->init_size);
74884- flush_icache_range((unsigned long)mod->module_core,
74885- (unsigned long)mod->module_core + mod->core_size);
74886+ if (mod->module_init_rx)
74887+ flush_icache_range((unsigned long)mod->module_init_rx,
74888+ (unsigned long)mod->module_init_rx
74889+ + mod->init_size_rx);
74890+ flush_icache_range((unsigned long)mod->module_core_rx,
74891+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
74892
74893 set_fs(old_fs);
74894 }
74895@@ -2983,8 +3088,10 @@ out:
74896 static void module_deallocate(struct module *mod, struct load_info *info)
74897 {
74898 percpu_modfree(mod);
74899- module_free(mod, mod->module_init);
74900- module_free(mod, mod->module_core);
74901+ module_free_exec(mod, mod->module_init_rx);
74902+ module_free_exec(mod, mod->module_core_rx);
74903+ module_free(mod, mod->module_init_rw);
74904+ module_free(mod, mod->module_core_rw);
74905 }
74906
74907 int __weak module_finalize(const Elf_Ehdr *hdr,
74908@@ -2997,7 +3104,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
74909 static int post_relocation(struct module *mod, const struct load_info *info)
74910 {
74911 /* Sort exception table now relocations are done. */
74912+ pax_open_kernel();
74913 sort_extable(mod->extable, mod->extable + mod->num_exentries);
74914+ pax_close_kernel();
74915
74916 /* Copy relocated percpu area over. */
74917 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
74918@@ -3051,16 +3160,16 @@ static int do_init_module(struct module *mod)
74919 MODULE_STATE_COMING, mod);
74920
74921 /* Set RO and NX regions for core */
74922- set_section_ro_nx(mod->module_core,
74923- mod->core_text_size,
74924- mod->core_ro_size,
74925- mod->core_size);
74926+ set_section_ro_nx(mod->module_core_rx,
74927+ mod->core_size_rx,
74928+ mod->core_size_rx,
74929+ mod->core_size_rx);
74930
74931 /* Set RO and NX regions for init */
74932- set_section_ro_nx(mod->module_init,
74933- mod->init_text_size,
74934- mod->init_ro_size,
74935- mod->init_size);
74936+ set_section_ro_nx(mod->module_init_rx,
74937+ mod->init_size_rx,
74938+ mod->init_size_rx,
74939+ mod->init_size_rx);
74940
74941 do_mod_ctors(mod);
74942 /* Start the module */
74943@@ -3122,11 +3231,12 @@ static int do_init_module(struct module *mod)
74944 mod->strtab = mod->core_strtab;
74945 #endif
74946 unset_module_init_ro_nx(mod);
74947- module_free(mod, mod->module_init);
74948- mod->module_init = NULL;
74949- mod->init_size = 0;
74950- mod->init_ro_size = 0;
74951- mod->init_text_size = 0;
74952+ module_free(mod, mod->module_init_rw);
74953+ module_free_exec(mod, mod->module_init_rx);
74954+ mod->module_init_rw = NULL;
74955+ mod->module_init_rx = NULL;
74956+ mod->init_size_rw = 0;
74957+ mod->init_size_rx = 0;
74958 mutex_unlock(&module_mutex);
74959 wake_up_all(&module_wq);
74960
74961@@ -3209,9 +3319,38 @@ again:
74962 if (err)
74963 goto free_unload;
74964
74965+ /* Now copy in args */
74966+ mod->args = strndup_user(uargs, ~0UL >> 1);
74967+ if (IS_ERR(mod->args)) {
74968+ err = PTR_ERR(mod->args);
74969+ goto free_unload;
74970+ }
74971+
74972 /* Set up MODINFO_ATTR fields */
74973 setup_modinfo(mod, info);
74974
74975+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74976+ {
74977+ char *p, *p2;
74978+
74979+ if (strstr(mod->args, "grsec_modharden_netdev")) {
74980+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
74981+ err = -EPERM;
74982+ goto free_modinfo;
74983+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
74984+ p += sizeof("grsec_modharden_normal") - 1;
74985+ p2 = strstr(p, "_");
74986+ if (p2) {
74987+ *p2 = '\0';
74988+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
74989+ *p2 = '_';
74990+ }
74991+ err = -EPERM;
74992+ goto free_modinfo;
74993+ }
74994+ }
74995+#endif
74996+
74997 /* Fix up syms, so that st_value is a pointer to location. */
74998 err = simplify_symbols(mod, info);
74999 if (err < 0)
75000@@ -3227,13 +3366,6 @@ again:
75001
75002 flush_module_icache(mod);
75003
75004- /* Now copy in args */
75005- mod->args = strndup_user(uargs, ~0UL >> 1);
75006- if (IS_ERR(mod->args)) {
75007- err = PTR_ERR(mod->args);
75008- goto free_arch_cleanup;
75009- }
75010-
75011 dynamic_debug_setup(info->debug, info->num_debug);
75012
75013 mutex_lock(&module_mutex);
75014@@ -3278,11 +3410,10 @@ again:
75015 mutex_unlock(&module_mutex);
75016 dynamic_debug_remove(info->debug);
75017 synchronize_sched();
75018- kfree(mod->args);
75019- free_arch_cleanup:
75020 module_arch_cleanup(mod);
75021 free_modinfo:
75022 free_modinfo(mod);
75023+ kfree(mod->args);
75024 free_unload:
75025 module_unload_free(mod);
75026 unlink_mod:
75027@@ -3365,10 +3496,16 @@ static const char *get_ksymbol(struct module *mod,
75028 unsigned long nextval;
75029
75030 /* At worse, next value is at end of module */
75031- if (within_module_init(addr, mod))
75032- nextval = (unsigned long)mod->module_init+mod->init_text_size;
75033+ if (within_module_init_rx(addr, mod))
75034+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
75035+ else if (within_module_init_rw(addr, mod))
75036+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
75037+ else if (within_module_core_rx(addr, mod))
75038+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
75039+ else if (within_module_core_rw(addr, mod))
75040+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
75041 else
75042- nextval = (unsigned long)mod->module_core+mod->core_text_size;
75043+ return NULL;
75044
75045 /* Scan for closest preceding symbol, and next symbol. (ELF
75046 starts real symbols at 1). */
75047@@ -3621,7 +3758,7 @@ static int m_show(struct seq_file *m, void *p)
75048 return 0;
75049
75050 seq_printf(m, "%s %u",
75051- mod->name, mod->init_size + mod->core_size);
75052+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
75053 print_unload_info(m, mod);
75054
75055 /* Informative for users. */
75056@@ -3630,7 +3767,7 @@ static int m_show(struct seq_file *m, void *p)
75057 mod->state == MODULE_STATE_COMING ? "Loading":
75058 "Live");
75059 /* Used by oprofile and other similar tools. */
75060- seq_printf(m, " 0x%pK", mod->module_core);
75061+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
75062
75063 /* Taints info */
75064 if (mod->taints)
75065@@ -3666,7 +3803,17 @@ static const struct file_operations proc_modules_operations = {
75066
75067 static int __init proc_modules_init(void)
75068 {
75069+#ifndef CONFIG_GRKERNSEC_HIDESYM
75070+#ifdef CONFIG_GRKERNSEC_PROC_USER
75071+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
75072+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75073+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
75074+#else
75075 proc_create("modules", 0, NULL, &proc_modules_operations);
75076+#endif
75077+#else
75078+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
75079+#endif
75080 return 0;
75081 }
75082 module_init(proc_modules_init);
75083@@ -3727,14 +3874,14 @@ struct module *__module_address(unsigned long addr)
75084 {
75085 struct module *mod;
75086
75087- if (addr < module_addr_min || addr > module_addr_max)
75088+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
75089+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
75090 return NULL;
75091
75092 list_for_each_entry_rcu(mod, &modules, list) {
75093 if (mod->state == MODULE_STATE_UNFORMED)
75094 continue;
75095- if (within_module_core(addr, mod)
75096- || within_module_init(addr, mod))
75097+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
75098 return mod;
75099 }
75100 return NULL;
75101@@ -3769,11 +3916,20 @@ bool is_module_text_address(unsigned long addr)
75102 */
75103 struct module *__module_text_address(unsigned long addr)
75104 {
75105- struct module *mod = __module_address(addr);
75106+ struct module *mod;
75107+
75108+#ifdef CONFIG_X86_32
75109+ addr = ktla_ktva(addr);
75110+#endif
75111+
75112+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
75113+ return NULL;
75114+
75115+ mod = __module_address(addr);
75116+
75117 if (mod) {
75118 /* Make sure it's within the text section. */
75119- if (!within(addr, mod->module_init, mod->init_text_size)
75120- && !within(addr, mod->module_core, mod->core_text_size))
75121+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
75122 mod = NULL;
75123 }
75124 return mod;
75125diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
75126index 7e3443f..b2a1e6b 100644
75127--- a/kernel/mutex-debug.c
75128+++ b/kernel/mutex-debug.c
75129@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
75130 }
75131
75132 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
75133- struct thread_info *ti)
75134+ struct task_struct *task)
75135 {
75136 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
75137
75138 /* Mark the current thread as blocked on the lock: */
75139- ti->task->blocked_on = waiter;
75140+ task->blocked_on = waiter;
75141 }
75142
75143 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
75144- struct thread_info *ti)
75145+ struct task_struct *task)
75146 {
75147 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
75148- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
75149- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
75150- ti->task->blocked_on = NULL;
75151+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
75152+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
75153+ task->blocked_on = NULL;
75154
75155 list_del_init(&waiter->list);
75156 waiter->task = NULL;
75157diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
75158index 0799fd3..d06ae3b 100644
75159--- a/kernel/mutex-debug.h
75160+++ b/kernel/mutex-debug.h
75161@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
75162 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
75163 extern void debug_mutex_add_waiter(struct mutex *lock,
75164 struct mutex_waiter *waiter,
75165- struct thread_info *ti);
75166+ struct task_struct *task);
75167 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
75168- struct thread_info *ti);
75169+ struct task_struct *task);
75170 extern void debug_mutex_unlock(struct mutex *lock);
75171 extern void debug_mutex_init(struct mutex *lock, const char *name,
75172 struct lock_class_key *key);
75173diff --git a/kernel/mutex.c b/kernel/mutex.c
75174index a307cc9..27fd2e9 100644
75175--- a/kernel/mutex.c
75176+++ b/kernel/mutex.c
75177@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
75178 spin_lock_mutex(&lock->wait_lock, flags);
75179
75180 debug_mutex_lock_common(lock, &waiter);
75181- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
75182+ debug_mutex_add_waiter(lock, &waiter, task);
75183
75184 /* add waiting tasks to the end of the waitqueue (FIFO): */
75185 list_add_tail(&waiter.list, &lock->wait_list);
75186@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
75187 * TASK_UNINTERRUPTIBLE case.)
75188 */
75189 if (unlikely(signal_pending_state(state, task))) {
75190- mutex_remove_waiter(lock, &waiter,
75191- task_thread_info(task));
75192+ mutex_remove_waiter(lock, &waiter, task);
75193 mutex_release(&lock->dep_map, 1, ip);
75194 spin_unlock_mutex(&lock->wait_lock, flags);
75195
75196@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
75197 done:
75198 lock_acquired(&lock->dep_map, ip);
75199 /* got the lock - rejoice! */
75200- mutex_remove_waiter(lock, &waiter, current_thread_info());
75201+ mutex_remove_waiter(lock, &waiter, task);
75202 mutex_set_owner(lock);
75203
75204 /* set it to 0 if there are no waiters left: */
75205diff --git a/kernel/notifier.c b/kernel/notifier.c
75206index 2d5cc4c..d9ea600 100644
75207--- a/kernel/notifier.c
75208+++ b/kernel/notifier.c
75209@@ -5,6 +5,7 @@
75210 #include <linux/rcupdate.h>
75211 #include <linux/vmalloc.h>
75212 #include <linux/reboot.h>
75213+#include <linux/mm.h>
75214
75215 /*
75216 * Notifier list for kernel code which wants to be called
75217@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
75218 while ((*nl) != NULL) {
75219 if (n->priority > (*nl)->priority)
75220 break;
75221- nl = &((*nl)->next);
75222+ nl = (struct notifier_block **)&((*nl)->next);
75223 }
75224- n->next = *nl;
75225+ pax_open_kernel();
75226+ *(const void **)&n->next = *nl;
75227 rcu_assign_pointer(*nl, n);
75228+ pax_close_kernel();
75229 return 0;
75230 }
75231
75232@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
75233 return 0;
75234 if (n->priority > (*nl)->priority)
75235 break;
75236- nl = &((*nl)->next);
75237+ nl = (struct notifier_block **)&((*nl)->next);
75238 }
75239- n->next = *nl;
75240+ pax_open_kernel();
75241+ *(const void **)&n->next = *nl;
75242 rcu_assign_pointer(*nl, n);
75243+ pax_close_kernel();
75244 return 0;
75245 }
75246
75247@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
75248 {
75249 while ((*nl) != NULL) {
75250 if ((*nl) == n) {
75251+ pax_open_kernel();
75252 rcu_assign_pointer(*nl, n->next);
75253+ pax_close_kernel();
75254 return 0;
75255 }
75256- nl = &((*nl)->next);
75257+ nl = (struct notifier_block **)&((*nl)->next);
75258 }
75259 return -ENOENT;
75260 }
75261diff --git a/kernel/panic.c b/kernel/panic.c
75262index e1b2822..5edc1d9 100644
75263--- a/kernel/panic.c
75264+++ b/kernel/panic.c
75265@@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
75266 const char *board;
75267
75268 printk(KERN_WARNING "------------[ cut here ]------------\n");
75269- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
75270+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
75271 board = dmi_get_system_info(DMI_PRODUCT_NAME);
75272 if (board)
75273 printk(KERN_WARNING "Hardware name: %s\n", board);
75274@@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
75275 */
75276 void __stack_chk_fail(void)
75277 {
75278- panic("stack-protector: Kernel stack is corrupted in: %p\n",
75279+ dump_stack();
75280+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
75281 __builtin_return_address(0));
75282 }
75283 EXPORT_SYMBOL(__stack_chk_fail);
75284diff --git a/kernel/pid.c b/kernel/pid.c
75285index f2c6a68..4922d97 100644
75286--- a/kernel/pid.c
75287+++ b/kernel/pid.c
75288@@ -33,6 +33,7 @@
75289 #include <linux/rculist.h>
75290 #include <linux/bootmem.h>
75291 #include <linux/hash.h>
75292+#include <linux/security.h>
75293 #include <linux/pid_namespace.h>
75294 #include <linux/init_task.h>
75295 #include <linux/syscalls.h>
75296@@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
75297
75298 int pid_max = PID_MAX_DEFAULT;
75299
75300-#define RESERVED_PIDS 300
75301+#define RESERVED_PIDS 500
75302
75303 int pid_max_min = RESERVED_PIDS + 1;
75304 int pid_max_max = PID_MAX_LIMIT;
75305@@ -441,10 +442,18 @@ EXPORT_SYMBOL(pid_task);
75306 */
75307 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
75308 {
75309+ struct task_struct *task;
75310+
75311 rcu_lockdep_assert(rcu_read_lock_held(),
75312 "find_task_by_pid_ns() needs rcu_read_lock()"
75313 " protection");
75314- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
75315+
75316+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
75317+
75318+ if (gr_pid_is_chrooted(task))
75319+ return NULL;
75320+
75321+ return task;
75322 }
75323
75324 struct task_struct *find_task_by_vpid(pid_t vnr)
75325@@ -452,6 +461,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
75326 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
75327 }
75328
75329+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
75330+{
75331+ rcu_lockdep_assert(rcu_read_lock_held(),
75332+ "find_task_by_pid_ns() needs rcu_read_lock()"
75333+ " protection");
75334+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
75335+}
75336+
75337 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
75338 {
75339 struct pid *pid;
75340diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
75341index c1c3dc1..bbeaf31 100644
75342--- a/kernel/pid_namespace.c
75343+++ b/kernel/pid_namespace.c
75344@@ -248,7 +248,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
75345 void __user *buffer, size_t *lenp, loff_t *ppos)
75346 {
75347 struct pid_namespace *pid_ns = task_active_pid_ns(current);
75348- struct ctl_table tmp = *table;
75349+ ctl_table_no_const tmp = *table;
75350
75351 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
75352 return -EPERM;
75353diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
75354index 942ca27..111e609 100644
75355--- a/kernel/posix-cpu-timers.c
75356+++ b/kernel/posix-cpu-timers.c
75357@@ -1576,14 +1576,14 @@ struct k_clock clock_posix_cpu = {
75358
75359 static __init int init_posix_cpu_timers(void)
75360 {
75361- struct k_clock process = {
75362+ static struct k_clock process = {
75363 .clock_getres = process_cpu_clock_getres,
75364 .clock_get = process_cpu_clock_get,
75365 .timer_create = process_cpu_timer_create,
75366 .nsleep = process_cpu_nsleep,
75367 .nsleep_restart = process_cpu_nsleep_restart,
75368 };
75369- struct k_clock thread = {
75370+ static struct k_clock thread = {
75371 .clock_getres = thread_cpu_clock_getres,
75372 .clock_get = thread_cpu_clock_get,
75373 .timer_create = thread_cpu_timer_create,
75374diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
75375index e885be1..380fe76 100644
75376--- a/kernel/posix-timers.c
75377+++ b/kernel/posix-timers.c
75378@@ -43,6 +43,7 @@
75379 #include <linux/idr.h>
75380 #include <linux/posix-clock.h>
75381 #include <linux/posix-timers.h>
75382+#include <linux/grsecurity.h>
75383 #include <linux/syscalls.h>
75384 #include <linux/wait.h>
75385 #include <linux/workqueue.h>
75386@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
75387 * which we beg off on and pass to do_sys_settimeofday().
75388 */
75389
75390-static struct k_clock posix_clocks[MAX_CLOCKS];
75391+static struct k_clock *posix_clocks[MAX_CLOCKS];
75392
75393 /*
75394 * These ones are defined below.
75395@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
75396 */
75397 static __init int init_posix_timers(void)
75398 {
75399- struct k_clock clock_realtime = {
75400+ static struct k_clock clock_realtime = {
75401 .clock_getres = hrtimer_get_res,
75402 .clock_get = posix_clock_realtime_get,
75403 .clock_set = posix_clock_realtime_set,
75404@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
75405 .timer_get = common_timer_get,
75406 .timer_del = common_timer_del,
75407 };
75408- struct k_clock clock_monotonic = {
75409+ static struct k_clock clock_monotonic = {
75410 .clock_getres = hrtimer_get_res,
75411 .clock_get = posix_ktime_get_ts,
75412 .nsleep = common_nsleep,
75413@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
75414 .timer_get = common_timer_get,
75415 .timer_del = common_timer_del,
75416 };
75417- struct k_clock clock_monotonic_raw = {
75418+ static struct k_clock clock_monotonic_raw = {
75419 .clock_getres = hrtimer_get_res,
75420 .clock_get = posix_get_monotonic_raw,
75421 };
75422- struct k_clock clock_realtime_coarse = {
75423+ static struct k_clock clock_realtime_coarse = {
75424 .clock_getres = posix_get_coarse_res,
75425 .clock_get = posix_get_realtime_coarse,
75426 };
75427- struct k_clock clock_monotonic_coarse = {
75428+ static struct k_clock clock_monotonic_coarse = {
75429 .clock_getres = posix_get_coarse_res,
75430 .clock_get = posix_get_monotonic_coarse,
75431 };
75432- struct k_clock clock_boottime = {
75433+ static struct k_clock clock_boottime = {
75434 .clock_getres = hrtimer_get_res,
75435 .clock_get = posix_get_boottime,
75436 .nsleep = common_nsleep,
75437@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
75438 return;
75439 }
75440
75441- posix_clocks[clock_id] = *new_clock;
75442+ posix_clocks[clock_id] = new_clock;
75443 }
75444 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
75445
75446@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
75447 return (id & CLOCKFD_MASK) == CLOCKFD ?
75448 &clock_posix_dynamic : &clock_posix_cpu;
75449
75450- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
75451+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
75452 return NULL;
75453- return &posix_clocks[id];
75454+ return posix_clocks[id];
75455 }
75456
75457 static int common_timer_create(struct k_itimer *new_timer)
75458@@ -966,6 +967,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
75459 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
75460 return -EFAULT;
75461
75462+ /* only the CLOCK_REALTIME clock can be set, all other clocks
75463+ have their clock_set fptr set to a nosettime dummy function
75464+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
75465+ call common_clock_set, which calls do_sys_settimeofday, which
75466+ we hook
75467+ */
75468+
75469 return kc->clock_set(which_clock, &new_tp);
75470 }
75471
75472diff --git a/kernel/power/process.c b/kernel/power/process.c
75473index d5a258b..4271191 100644
75474--- a/kernel/power/process.c
75475+++ b/kernel/power/process.c
75476@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
75477 u64 elapsed_csecs64;
75478 unsigned int elapsed_csecs;
75479 bool wakeup = false;
75480+ bool timedout = false;
75481
75482 do_gettimeofday(&start);
75483
75484@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
75485
75486 while (true) {
75487 todo = 0;
75488+ if (time_after(jiffies, end_time))
75489+ timedout = true;
75490 read_lock(&tasklist_lock);
75491 do_each_thread(g, p) {
75492 if (p == current || !freeze_task(p))
75493 continue;
75494
75495- if (!freezer_should_skip(p))
75496+ if (!freezer_should_skip(p)) {
75497 todo++;
75498+ if (timedout) {
75499+ printk(KERN_ERR "Task refusing to freeze:\n");
75500+ sched_show_task(p);
75501+ }
75502+ }
75503 } while_each_thread(g, p);
75504 read_unlock(&tasklist_lock);
75505
75506@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
75507 todo += wq_busy;
75508 }
75509
75510- if (!todo || time_after(jiffies, end_time))
75511+ if (!todo || timedout)
75512 break;
75513
75514 if (pm_wakeup_pending()) {
75515diff --git a/kernel/printk.c b/kernel/printk.c
75516index 267ce78..2487112 100644
75517--- a/kernel/printk.c
75518+++ b/kernel/printk.c
75519@@ -609,11 +609,17 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
75520 return ret;
75521 }
75522
75523+static int check_syslog_permissions(int type, bool from_file);
75524+
75525 static int devkmsg_open(struct inode *inode, struct file *file)
75526 {
75527 struct devkmsg_user *user;
75528 int err;
75529
75530+ err = check_syslog_permissions(SYSLOG_ACTION_OPEN, SYSLOG_FROM_FILE);
75531+ if (err)
75532+ return err;
75533+
75534 /* write-only does not need any file context */
75535 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
75536 return 0;
75537@@ -822,7 +828,7 @@ static int syslog_action_restricted(int type)
75538 if (dmesg_restrict)
75539 return 1;
75540 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
75541- return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
75542+ return type != SYSLOG_ACTION_OPEN && type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
75543 }
75544
75545 static int check_syslog_permissions(int type, bool from_file)
75546@@ -834,6 +840,11 @@ static int check_syslog_permissions(int type, bool from_file)
75547 if (from_file && type != SYSLOG_ACTION_OPEN)
75548 return 0;
75549
75550+#ifdef CONFIG_GRKERNSEC_DMESG
75551+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
75552+ return -EPERM;
75553+#endif
75554+
75555 if (syslog_action_restricted(type)) {
75556 if (capable(CAP_SYSLOG))
75557 return 0;
75558diff --git a/kernel/profile.c b/kernel/profile.c
75559index 1f39181..86093471 100644
75560--- a/kernel/profile.c
75561+++ b/kernel/profile.c
75562@@ -40,7 +40,7 @@ struct profile_hit {
75563 /* Oprofile timer tick hook */
75564 static int (*timer_hook)(struct pt_regs *) __read_mostly;
75565
75566-static atomic_t *prof_buffer;
75567+static atomic_unchecked_t *prof_buffer;
75568 static unsigned long prof_len, prof_shift;
75569
75570 int prof_on __read_mostly;
75571@@ -282,7 +282,7 @@ static void profile_flip_buffers(void)
75572 hits[i].pc = 0;
75573 continue;
75574 }
75575- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
75576+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
75577 hits[i].hits = hits[i].pc = 0;
75578 }
75579 }
75580@@ -343,9 +343,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
75581 * Add the current hit(s) and flush the write-queue out
75582 * to the global buffer:
75583 */
75584- atomic_add(nr_hits, &prof_buffer[pc]);
75585+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
75586 for (i = 0; i < NR_PROFILE_HIT; ++i) {
75587- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
75588+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
75589 hits[i].pc = hits[i].hits = 0;
75590 }
75591 out:
75592@@ -420,7 +420,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
75593 {
75594 unsigned long pc;
75595 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
75596- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
75597+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
75598 }
75599 #endif /* !CONFIG_SMP */
75600
75601@@ -518,7 +518,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
75602 return -EFAULT;
75603 buf++; p++; count--; read++;
75604 }
75605- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
75606+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
75607 if (copy_to_user(buf, (void *)pnt, count))
75608 return -EFAULT;
75609 read += count;
75610@@ -549,7 +549,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
75611 }
75612 #endif
75613 profile_discard_flip_buffers();
75614- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
75615+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
75616 return count;
75617 }
75618
75619diff --git a/kernel/ptrace.c b/kernel/ptrace.c
75620index 6cbeaae..363c48a 100644
75621--- a/kernel/ptrace.c
75622+++ b/kernel/ptrace.c
75623@@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
75624 if (seize)
75625 flags |= PT_SEIZED;
75626 rcu_read_lock();
75627- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
75628+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
75629 flags |= PT_PTRACE_CAP;
75630 rcu_read_unlock();
75631 task->ptrace = flags;
75632@@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
75633 break;
75634 return -EIO;
75635 }
75636- if (copy_to_user(dst, buf, retval))
75637+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
75638 return -EFAULT;
75639 copied += retval;
75640 src += retval;
75641@@ -720,7 +720,7 @@ int ptrace_request(struct task_struct *child, long request,
75642 bool seized = child->ptrace & PT_SEIZED;
75643 int ret = -EIO;
75644 siginfo_t siginfo, *si;
75645- void __user *datavp = (void __user *) data;
75646+ void __user *datavp = (__force void __user *) data;
75647 unsigned long __user *datalp = datavp;
75648 unsigned long flags;
75649
75650@@ -922,14 +922,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
75651 goto out;
75652 }
75653
75654+ if (gr_handle_ptrace(child, request)) {
75655+ ret = -EPERM;
75656+ goto out_put_task_struct;
75657+ }
75658+
75659 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
75660 ret = ptrace_attach(child, request, addr, data);
75661 /*
75662 * Some architectures need to do book-keeping after
75663 * a ptrace attach.
75664 */
75665- if (!ret)
75666+ if (!ret) {
75667 arch_ptrace_attach(child);
75668+ gr_audit_ptrace(child);
75669+ }
75670 goto out_put_task_struct;
75671 }
75672
75673@@ -957,7 +964,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
75674 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
75675 if (copied != sizeof(tmp))
75676 return -EIO;
75677- return put_user(tmp, (unsigned long __user *)data);
75678+ return put_user(tmp, (__force unsigned long __user *)data);
75679 }
75680
75681 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
75682@@ -1067,14 +1074,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
75683 goto out;
75684 }
75685
75686+ if (gr_handle_ptrace(child, request)) {
75687+ ret = -EPERM;
75688+ goto out_put_task_struct;
75689+ }
75690+
75691 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
75692 ret = ptrace_attach(child, request, addr, data);
75693 /*
75694 * Some architectures need to do book-keeping after
75695 * a ptrace attach.
75696 */
75697- if (!ret)
75698+ if (!ret) {
75699 arch_ptrace_attach(child);
75700+ gr_audit_ptrace(child);
75701+ }
75702 goto out_put_task_struct;
75703 }
75704
75705diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
75706index e7dce58..ad0d7b7 100644
75707--- a/kernel/rcutiny.c
75708+++ b/kernel/rcutiny.c
75709@@ -46,7 +46,7 @@
75710 struct rcu_ctrlblk;
75711 static void invoke_rcu_callbacks(void);
75712 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
75713-static void rcu_process_callbacks(struct softirq_action *unused);
75714+static void rcu_process_callbacks(void);
75715 static void __call_rcu(struct rcu_head *head,
75716 void (*func)(struct rcu_head *rcu),
75717 struct rcu_ctrlblk *rcp);
75718@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
75719 rcu_is_callbacks_kthread()));
75720 }
75721
75722-static void rcu_process_callbacks(struct softirq_action *unused)
75723+static void rcu_process_callbacks(void)
75724 {
75725 __rcu_process_callbacks(&rcu_sched_ctrlblk);
75726 __rcu_process_callbacks(&rcu_bh_ctrlblk);
75727diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
75728index f85016a..91cb03b 100644
75729--- a/kernel/rcutiny_plugin.h
75730+++ b/kernel/rcutiny_plugin.h
75731@@ -896,7 +896,7 @@ static int rcu_kthread(void *arg)
75732 have_rcu_kthread_work = morework;
75733 local_irq_restore(flags);
75734 if (work)
75735- rcu_process_callbacks(NULL);
75736+ rcu_process_callbacks();
75737 schedule_timeout_interruptible(1); /* Leave CPU for others. */
75738 }
75739
75740diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
75741index 31dea01..ad91ffb 100644
75742--- a/kernel/rcutorture.c
75743+++ b/kernel/rcutorture.c
75744@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
75745 { 0 };
75746 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
75747 { 0 };
75748-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75749-static atomic_t n_rcu_torture_alloc;
75750-static atomic_t n_rcu_torture_alloc_fail;
75751-static atomic_t n_rcu_torture_free;
75752-static atomic_t n_rcu_torture_mberror;
75753-static atomic_t n_rcu_torture_error;
75754+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75755+static atomic_unchecked_t n_rcu_torture_alloc;
75756+static atomic_unchecked_t n_rcu_torture_alloc_fail;
75757+static atomic_unchecked_t n_rcu_torture_free;
75758+static atomic_unchecked_t n_rcu_torture_mberror;
75759+static atomic_unchecked_t n_rcu_torture_error;
75760 static long n_rcu_torture_barrier_error;
75761 static long n_rcu_torture_boost_ktrerror;
75762 static long n_rcu_torture_boost_rterror;
75763@@ -272,11 +272,11 @@ rcu_torture_alloc(void)
75764
75765 spin_lock_bh(&rcu_torture_lock);
75766 if (list_empty(&rcu_torture_freelist)) {
75767- atomic_inc(&n_rcu_torture_alloc_fail);
75768+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
75769 spin_unlock_bh(&rcu_torture_lock);
75770 return NULL;
75771 }
75772- atomic_inc(&n_rcu_torture_alloc);
75773+ atomic_inc_unchecked(&n_rcu_torture_alloc);
75774 p = rcu_torture_freelist.next;
75775 list_del_init(p);
75776 spin_unlock_bh(&rcu_torture_lock);
75777@@ -289,7 +289,7 @@ rcu_torture_alloc(void)
75778 static void
75779 rcu_torture_free(struct rcu_torture *p)
75780 {
75781- atomic_inc(&n_rcu_torture_free);
75782+ atomic_inc_unchecked(&n_rcu_torture_free);
75783 spin_lock_bh(&rcu_torture_lock);
75784 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
75785 spin_unlock_bh(&rcu_torture_lock);
75786@@ -409,7 +409,7 @@ rcu_torture_cb(struct rcu_head *p)
75787 i = rp->rtort_pipe_count;
75788 if (i > RCU_TORTURE_PIPE_LEN)
75789 i = RCU_TORTURE_PIPE_LEN;
75790- atomic_inc(&rcu_torture_wcount[i]);
75791+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
75792 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75793 rp->rtort_mbtest = 0;
75794 rcu_torture_free(rp);
75795@@ -457,7 +457,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
75796 i = rp->rtort_pipe_count;
75797 if (i > RCU_TORTURE_PIPE_LEN)
75798 i = RCU_TORTURE_PIPE_LEN;
75799- atomic_inc(&rcu_torture_wcount[i]);
75800+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
75801 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75802 rp->rtort_mbtest = 0;
75803 list_del(&rp->rtort_free);
75804@@ -975,7 +975,7 @@ rcu_torture_writer(void *arg)
75805 i = old_rp->rtort_pipe_count;
75806 if (i > RCU_TORTURE_PIPE_LEN)
75807 i = RCU_TORTURE_PIPE_LEN;
75808- atomic_inc(&rcu_torture_wcount[i]);
75809+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
75810 old_rp->rtort_pipe_count++;
75811 cur_ops->deferred_free(old_rp);
75812 }
75813@@ -1060,7 +1060,7 @@ static void rcu_torture_timer(unsigned long unused)
75814 }
75815 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
75816 if (p->rtort_mbtest == 0)
75817- atomic_inc(&n_rcu_torture_mberror);
75818+ atomic_inc_unchecked(&n_rcu_torture_mberror);
75819 spin_lock(&rand_lock);
75820 cur_ops->read_delay(&rand);
75821 n_rcu_torture_timers++;
75822@@ -1124,7 +1124,7 @@ rcu_torture_reader(void *arg)
75823 }
75824 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
75825 if (p->rtort_mbtest == 0)
75826- atomic_inc(&n_rcu_torture_mberror);
75827+ atomic_inc_unchecked(&n_rcu_torture_mberror);
75828 cur_ops->read_delay(&rand);
75829 preempt_disable();
75830 pipe_count = p->rtort_pipe_count;
75831@@ -1183,11 +1183,11 @@ rcu_torture_printk(char *page)
75832 rcu_torture_current,
75833 rcu_torture_current_version,
75834 list_empty(&rcu_torture_freelist),
75835- atomic_read(&n_rcu_torture_alloc),
75836- atomic_read(&n_rcu_torture_alloc_fail),
75837- atomic_read(&n_rcu_torture_free));
75838+ atomic_read_unchecked(&n_rcu_torture_alloc),
75839+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
75840+ atomic_read_unchecked(&n_rcu_torture_free));
75841 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
75842- atomic_read(&n_rcu_torture_mberror),
75843+ atomic_read_unchecked(&n_rcu_torture_mberror),
75844 n_rcu_torture_boost_ktrerror,
75845 n_rcu_torture_boost_rterror);
75846 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
75847@@ -1206,14 +1206,14 @@ rcu_torture_printk(char *page)
75848 n_barrier_attempts,
75849 n_rcu_torture_barrier_error);
75850 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
75851- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
75852+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
75853 n_rcu_torture_barrier_error != 0 ||
75854 n_rcu_torture_boost_ktrerror != 0 ||
75855 n_rcu_torture_boost_rterror != 0 ||
75856 n_rcu_torture_boost_failure != 0 ||
75857 i > 1) {
75858 cnt += sprintf(&page[cnt], "!!! ");
75859- atomic_inc(&n_rcu_torture_error);
75860+ atomic_inc_unchecked(&n_rcu_torture_error);
75861 WARN_ON_ONCE(1);
75862 }
75863 cnt += sprintf(&page[cnt], "Reader Pipe: ");
75864@@ -1227,7 +1227,7 @@ rcu_torture_printk(char *page)
75865 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
75866 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75867 cnt += sprintf(&page[cnt], " %d",
75868- atomic_read(&rcu_torture_wcount[i]));
75869+ atomic_read_unchecked(&rcu_torture_wcount[i]));
75870 }
75871 cnt += sprintf(&page[cnt], "\n");
75872 if (cur_ops->stats)
75873@@ -1920,7 +1920,7 @@ rcu_torture_cleanup(void)
75874
75875 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
75876
75877- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
75878+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
75879 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
75880 else if (n_online_successes != n_online_attempts ||
75881 n_offline_successes != n_offline_attempts)
75882@@ -1989,18 +1989,18 @@ rcu_torture_init(void)
75883
75884 rcu_torture_current = NULL;
75885 rcu_torture_current_version = 0;
75886- atomic_set(&n_rcu_torture_alloc, 0);
75887- atomic_set(&n_rcu_torture_alloc_fail, 0);
75888- atomic_set(&n_rcu_torture_free, 0);
75889- atomic_set(&n_rcu_torture_mberror, 0);
75890- atomic_set(&n_rcu_torture_error, 0);
75891+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
75892+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
75893+ atomic_set_unchecked(&n_rcu_torture_free, 0);
75894+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
75895+ atomic_set_unchecked(&n_rcu_torture_error, 0);
75896 n_rcu_torture_barrier_error = 0;
75897 n_rcu_torture_boost_ktrerror = 0;
75898 n_rcu_torture_boost_rterror = 0;
75899 n_rcu_torture_boost_failure = 0;
75900 n_rcu_torture_boosts = 0;
75901 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
75902- atomic_set(&rcu_torture_wcount[i], 0);
75903+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
75904 for_each_possible_cpu(cpu) {
75905 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75906 per_cpu(rcu_torture_count, cpu)[i] = 0;
75907diff --git a/kernel/rcutree.c b/kernel/rcutree.c
75908index e441b77..dd54f17 100644
75909--- a/kernel/rcutree.c
75910+++ b/kernel/rcutree.c
75911@@ -349,9 +349,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
75912 rcu_prepare_for_idle(smp_processor_id());
75913 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
75914 smp_mb__before_atomic_inc(); /* See above. */
75915- atomic_inc(&rdtp->dynticks);
75916+ atomic_inc_unchecked(&rdtp->dynticks);
75917 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
75918- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
75919+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
75920
75921 /*
75922 * It is illegal to enter an extended quiescent state while
75923@@ -487,10 +487,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
75924 int user)
75925 {
75926 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
75927- atomic_inc(&rdtp->dynticks);
75928+ atomic_inc_unchecked(&rdtp->dynticks);
75929 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
75930 smp_mb__after_atomic_inc(); /* See above. */
75931- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
75932+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
75933 rcu_cleanup_after_idle(smp_processor_id());
75934 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
75935 if (!user && !is_idle_task(current)) {
75936@@ -629,14 +629,14 @@ void rcu_nmi_enter(void)
75937 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
75938
75939 if (rdtp->dynticks_nmi_nesting == 0 &&
75940- (atomic_read(&rdtp->dynticks) & 0x1))
75941+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
75942 return;
75943 rdtp->dynticks_nmi_nesting++;
75944 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
75945- atomic_inc(&rdtp->dynticks);
75946+ atomic_inc_unchecked(&rdtp->dynticks);
75947 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
75948 smp_mb__after_atomic_inc(); /* See above. */
75949- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
75950+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
75951 }
75952
75953 /**
75954@@ -655,9 +655,9 @@ void rcu_nmi_exit(void)
75955 return;
75956 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
75957 smp_mb__before_atomic_inc(); /* See above. */
75958- atomic_inc(&rdtp->dynticks);
75959+ atomic_inc_unchecked(&rdtp->dynticks);
75960 smp_mb__after_atomic_inc(); /* Force delay to next write. */
75961- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
75962+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
75963 }
75964
75965 /**
75966@@ -671,7 +671,7 @@ int rcu_is_cpu_idle(void)
75967 int ret;
75968
75969 preempt_disable();
75970- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
75971+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
75972 preempt_enable();
75973 return ret;
75974 }
75975@@ -739,7 +739,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
75976 */
75977 static int dyntick_save_progress_counter(struct rcu_data *rdp)
75978 {
75979- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
75980+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
75981 return (rdp->dynticks_snap & 0x1) == 0;
75982 }
75983
75984@@ -754,7 +754,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
75985 unsigned int curr;
75986 unsigned int snap;
75987
75988- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
75989+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
75990 snap = (unsigned int)rdp->dynticks_snap;
75991
75992 /*
75993@@ -802,10 +802,10 @@ static int jiffies_till_stall_check(void)
75994 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
75995 */
75996 if (till_stall_check < 3) {
75997- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
75998+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
75999 till_stall_check = 3;
76000 } else if (till_stall_check > 300) {
76001- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
76002+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
76003 till_stall_check = 300;
76004 }
76005 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
76006@@ -1592,7 +1592,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
76007 rsp->qlen += rdp->qlen;
76008 rdp->n_cbs_orphaned += rdp->qlen;
76009 rdp->qlen_lazy = 0;
76010- ACCESS_ONCE(rdp->qlen) = 0;
76011+ ACCESS_ONCE_RW(rdp->qlen) = 0;
76012 }
76013
76014 /*
76015@@ -1838,7 +1838,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
76016 }
76017 smp_mb(); /* List handling before counting for rcu_barrier(). */
76018 rdp->qlen_lazy -= count_lazy;
76019- ACCESS_ONCE(rdp->qlen) -= count;
76020+ ACCESS_ONCE_RW(rdp->qlen) -= count;
76021 rdp->n_cbs_invoked += count;
76022
76023 /* Reinstate batch limit if we have worked down the excess. */
76024@@ -2031,7 +2031,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
76025 /*
76026 * Do RCU core processing for the current CPU.
76027 */
76028-static void rcu_process_callbacks(struct softirq_action *unused)
76029+static void rcu_process_callbacks(void)
76030 {
76031 struct rcu_state *rsp;
76032
76033@@ -2154,7 +2154,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
76034 local_irq_restore(flags);
76035 return;
76036 }
76037- ACCESS_ONCE(rdp->qlen)++;
76038+ ACCESS_ONCE_RW(rdp->qlen)++;
76039 if (lazy)
76040 rdp->qlen_lazy++;
76041 else
76042@@ -2363,11 +2363,11 @@ void synchronize_sched_expedited(void)
76043 * counter wrap on a 32-bit system. Quite a few more CPUs would of
76044 * course be required on a 64-bit system.
76045 */
76046- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
76047+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
76048 (ulong)atomic_long_read(&rsp->expedited_done) +
76049 ULONG_MAX / 8)) {
76050 synchronize_sched();
76051- atomic_long_inc(&rsp->expedited_wrap);
76052+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
76053 return;
76054 }
76055
76056@@ -2375,7 +2375,7 @@ void synchronize_sched_expedited(void)
76057 * Take a ticket. Note that atomic_inc_return() implies a
76058 * full memory barrier.
76059 */
76060- snap = atomic_long_inc_return(&rsp->expedited_start);
76061+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
76062 firstsnap = snap;
76063 get_online_cpus();
76064 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
76065@@ -2388,14 +2388,14 @@ void synchronize_sched_expedited(void)
76066 synchronize_sched_expedited_cpu_stop,
76067 NULL) == -EAGAIN) {
76068 put_online_cpus();
76069- atomic_long_inc(&rsp->expedited_tryfail);
76070+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
76071
76072 /* Check to see if someone else did our work for us. */
76073 s = atomic_long_read(&rsp->expedited_done);
76074 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
76075 /* ensure test happens before caller kfree */
76076 smp_mb__before_atomic_inc(); /* ^^^ */
76077- atomic_long_inc(&rsp->expedited_workdone1);
76078+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
76079 return;
76080 }
76081
76082@@ -2404,7 +2404,7 @@ void synchronize_sched_expedited(void)
76083 udelay(trycount * num_online_cpus());
76084 } else {
76085 wait_rcu_gp(call_rcu_sched);
76086- atomic_long_inc(&rsp->expedited_normal);
76087+ atomic_long_inc_unchecked(&rsp->expedited_normal);
76088 return;
76089 }
76090
76091@@ -2413,7 +2413,7 @@ void synchronize_sched_expedited(void)
76092 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
76093 /* ensure test happens before caller kfree */
76094 smp_mb__before_atomic_inc(); /* ^^^ */
76095- atomic_long_inc(&rsp->expedited_workdone2);
76096+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
76097 return;
76098 }
76099
76100@@ -2425,10 +2425,10 @@ void synchronize_sched_expedited(void)
76101 * period works for us.
76102 */
76103 get_online_cpus();
76104- snap = atomic_long_read(&rsp->expedited_start);
76105+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
76106 smp_mb(); /* ensure read is before try_stop_cpus(). */
76107 }
76108- atomic_long_inc(&rsp->expedited_stoppedcpus);
76109+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
76110
76111 /*
76112 * Everyone up to our most recent fetch is covered by our grace
76113@@ -2437,16 +2437,16 @@ void synchronize_sched_expedited(void)
76114 * than we did already did their update.
76115 */
76116 do {
76117- atomic_long_inc(&rsp->expedited_done_tries);
76118+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
76119 s = atomic_long_read(&rsp->expedited_done);
76120 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
76121 /* ensure test happens before caller kfree */
76122 smp_mb__before_atomic_inc(); /* ^^^ */
76123- atomic_long_inc(&rsp->expedited_done_lost);
76124+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
76125 break;
76126 }
76127 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
76128- atomic_long_inc(&rsp->expedited_done_exit);
76129+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
76130
76131 put_online_cpus();
76132 }
76133@@ -2620,7 +2620,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
76134 * ACCESS_ONCE() to prevent the compiler from speculating
76135 * the increment to precede the early-exit check.
76136 */
76137- ACCESS_ONCE(rsp->n_barrier_done)++;
76138+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
76139 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
76140 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
76141 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
76142@@ -2670,7 +2670,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
76143
76144 /* Increment ->n_barrier_done to prevent duplicate work. */
76145 smp_mb(); /* Keep increment after above mechanism. */
76146- ACCESS_ONCE(rsp->n_barrier_done)++;
76147+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
76148 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
76149 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
76150 smp_mb(); /* Keep increment before caller's subsequent code. */
76151@@ -2715,10 +2715,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
76152 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
76153 init_callback_list(rdp);
76154 rdp->qlen_lazy = 0;
76155- ACCESS_ONCE(rdp->qlen) = 0;
76156+ ACCESS_ONCE_RW(rdp->qlen) = 0;
76157 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
76158 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
76159- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
76160+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
76161 #ifdef CONFIG_RCU_USER_QS
76162 WARN_ON_ONCE(rdp->dynticks->in_user);
76163 #endif
76164@@ -2754,8 +2754,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
76165 rdp->blimit = blimit;
76166 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
76167 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
76168- atomic_set(&rdp->dynticks->dynticks,
76169- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
76170+ atomic_set_unchecked(&rdp->dynticks->dynticks,
76171+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
76172 rcu_prepare_for_idle_init(cpu);
76173 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
76174
76175diff --git a/kernel/rcutree.h b/kernel/rcutree.h
76176index 4b69291..704c92e 100644
76177--- a/kernel/rcutree.h
76178+++ b/kernel/rcutree.h
76179@@ -86,7 +86,7 @@ struct rcu_dynticks {
76180 long long dynticks_nesting; /* Track irq/process nesting level. */
76181 /* Process level is worth LLONG_MAX/2. */
76182 int dynticks_nmi_nesting; /* Track NMI nesting level. */
76183- atomic_t dynticks; /* Even value for idle, else odd. */
76184+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
76185 #ifdef CONFIG_RCU_FAST_NO_HZ
76186 int dyntick_drain; /* Prepare-for-idle state variable. */
76187 unsigned long dyntick_holdoff;
76188@@ -423,17 +423,17 @@ struct rcu_state {
76189 /* _rcu_barrier(). */
76190 /* End of fields guarded by barrier_mutex. */
76191
76192- atomic_long_t expedited_start; /* Starting ticket. */
76193- atomic_long_t expedited_done; /* Done ticket. */
76194- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
76195- atomic_long_t expedited_tryfail; /* # acquisition failures. */
76196- atomic_long_t expedited_workdone1; /* # done by others #1. */
76197- atomic_long_t expedited_workdone2; /* # done by others #2. */
76198- atomic_long_t expedited_normal; /* # fallbacks to normal. */
76199- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
76200- atomic_long_t expedited_done_tries; /* # tries to update _done. */
76201- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
76202- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
76203+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
76204+ atomic_long_t expedited_done; /* Done ticket. */
76205+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
76206+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
76207+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
76208+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
76209+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
76210+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
76211+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
76212+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
76213+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
76214
76215 unsigned long jiffies_force_qs; /* Time at which to invoke */
76216 /* force_quiescent_state(). */
76217diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
76218index c1cc7e1..f62e436 100644
76219--- a/kernel/rcutree_plugin.h
76220+++ b/kernel/rcutree_plugin.h
76221@@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
76222
76223 /* Clean up and exit. */
76224 smp_mb(); /* ensure expedited GP seen before counter increment. */
76225- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
76226+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
76227 unlock_mb_ret:
76228 mutex_unlock(&sync_rcu_preempt_exp_mutex);
76229 mb_ret:
76230@@ -1440,7 +1440,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
76231 free_cpumask_var(cm);
76232 }
76233
76234-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
76235+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
76236 .store = &rcu_cpu_kthread_task,
76237 .thread_should_run = rcu_cpu_kthread_should_run,
76238 .thread_fn = rcu_cpu_kthread,
76239@@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
76240 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
76241 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
76242 cpu, ticks_value, ticks_title,
76243- atomic_read(&rdtp->dynticks) & 0xfff,
76244+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
76245 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
76246 fast_no_hz);
76247 }
76248@@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
76249
76250 /* Enqueue the callback on the nocb list and update counts. */
76251 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
76252- ACCESS_ONCE(*old_rhpp) = rhp;
76253+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
76254 atomic_long_add(rhcount, &rdp->nocb_q_count);
76255 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
76256
76257@@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
76258 * Extract queued callbacks, update counts, and wait
76259 * for a grace period to elapse.
76260 */
76261- ACCESS_ONCE(rdp->nocb_head) = NULL;
76262+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
76263 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
76264 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
76265 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
76266- ACCESS_ONCE(rdp->nocb_p_count) += c;
76267- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
76268+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
76269+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
76270 wait_rcu_gp(rdp->rsp->call_remote);
76271
76272 /* Each pass through the following loop invokes a callback. */
76273@@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
76274 list = next;
76275 }
76276 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
76277- ACCESS_ONCE(rdp->nocb_p_count) -= c;
76278- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
76279+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
76280+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
76281 rdp->n_nocbs_invoked += c;
76282 }
76283 return 0;
76284@@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
76285 rdp = per_cpu_ptr(rsp->rda, cpu);
76286 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
76287 BUG_ON(IS_ERR(t));
76288- ACCESS_ONCE(rdp->nocb_kthread) = t;
76289+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
76290 }
76291 }
76292
76293diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
76294index 0d095dc..1985b19 100644
76295--- a/kernel/rcutree_trace.c
76296+++ b/kernel/rcutree_trace.c
76297@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
76298 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
76299 rdp->passed_quiesce, rdp->qs_pending);
76300 seq_printf(m, " dt=%d/%llx/%d df=%lu",
76301- atomic_read(&rdp->dynticks->dynticks),
76302+ atomic_read_unchecked(&rdp->dynticks->dynticks),
76303 rdp->dynticks->dynticks_nesting,
76304 rdp->dynticks->dynticks_nmi_nesting,
76305 rdp->dynticks_fqs);
76306@@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
76307 struct rcu_state *rsp = (struct rcu_state *)m->private;
76308
76309 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
76310- atomic_long_read(&rsp->expedited_start),
76311+ atomic_long_read_unchecked(&rsp->expedited_start),
76312 atomic_long_read(&rsp->expedited_done),
76313- atomic_long_read(&rsp->expedited_wrap),
76314- atomic_long_read(&rsp->expedited_tryfail),
76315- atomic_long_read(&rsp->expedited_workdone1),
76316- atomic_long_read(&rsp->expedited_workdone2),
76317- atomic_long_read(&rsp->expedited_normal),
76318- atomic_long_read(&rsp->expedited_stoppedcpus),
76319- atomic_long_read(&rsp->expedited_done_tries),
76320- atomic_long_read(&rsp->expedited_done_lost),
76321- atomic_long_read(&rsp->expedited_done_exit));
76322+ atomic_long_read_unchecked(&rsp->expedited_wrap),
76323+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
76324+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
76325+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
76326+ atomic_long_read_unchecked(&rsp->expedited_normal),
76327+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
76328+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
76329+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
76330+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
76331 return 0;
76332 }
76333
76334diff --git a/kernel/resource.c b/kernel/resource.c
76335index 73f35d4..4684fc4 100644
76336--- a/kernel/resource.c
76337+++ b/kernel/resource.c
76338@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
76339
76340 static int __init ioresources_init(void)
76341 {
76342+#ifdef CONFIG_GRKERNSEC_PROC_ADD
76343+#ifdef CONFIG_GRKERNSEC_PROC_USER
76344+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
76345+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
76346+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
76347+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
76348+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
76349+#endif
76350+#else
76351 proc_create("ioports", 0, NULL, &proc_ioports_operations);
76352 proc_create("iomem", 0, NULL, &proc_iomem_operations);
76353+#endif
76354 return 0;
76355 }
76356 __initcall(ioresources_init);
76357diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
76358index 98ec494..4241d6d 100644
76359--- a/kernel/rtmutex-tester.c
76360+++ b/kernel/rtmutex-tester.c
76361@@ -20,7 +20,7 @@
76362 #define MAX_RT_TEST_MUTEXES 8
76363
76364 static spinlock_t rttest_lock;
76365-static atomic_t rttest_event;
76366+static atomic_unchecked_t rttest_event;
76367
76368 struct test_thread_data {
76369 int opcode;
76370@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76371
76372 case RTTEST_LOCKCONT:
76373 td->mutexes[td->opdata] = 1;
76374- td->event = atomic_add_return(1, &rttest_event);
76375+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76376 return 0;
76377
76378 case RTTEST_RESET:
76379@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76380 return 0;
76381
76382 case RTTEST_RESETEVENT:
76383- atomic_set(&rttest_event, 0);
76384+ atomic_set_unchecked(&rttest_event, 0);
76385 return 0;
76386
76387 default:
76388@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76389 return ret;
76390
76391 td->mutexes[id] = 1;
76392- td->event = atomic_add_return(1, &rttest_event);
76393+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76394 rt_mutex_lock(&mutexes[id]);
76395- td->event = atomic_add_return(1, &rttest_event);
76396+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76397 td->mutexes[id] = 4;
76398 return 0;
76399
76400@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76401 return ret;
76402
76403 td->mutexes[id] = 1;
76404- td->event = atomic_add_return(1, &rttest_event);
76405+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76406 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
76407- td->event = atomic_add_return(1, &rttest_event);
76408+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76409 td->mutexes[id] = ret ? 0 : 4;
76410 return ret ? -EINTR : 0;
76411
76412@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76413 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
76414 return ret;
76415
76416- td->event = atomic_add_return(1, &rttest_event);
76417+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76418 rt_mutex_unlock(&mutexes[id]);
76419- td->event = atomic_add_return(1, &rttest_event);
76420+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76421 td->mutexes[id] = 0;
76422 return 0;
76423
76424@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
76425 break;
76426
76427 td->mutexes[dat] = 2;
76428- td->event = atomic_add_return(1, &rttest_event);
76429+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76430 break;
76431
76432 default:
76433@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
76434 return;
76435
76436 td->mutexes[dat] = 3;
76437- td->event = atomic_add_return(1, &rttest_event);
76438+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76439 break;
76440
76441 case RTTEST_LOCKNOWAIT:
76442@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
76443 return;
76444
76445 td->mutexes[dat] = 1;
76446- td->event = atomic_add_return(1, &rttest_event);
76447+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76448 return;
76449
76450 default:
76451diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
76452index 0984a21..939f183 100644
76453--- a/kernel/sched/auto_group.c
76454+++ b/kernel/sched/auto_group.c
76455@@ -11,7 +11,7 @@
76456
76457 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
76458 static struct autogroup autogroup_default;
76459-static atomic_t autogroup_seq_nr;
76460+static atomic_unchecked_t autogroup_seq_nr;
76461
76462 void __init autogroup_init(struct task_struct *init_task)
76463 {
76464@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
76465
76466 kref_init(&ag->kref);
76467 init_rwsem(&ag->lock);
76468- ag->id = atomic_inc_return(&autogroup_seq_nr);
76469+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
76470 ag->tg = tg;
76471 #ifdef CONFIG_RT_GROUP_SCHED
76472 /*
76473diff --git a/kernel/sched/core.c b/kernel/sched/core.c
76474index 26058d0..f9d3c76 100644
76475--- a/kernel/sched/core.c
76476+++ b/kernel/sched/core.c
76477@@ -3631,6 +3631,8 @@ int can_nice(const struct task_struct *p, const int nice)
76478 /* convert nice value [19,-20] to rlimit style value [1,40] */
76479 int nice_rlim = 20 - nice;
76480
76481+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
76482+
76483 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
76484 capable(CAP_SYS_NICE));
76485 }
76486@@ -3664,7 +3666,8 @@ SYSCALL_DEFINE1(nice, int, increment)
76487 if (nice > 19)
76488 nice = 19;
76489
76490- if (increment < 0 && !can_nice(current, nice))
76491+ if (increment < 0 && (!can_nice(current, nice) ||
76492+ gr_handle_chroot_nice()))
76493 return -EPERM;
76494
76495 retval = security_task_setnice(current, nice);
76496@@ -3818,6 +3821,7 @@ recheck:
76497 unsigned long rlim_rtprio =
76498 task_rlimit(p, RLIMIT_RTPRIO);
76499
76500+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
76501 /* can't set/change the rt policy */
76502 if (policy != p->policy && !rlim_rtprio)
76503 return -EPERM;
76504@@ -4901,7 +4905,7 @@ static void migrate_tasks(unsigned int dead_cpu)
76505
76506 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
76507
76508-static struct ctl_table sd_ctl_dir[] = {
76509+static ctl_table_no_const sd_ctl_dir[] __read_only = {
76510 {
76511 .procname = "sched_domain",
76512 .mode = 0555,
76513@@ -4918,17 +4922,17 @@ static struct ctl_table sd_ctl_root[] = {
76514 {}
76515 };
76516
76517-static struct ctl_table *sd_alloc_ctl_entry(int n)
76518+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
76519 {
76520- struct ctl_table *entry =
76521+ ctl_table_no_const *entry =
76522 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
76523
76524 return entry;
76525 }
76526
76527-static void sd_free_ctl_entry(struct ctl_table **tablep)
76528+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
76529 {
76530- struct ctl_table *entry;
76531+ ctl_table_no_const *entry;
76532
76533 /*
76534 * In the intermediate directories, both the child directory and
76535@@ -4936,22 +4940,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
76536 * will always be set. In the lowest directory the names are
76537 * static strings and all have proc handlers.
76538 */
76539- for (entry = *tablep; entry->mode; entry++) {
76540- if (entry->child)
76541- sd_free_ctl_entry(&entry->child);
76542+ for (entry = tablep; entry->mode; entry++) {
76543+ if (entry->child) {
76544+ sd_free_ctl_entry(entry->child);
76545+ pax_open_kernel();
76546+ entry->child = NULL;
76547+ pax_close_kernel();
76548+ }
76549 if (entry->proc_handler == NULL)
76550 kfree(entry->procname);
76551 }
76552
76553- kfree(*tablep);
76554- *tablep = NULL;
76555+ kfree(tablep);
76556 }
76557
76558 static int min_load_idx = 0;
76559 static int max_load_idx = CPU_LOAD_IDX_MAX;
76560
76561 static void
76562-set_table_entry(struct ctl_table *entry,
76563+set_table_entry(ctl_table_no_const *entry,
76564 const char *procname, void *data, int maxlen,
76565 umode_t mode, proc_handler *proc_handler,
76566 bool load_idx)
76567@@ -4971,7 +4978,7 @@ set_table_entry(struct ctl_table *entry,
76568 static struct ctl_table *
76569 sd_alloc_ctl_domain_table(struct sched_domain *sd)
76570 {
76571- struct ctl_table *table = sd_alloc_ctl_entry(13);
76572+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
76573
76574 if (table == NULL)
76575 return NULL;
76576@@ -5006,9 +5013,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
76577 return table;
76578 }
76579
76580-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
76581+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
76582 {
76583- struct ctl_table *entry, *table;
76584+ ctl_table_no_const *entry, *table;
76585 struct sched_domain *sd;
76586 int domain_num = 0, i;
76587 char buf[32];
76588@@ -5035,11 +5042,13 @@ static struct ctl_table_header *sd_sysctl_header;
76589 static void register_sched_domain_sysctl(void)
76590 {
76591 int i, cpu_num = num_possible_cpus();
76592- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
76593+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
76594 char buf[32];
76595
76596 WARN_ON(sd_ctl_dir[0].child);
76597+ pax_open_kernel();
76598 sd_ctl_dir[0].child = entry;
76599+ pax_close_kernel();
76600
76601 if (entry == NULL)
76602 return;
76603@@ -5062,8 +5071,12 @@ static void unregister_sched_domain_sysctl(void)
76604 if (sd_sysctl_header)
76605 unregister_sysctl_table(sd_sysctl_header);
76606 sd_sysctl_header = NULL;
76607- if (sd_ctl_dir[0].child)
76608- sd_free_ctl_entry(&sd_ctl_dir[0].child);
76609+ if (sd_ctl_dir[0].child) {
76610+ sd_free_ctl_entry(sd_ctl_dir[0].child);
76611+ pax_open_kernel();
76612+ sd_ctl_dir[0].child = NULL;
76613+ pax_close_kernel();
76614+ }
76615 }
76616 #else
76617 static void register_sched_domain_sysctl(void)
76618@@ -5162,7 +5175,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
76619 * happens before everything else. This has to be lower priority than
76620 * the notifier in the perf_event subsystem, though.
76621 */
76622-static struct notifier_block __cpuinitdata migration_notifier = {
76623+static struct notifier_block migration_notifier = {
76624 .notifier_call = migration_call,
76625 .priority = CPU_PRI_MIGRATION,
76626 };
76627diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
76628index 81fa536..6ccf96a 100644
76629--- a/kernel/sched/fair.c
76630+++ b/kernel/sched/fair.c
76631@@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
76632
76633 static void reset_ptenuma_scan(struct task_struct *p)
76634 {
76635- ACCESS_ONCE(p->mm->numa_scan_seq)++;
76636+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
76637 p->mm->numa_scan_offset = 0;
76638 }
76639
76640@@ -3254,25 +3254,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
76641 */
76642 static int select_idle_sibling(struct task_struct *p, int target)
76643 {
76644- int cpu = smp_processor_id();
76645- int prev_cpu = task_cpu(p);
76646 struct sched_domain *sd;
76647 struct sched_group *sg;
76648- int i;
76649+ int i = task_cpu(p);
76650
76651- /*
76652- * If the task is going to be woken-up on this cpu and if it is
76653- * already idle, then it is the right target.
76654- */
76655- if (target == cpu && idle_cpu(cpu))
76656- return cpu;
76657+ if (idle_cpu(target))
76658+ return target;
76659
76660 /*
76661- * If the task is going to be woken-up on the cpu where it previously
76662- * ran and if it is currently idle, then it the right target.
76663+ * If the prevous cpu is cache affine and idle, don't be stupid.
76664 */
76665- if (target == prev_cpu && idle_cpu(prev_cpu))
76666- return prev_cpu;
76667+ if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
76668+ return i;
76669
76670 /*
76671 * Otherwise, iterate the domains and find an elegible idle cpu.
76672@@ -3286,7 +3279,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
76673 goto next;
76674
76675 for_each_cpu(i, sched_group_cpus(sg)) {
76676- if (!idle_cpu(i))
76677+ if (i == target || !idle_cpu(i))
76678 goto next;
76679 }
76680
76681@@ -5663,7 +5656,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
76682 * run_rebalance_domains is triggered when needed from the scheduler tick.
76683 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
76684 */
76685-static void run_rebalance_domains(struct softirq_action *h)
76686+static void run_rebalance_domains(void)
76687 {
76688 int this_cpu = smp_processor_id();
76689 struct rq *this_rq = cpu_rq(this_cpu);
76690diff --git a/kernel/signal.c b/kernel/signal.c
76691index 3d09cf6..a67d2c6 100644
76692--- a/kernel/signal.c
76693+++ b/kernel/signal.c
76694@@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
76695
76696 int print_fatal_signals __read_mostly;
76697
76698-static void __user *sig_handler(struct task_struct *t, int sig)
76699+static __sighandler_t sig_handler(struct task_struct *t, int sig)
76700 {
76701 return t->sighand->action[sig - 1].sa.sa_handler;
76702 }
76703
76704-static int sig_handler_ignored(void __user *handler, int sig)
76705+static int sig_handler_ignored(__sighandler_t handler, int sig)
76706 {
76707 /* Is it explicitly or implicitly ignored? */
76708 return handler == SIG_IGN ||
76709@@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
76710
76711 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
76712 {
76713- void __user *handler;
76714+ __sighandler_t handler;
76715
76716 handler = sig_handler(t, sig);
76717
76718@@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
76719 atomic_inc(&user->sigpending);
76720 rcu_read_unlock();
76721
76722+ if (!override_rlimit)
76723+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
76724+
76725 if (override_rlimit ||
76726 atomic_read(&user->sigpending) <=
76727 task_rlimit(t, RLIMIT_SIGPENDING)) {
76728@@ -492,7 +495,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
76729
76730 int unhandled_signal(struct task_struct *tsk, int sig)
76731 {
76732- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
76733+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
76734 if (is_global_init(tsk))
76735 return 1;
76736 if (handler != SIG_IGN && handler != SIG_DFL)
76737@@ -812,6 +815,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
76738 }
76739 }
76740
76741+ /* allow glibc communication via tgkill to other threads in our
76742+ thread group */
76743+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
76744+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
76745+ && gr_handle_signal(t, sig))
76746+ return -EPERM;
76747+
76748 return security_task_kill(t, info, sig, 0);
76749 }
76750
76751@@ -1194,7 +1204,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
76752 return send_signal(sig, info, p, 1);
76753 }
76754
76755-static int
76756+int
76757 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
76758 {
76759 return send_signal(sig, info, t, 0);
76760@@ -1231,6 +1241,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
76761 unsigned long int flags;
76762 int ret, blocked, ignored;
76763 struct k_sigaction *action;
76764+ int is_unhandled = 0;
76765
76766 spin_lock_irqsave(&t->sighand->siglock, flags);
76767 action = &t->sighand->action[sig-1];
76768@@ -1245,9 +1256,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
76769 }
76770 if (action->sa.sa_handler == SIG_DFL)
76771 t->signal->flags &= ~SIGNAL_UNKILLABLE;
76772+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
76773+ is_unhandled = 1;
76774 ret = specific_send_sig_info(sig, info, t);
76775 spin_unlock_irqrestore(&t->sighand->siglock, flags);
76776
76777+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
76778+ normal operation */
76779+ if (is_unhandled) {
76780+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
76781+ gr_handle_crash(t, sig);
76782+ }
76783+
76784 return ret;
76785 }
76786
76787@@ -1314,8 +1334,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
76788 ret = check_kill_permission(sig, info, p);
76789 rcu_read_unlock();
76790
76791- if (!ret && sig)
76792+ if (!ret && sig) {
76793 ret = do_send_sig_info(sig, info, p, true);
76794+ if (!ret)
76795+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
76796+ }
76797
76798 return ret;
76799 }
76800@@ -2852,7 +2875,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
76801 int error = -ESRCH;
76802
76803 rcu_read_lock();
76804- p = find_task_by_vpid(pid);
76805+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76806+ /* allow glibc communication via tgkill to other threads in our
76807+ thread group */
76808+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
76809+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
76810+ p = find_task_by_vpid_unrestricted(pid);
76811+ else
76812+#endif
76813+ p = find_task_by_vpid(pid);
76814 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
76815 error = check_kill_permission(sig, info, p);
76816 /*
76817@@ -3135,8 +3166,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
76818 }
76819 seg = get_fs();
76820 set_fs(KERNEL_DS);
76821- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
76822- (stack_t __force __user *) &uoss,
76823+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
76824+ (stack_t __force_user *) &uoss,
76825 compat_user_stack_pointer());
76826 set_fs(seg);
76827 if (ret >= 0 && uoss_ptr) {
76828diff --git a/kernel/smp.c b/kernel/smp.c
76829index 69f38bd..77bbf12 100644
76830--- a/kernel/smp.c
76831+++ b/kernel/smp.c
76832@@ -77,7 +77,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
76833 return NOTIFY_OK;
76834 }
76835
76836-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
76837+static struct notifier_block hotplug_cfd_notifier = {
76838 .notifier_call = hotplug_cfd,
76839 };
76840
76841diff --git a/kernel/smpboot.c b/kernel/smpboot.c
76842index d6c5fc0..530560c 100644
76843--- a/kernel/smpboot.c
76844+++ b/kernel/smpboot.c
76845@@ -275,7 +275,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
76846 }
76847 smpboot_unpark_thread(plug_thread, cpu);
76848 }
76849- list_add(&plug_thread->list, &hotplug_threads);
76850+ pax_list_add(&plug_thread->list, &hotplug_threads);
76851 out:
76852 mutex_unlock(&smpboot_threads_lock);
76853 return ret;
76854@@ -292,7 +292,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
76855 {
76856 get_online_cpus();
76857 mutex_lock(&smpboot_threads_lock);
76858- list_del(&plug_thread->list);
76859+ pax_list_del(&plug_thread->list);
76860 smpboot_destroy_threads(plug_thread);
76861 mutex_unlock(&smpboot_threads_lock);
76862 put_online_cpus();
76863diff --git a/kernel/softirq.c b/kernel/softirq.c
76864index ed567ba..e71dabf 100644
76865--- a/kernel/softirq.c
76866+++ b/kernel/softirq.c
76867@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
76868 EXPORT_SYMBOL(irq_stat);
76869 #endif
76870
76871-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
76872+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
76873
76874 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
76875
76876-char *softirq_to_name[NR_SOFTIRQS] = {
76877+const char * const softirq_to_name[NR_SOFTIRQS] = {
76878 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
76879 "TASKLET", "SCHED", "HRTIMER", "RCU"
76880 };
76881@@ -244,7 +244,7 @@ restart:
76882 kstat_incr_softirqs_this_cpu(vec_nr);
76883
76884 trace_softirq_entry(vec_nr);
76885- h->action(h);
76886+ h->action();
76887 trace_softirq_exit(vec_nr);
76888 if (unlikely(prev_count != preempt_count())) {
76889 printk(KERN_ERR "huh, entered softirq %u %s %p"
76890@@ -391,7 +391,7 @@ void __raise_softirq_irqoff(unsigned int nr)
76891 or_softirq_pending(1UL << nr);
76892 }
76893
76894-void open_softirq(int nr, void (*action)(struct softirq_action *))
76895+void __init open_softirq(int nr, void (*action)(void))
76896 {
76897 softirq_vec[nr].action = action;
76898 }
76899@@ -447,7 +447,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
76900
76901 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
76902
76903-static void tasklet_action(struct softirq_action *a)
76904+static void tasklet_action(void)
76905 {
76906 struct tasklet_struct *list;
76907
76908@@ -482,7 +482,7 @@ static void tasklet_action(struct softirq_action *a)
76909 }
76910 }
76911
76912-static void tasklet_hi_action(struct softirq_action *a)
76913+static void tasklet_hi_action(void)
76914 {
76915 struct tasklet_struct *list;
76916
76917@@ -718,7 +718,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
76918 return NOTIFY_OK;
76919 }
76920
76921-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
76922+static struct notifier_block remote_softirq_cpu_notifier = {
76923 .notifier_call = remote_softirq_cpu_notify,
76924 };
76925
76926@@ -835,11 +835,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
76927 return NOTIFY_OK;
76928 }
76929
76930-static struct notifier_block __cpuinitdata cpu_nfb = {
76931+static struct notifier_block cpu_nfb = {
76932 .notifier_call = cpu_callback
76933 };
76934
76935-static struct smp_hotplug_thread softirq_threads = {
76936+static struct smp_hotplug_thread softirq_threads __read_only = {
76937 .store = &ksoftirqd,
76938 .thread_should_run = ksoftirqd_should_run,
76939 .thread_fn = run_ksoftirqd,
76940diff --git a/kernel/srcu.c b/kernel/srcu.c
76941index 2b85982..d52ab26 100644
76942--- a/kernel/srcu.c
76943+++ b/kernel/srcu.c
76944@@ -305,9 +305,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
76945 preempt_disable();
76946 idx = rcu_dereference_index_check(sp->completed,
76947 rcu_read_lock_sched_held()) & 0x1;
76948- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
76949+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
76950 smp_mb(); /* B */ /* Avoid leaking the critical section. */
76951- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
76952+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
76953 preempt_enable();
76954 return idx;
76955 }
76956@@ -323,7 +323,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
76957 {
76958 preempt_disable();
76959 smp_mb(); /* C */ /* Avoid leaking the critical section. */
76960- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
76961+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
76962 preempt_enable();
76963 }
76964 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
76965diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
76966index 2f194e9..2c05ea9 100644
76967--- a/kernel/stop_machine.c
76968+++ b/kernel/stop_machine.c
76969@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
76970 * cpu notifiers. It currently shares the same priority as sched
76971 * migration_notifier.
76972 */
76973-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
76974+static struct notifier_block cpu_stop_cpu_notifier = {
76975 .notifier_call = cpu_stop_cpu_callback,
76976 .priority = 10,
76977 };
76978diff --git a/kernel/sys.c b/kernel/sys.c
76979index 265b376..4e42ef5 100644
76980--- a/kernel/sys.c
76981+++ b/kernel/sys.c
76982@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
76983 error = -EACCES;
76984 goto out;
76985 }
76986+
76987+ if (gr_handle_chroot_setpriority(p, niceval)) {
76988+ error = -EACCES;
76989+ goto out;
76990+ }
76991+
76992 no_nice = security_task_setnice(p, niceval);
76993 if (no_nice) {
76994 error = no_nice;
76995@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
76996 goto error;
76997 }
76998
76999+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
77000+ goto error;
77001+
77002 if (rgid != (gid_t) -1 ||
77003 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
77004 new->sgid = new->egid;
77005@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
77006 old = current_cred();
77007
77008 retval = -EPERM;
77009+
77010+ if (gr_check_group_change(kgid, kgid, kgid))
77011+ goto error;
77012+
77013 if (nsown_capable(CAP_SETGID))
77014 new->gid = new->egid = new->sgid = new->fsgid = kgid;
77015 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
77016@@ -647,7 +660,7 @@ error:
77017 /*
77018 * change the user struct in a credentials set to match the new UID
77019 */
77020-static int set_user(struct cred *new)
77021+int set_user(struct cred *new)
77022 {
77023 struct user_struct *new_user;
77024
77025@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
77026 goto error;
77027 }
77028
77029+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
77030+ goto error;
77031+
77032 if (!uid_eq(new->uid, old->uid)) {
77033 retval = set_user(new);
77034 if (retval < 0)
77035@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
77036 old = current_cred();
77037
77038 retval = -EPERM;
77039+
77040+ if (gr_check_crash_uid(kuid))
77041+ goto error;
77042+ if (gr_check_user_change(kuid, kuid, kuid))
77043+ goto error;
77044+
77045 if (nsown_capable(CAP_SETUID)) {
77046 new->suid = new->uid = kuid;
77047 if (!uid_eq(kuid, old->uid)) {
77048@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
77049 goto error;
77050 }
77051
77052+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
77053+ goto error;
77054+
77055 if (ruid != (uid_t) -1) {
77056 new->uid = kruid;
77057 if (!uid_eq(kruid, old->uid)) {
77058@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
77059 goto error;
77060 }
77061
77062+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
77063+ goto error;
77064+
77065 if (rgid != (gid_t) -1)
77066 new->gid = krgid;
77067 if (egid != (gid_t) -1)
77068@@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
77069 if (!uid_valid(kuid))
77070 return old_fsuid;
77071
77072+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
77073+ goto error;
77074+
77075 new = prepare_creds();
77076 if (!new)
77077 return old_fsuid;
77078@@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
77079 }
77080 }
77081
77082+error:
77083 abort_creds(new);
77084 return old_fsuid;
77085
77086@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
77087 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
77088 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
77089 nsown_capable(CAP_SETGID)) {
77090+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
77091+ goto error;
77092+
77093 if (!gid_eq(kgid, old->fsgid)) {
77094 new->fsgid = kgid;
77095 goto change_okay;
77096 }
77097 }
77098
77099+error:
77100 abort_creds(new);
77101 return old_fsgid;
77102
77103@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
77104 return -EFAULT;
77105
77106 down_read(&uts_sem);
77107- error = __copy_to_user(&name->sysname, &utsname()->sysname,
77108+ error = __copy_to_user(name->sysname, &utsname()->sysname,
77109 __OLD_UTS_LEN);
77110 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
77111- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
77112+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
77113 __OLD_UTS_LEN);
77114 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
77115- error |= __copy_to_user(&name->release, &utsname()->release,
77116+ error |= __copy_to_user(name->release, &utsname()->release,
77117 __OLD_UTS_LEN);
77118 error |= __put_user(0, name->release + __OLD_UTS_LEN);
77119- error |= __copy_to_user(&name->version, &utsname()->version,
77120+ error |= __copy_to_user(name->version, &utsname()->version,
77121 __OLD_UTS_LEN);
77122 error |= __put_user(0, name->version + __OLD_UTS_LEN);
77123- error |= __copy_to_user(&name->machine, &utsname()->machine,
77124+ error |= __copy_to_user(name->machine, &utsname()->machine,
77125 __OLD_UTS_LEN);
77126 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
77127 up_read(&uts_sem);
77128@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
77129 error = get_dumpable(me->mm);
77130 break;
77131 case PR_SET_DUMPABLE:
77132- if (arg2 < 0 || arg2 > 1) {
77133+ if (arg2 > 1) {
77134 error = -EINVAL;
77135 break;
77136 }
77137diff --git a/kernel/sysctl.c b/kernel/sysctl.c
77138index c88878d..e4fa5d1 100644
77139--- a/kernel/sysctl.c
77140+++ b/kernel/sysctl.c
77141@@ -92,7 +92,6 @@
77142
77143
77144 #if defined(CONFIG_SYSCTL)
77145-
77146 /* External variables not in a header file. */
77147 extern int sysctl_overcommit_memory;
77148 extern int sysctl_overcommit_ratio;
77149@@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
77150 void __user *buffer, size_t *lenp, loff_t *ppos);
77151 #endif
77152
77153-#ifdef CONFIG_PRINTK
77154 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
77155 void __user *buffer, size_t *lenp, loff_t *ppos);
77156-#endif
77157
77158 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
77159 void __user *buffer, size_t *lenp, loff_t *ppos);
77160@@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
77161
77162 #endif
77163
77164+extern struct ctl_table grsecurity_table[];
77165+
77166 static struct ctl_table kern_table[];
77167 static struct ctl_table vm_table[];
77168 static struct ctl_table fs_table[];
77169@@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
77170 int sysctl_legacy_va_layout;
77171 #endif
77172
77173+#ifdef CONFIG_PAX_SOFTMODE
77174+static ctl_table pax_table[] = {
77175+ {
77176+ .procname = "softmode",
77177+ .data = &pax_softmode,
77178+ .maxlen = sizeof(unsigned int),
77179+ .mode = 0600,
77180+ .proc_handler = &proc_dointvec,
77181+ },
77182+
77183+ { }
77184+};
77185+#endif
77186+
77187 /* The default sysctl tables: */
77188
77189 static struct ctl_table sysctl_base_table[] = {
77190@@ -268,6 +281,22 @@ static int max_extfrag_threshold = 1000;
77191 #endif
77192
77193 static struct ctl_table kern_table[] = {
77194+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
77195+ {
77196+ .procname = "grsecurity",
77197+ .mode = 0500,
77198+ .child = grsecurity_table,
77199+ },
77200+#endif
77201+
77202+#ifdef CONFIG_PAX_SOFTMODE
77203+ {
77204+ .procname = "pax",
77205+ .mode = 0500,
77206+ .child = pax_table,
77207+ },
77208+#endif
77209+
77210 {
77211 .procname = "sched_child_runs_first",
77212 .data = &sysctl_sched_child_runs_first,
77213@@ -593,7 +622,7 @@ static struct ctl_table kern_table[] = {
77214 .data = &modprobe_path,
77215 .maxlen = KMOD_PATH_LEN,
77216 .mode = 0644,
77217- .proc_handler = proc_dostring,
77218+ .proc_handler = proc_dostring_modpriv,
77219 },
77220 {
77221 .procname = "modules_disabled",
77222@@ -760,16 +789,20 @@ static struct ctl_table kern_table[] = {
77223 .extra1 = &zero,
77224 .extra2 = &one,
77225 },
77226+#endif
77227 {
77228 .procname = "kptr_restrict",
77229 .data = &kptr_restrict,
77230 .maxlen = sizeof(int),
77231 .mode = 0644,
77232 .proc_handler = proc_dointvec_minmax_sysadmin,
77233+#ifdef CONFIG_GRKERNSEC_HIDESYM
77234+ .extra1 = &two,
77235+#else
77236 .extra1 = &zero,
77237+#endif
77238 .extra2 = &two,
77239 },
77240-#endif
77241 {
77242 .procname = "ngroups_max",
77243 .data = &ngroups_max,
77244@@ -1266,6 +1299,13 @@ static struct ctl_table vm_table[] = {
77245 .proc_handler = proc_dointvec_minmax,
77246 .extra1 = &zero,
77247 },
77248+ {
77249+ .procname = "heap_stack_gap",
77250+ .data = &sysctl_heap_stack_gap,
77251+ .maxlen = sizeof(sysctl_heap_stack_gap),
77252+ .mode = 0644,
77253+ .proc_handler = proc_doulongvec_minmax,
77254+ },
77255 #else
77256 {
77257 .procname = "nr_trim_pages",
77258@@ -1716,6 +1756,16 @@ int proc_dostring(struct ctl_table *table, int write,
77259 buffer, lenp, ppos);
77260 }
77261
77262+int proc_dostring_modpriv(struct ctl_table *table, int write,
77263+ void __user *buffer, size_t *lenp, loff_t *ppos)
77264+{
77265+ if (write && !capable(CAP_SYS_MODULE))
77266+ return -EPERM;
77267+
77268+ return _proc_do_string(table->data, table->maxlen, write,
77269+ buffer, lenp, ppos);
77270+}
77271+
77272 static size_t proc_skip_spaces(char **buf)
77273 {
77274 size_t ret;
77275@@ -1821,6 +1871,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
77276 len = strlen(tmp);
77277 if (len > *size)
77278 len = *size;
77279+ if (len > sizeof(tmp))
77280+ len = sizeof(tmp);
77281 if (copy_to_user(*buf, tmp, len))
77282 return -EFAULT;
77283 *size -= len;
77284@@ -1985,7 +2037,7 @@ int proc_dointvec(struct ctl_table *table, int write,
77285 static int proc_taint(struct ctl_table *table, int write,
77286 void __user *buffer, size_t *lenp, loff_t *ppos)
77287 {
77288- struct ctl_table t;
77289+ ctl_table_no_const t;
77290 unsigned long tmptaint = get_taint();
77291 int err;
77292
77293@@ -2013,7 +2065,6 @@ static int proc_taint(struct ctl_table *table, int write,
77294 return err;
77295 }
77296
77297-#ifdef CONFIG_PRINTK
77298 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
77299 void __user *buffer, size_t *lenp, loff_t *ppos)
77300 {
77301@@ -2022,7 +2073,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
77302
77303 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
77304 }
77305-#endif
77306
77307 struct do_proc_dointvec_minmax_conv_param {
77308 int *min;
77309@@ -2169,8 +2219,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
77310 *i = val;
77311 } else {
77312 val = convdiv * (*i) / convmul;
77313- if (!first)
77314+ if (!first) {
77315 err = proc_put_char(&buffer, &left, '\t');
77316+ if (err)
77317+ break;
77318+ }
77319 err = proc_put_long(&buffer, &left, val, false);
77320 if (err)
77321 break;
77322@@ -2562,6 +2615,12 @@ int proc_dostring(struct ctl_table *table, int write,
77323 return -ENOSYS;
77324 }
77325
77326+int proc_dostring_modpriv(struct ctl_table *table, int write,
77327+ void __user *buffer, size_t *lenp, loff_t *ppos)
77328+{
77329+ return -ENOSYS;
77330+}
77331+
77332 int proc_dointvec(struct ctl_table *table, int write,
77333 void __user *buffer, size_t *lenp, loff_t *ppos)
77334 {
77335@@ -2618,5 +2677,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
77336 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
77337 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
77338 EXPORT_SYMBOL(proc_dostring);
77339+EXPORT_SYMBOL(proc_dostring_modpriv);
77340 EXPORT_SYMBOL(proc_doulongvec_minmax);
77341 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
77342diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
77343index 0ddf3a0..a199f50 100644
77344--- a/kernel/sysctl_binary.c
77345+++ b/kernel/sysctl_binary.c
77346@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
77347 int i;
77348
77349 set_fs(KERNEL_DS);
77350- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
77351+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
77352 set_fs(old_fs);
77353 if (result < 0)
77354 goto out_kfree;
77355@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
77356 }
77357
77358 set_fs(KERNEL_DS);
77359- result = vfs_write(file, buffer, str - buffer, &pos);
77360+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
77361 set_fs(old_fs);
77362 if (result < 0)
77363 goto out_kfree;
77364@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
77365 int i;
77366
77367 set_fs(KERNEL_DS);
77368- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
77369+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
77370 set_fs(old_fs);
77371 if (result < 0)
77372 goto out_kfree;
77373@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
77374 }
77375
77376 set_fs(KERNEL_DS);
77377- result = vfs_write(file, buffer, str - buffer, &pos);
77378+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
77379 set_fs(old_fs);
77380 if (result < 0)
77381 goto out_kfree;
77382@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
77383 int i;
77384
77385 set_fs(KERNEL_DS);
77386- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
77387+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
77388 set_fs(old_fs);
77389 if (result < 0)
77390 goto out;
77391@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
77392 __le16 dnaddr;
77393
77394 set_fs(KERNEL_DS);
77395- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
77396+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
77397 set_fs(old_fs);
77398 if (result < 0)
77399 goto out;
77400@@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
77401 le16_to_cpu(dnaddr) & 0x3ff);
77402
77403 set_fs(KERNEL_DS);
77404- result = vfs_write(file, buf, len, &pos);
77405+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
77406 set_fs(old_fs);
77407 if (result < 0)
77408 goto out;
77409diff --git a/kernel/taskstats.c b/kernel/taskstats.c
77410index 145bb4d..b2aa969 100644
77411--- a/kernel/taskstats.c
77412+++ b/kernel/taskstats.c
77413@@ -28,9 +28,12 @@
77414 #include <linux/fs.h>
77415 #include <linux/file.h>
77416 #include <linux/pid_namespace.h>
77417+#include <linux/grsecurity.h>
77418 #include <net/genetlink.h>
77419 #include <linux/atomic.h>
77420
77421+extern int gr_is_taskstats_denied(int pid);
77422+
77423 /*
77424 * Maximum length of a cpumask that can be specified in
77425 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
77426@@ -570,6 +573,9 @@ err:
77427
77428 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
77429 {
77430+ if (gr_is_taskstats_denied(current->pid))
77431+ return -EACCES;
77432+
77433 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
77434 return cmd_attr_register_cpumask(info);
77435 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
77436diff --git a/kernel/time.c b/kernel/time.c
77437index d226c6a..c7c0960 100644
77438--- a/kernel/time.c
77439+++ b/kernel/time.c
77440@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
77441 return error;
77442
77443 if (tz) {
77444+ /* we log in do_settimeofday called below, so don't log twice
77445+ */
77446+ if (!tv)
77447+ gr_log_timechange();
77448+
77449 sys_tz = *tz;
77450 update_vsyscall_tz();
77451 if (firsttime) {
77452diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
77453index f11d83b..d016d91 100644
77454--- a/kernel/time/alarmtimer.c
77455+++ b/kernel/time/alarmtimer.c
77456@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
77457 struct platform_device *pdev;
77458 int error = 0;
77459 int i;
77460- struct k_clock alarm_clock = {
77461+ static struct k_clock alarm_clock = {
77462 .clock_getres = alarm_clock_getres,
77463 .clock_get = alarm_clock_get,
77464 .timer_create = alarm_timer_create,
77465diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
77466index f113755..ec24223 100644
77467--- a/kernel/time/tick-broadcast.c
77468+++ b/kernel/time/tick-broadcast.c
77469@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
77470 * then clear the broadcast bit.
77471 */
77472 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
77473- int cpu = smp_processor_id();
77474+ cpu = smp_processor_id();
77475
77476 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
77477 tick_broadcast_clear_oneshot(cpu);
77478diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
77479index cbc6acb..3a77191 100644
77480--- a/kernel/time/timekeeping.c
77481+++ b/kernel/time/timekeeping.c
77482@@ -15,6 +15,7 @@
77483 #include <linux/init.h>
77484 #include <linux/mm.h>
77485 #include <linux/sched.h>
77486+#include <linux/grsecurity.h>
77487 #include <linux/syscore_ops.h>
77488 #include <linux/clocksource.h>
77489 #include <linux/jiffies.h>
77490@@ -412,6 +413,8 @@ int do_settimeofday(const struct timespec *tv)
77491 if (!timespec_valid_strict(tv))
77492 return -EINVAL;
77493
77494+ gr_log_timechange();
77495+
77496 write_seqlock_irqsave(&tk->lock, flags);
77497
77498 timekeeping_forward_now(tk);
77499diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
77500index af5a7e9..715611a 100644
77501--- a/kernel/time/timer_list.c
77502+++ b/kernel/time/timer_list.c
77503@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
77504
77505 static void print_name_offset(struct seq_file *m, void *sym)
77506 {
77507+#ifdef CONFIG_GRKERNSEC_HIDESYM
77508+ SEQ_printf(m, "<%p>", NULL);
77509+#else
77510 char symname[KSYM_NAME_LEN];
77511
77512 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
77513 SEQ_printf(m, "<%pK>", sym);
77514 else
77515 SEQ_printf(m, "%s", symname);
77516+#endif
77517 }
77518
77519 static void
77520@@ -112,7 +116,11 @@ next_one:
77521 static void
77522 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
77523 {
77524+#ifdef CONFIG_GRKERNSEC_HIDESYM
77525+ SEQ_printf(m, " .base: %p\n", NULL);
77526+#else
77527 SEQ_printf(m, " .base: %pK\n", base);
77528+#endif
77529 SEQ_printf(m, " .index: %d\n",
77530 base->index);
77531 SEQ_printf(m, " .resolution: %Lu nsecs\n",
77532@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
77533 {
77534 struct proc_dir_entry *pe;
77535
77536+#ifdef CONFIG_GRKERNSEC_PROC_ADD
77537+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
77538+#else
77539 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
77540+#endif
77541 if (!pe)
77542 return -ENOMEM;
77543 return 0;
77544diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
77545index 0b537f2..40d6c20 100644
77546--- a/kernel/time/timer_stats.c
77547+++ b/kernel/time/timer_stats.c
77548@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
77549 static unsigned long nr_entries;
77550 static struct entry entries[MAX_ENTRIES];
77551
77552-static atomic_t overflow_count;
77553+static atomic_unchecked_t overflow_count;
77554
77555 /*
77556 * The entries are in a hash-table, for fast lookup:
77557@@ -140,7 +140,7 @@ static void reset_entries(void)
77558 nr_entries = 0;
77559 memset(entries, 0, sizeof(entries));
77560 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
77561- atomic_set(&overflow_count, 0);
77562+ atomic_set_unchecked(&overflow_count, 0);
77563 }
77564
77565 static struct entry *alloc_entry(void)
77566@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
77567 if (likely(entry))
77568 entry->count++;
77569 else
77570- atomic_inc(&overflow_count);
77571+ atomic_inc_unchecked(&overflow_count);
77572
77573 out_unlock:
77574 raw_spin_unlock_irqrestore(lock, flags);
77575@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
77576
77577 static void print_name_offset(struct seq_file *m, unsigned long addr)
77578 {
77579+#ifdef CONFIG_GRKERNSEC_HIDESYM
77580+ seq_printf(m, "<%p>", NULL);
77581+#else
77582 char symname[KSYM_NAME_LEN];
77583
77584 if (lookup_symbol_name(addr, symname) < 0)
77585- seq_printf(m, "<%p>", (void *)addr);
77586+ seq_printf(m, "<%pK>", (void *)addr);
77587 else
77588 seq_printf(m, "%s", symname);
77589+#endif
77590 }
77591
77592 static int tstats_show(struct seq_file *m, void *v)
77593@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
77594
77595 seq_puts(m, "Timer Stats Version: v0.2\n");
77596 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
77597- if (atomic_read(&overflow_count))
77598+ if (atomic_read_unchecked(&overflow_count))
77599 seq_printf(m, "Overflow: %d entries\n",
77600- atomic_read(&overflow_count));
77601+ atomic_read_unchecked(&overflow_count));
77602
77603 for (i = 0; i < nr_entries; i++) {
77604 entry = entries + i;
77605@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
77606 {
77607 struct proc_dir_entry *pe;
77608
77609+#ifdef CONFIG_GRKERNSEC_PROC_ADD
77610+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
77611+#else
77612 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
77613+#endif
77614 if (!pe)
77615 return -ENOMEM;
77616 return 0;
77617diff --git a/kernel/timer.c b/kernel/timer.c
77618index 367d008..1ee9ed9 100644
77619--- a/kernel/timer.c
77620+++ b/kernel/timer.c
77621@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
77622 /*
77623 * This function runs timers and the timer-tq in bottom half context.
77624 */
77625-static void run_timer_softirq(struct softirq_action *h)
77626+static void run_timer_softirq(void)
77627 {
77628 struct tvec_base *base = __this_cpu_read(tvec_bases);
77629
77630@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
77631 return NOTIFY_OK;
77632 }
77633
77634-static struct notifier_block __cpuinitdata timers_nb = {
77635+static struct notifier_block timers_nb = {
77636 .notifier_call = timer_cpu_notify,
77637 };
77638
77639diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
77640index c0bd030..62a1927 100644
77641--- a/kernel/trace/blktrace.c
77642+++ b/kernel/trace/blktrace.c
77643@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
77644 struct blk_trace *bt = filp->private_data;
77645 char buf[16];
77646
77647- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
77648+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
77649
77650 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
77651 }
77652@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
77653 return 1;
77654
77655 bt = buf->chan->private_data;
77656- atomic_inc(&bt->dropped);
77657+ atomic_inc_unchecked(&bt->dropped);
77658 return 0;
77659 }
77660
77661@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
77662
77663 bt->dir = dir;
77664 bt->dev = dev;
77665- atomic_set(&bt->dropped, 0);
77666+ atomic_set_unchecked(&bt->dropped, 0);
77667
77668 ret = -EIO;
77669 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
77670diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
77671index 43defd1..76da436 100644
77672--- a/kernel/trace/ftrace.c
77673+++ b/kernel/trace/ftrace.c
77674@@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
77675 if (unlikely(ftrace_disabled))
77676 return 0;
77677
77678+ ret = ftrace_arch_code_modify_prepare();
77679+ FTRACE_WARN_ON(ret);
77680+ if (ret)
77681+ return 0;
77682+
77683 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
77684+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
77685 if (ret) {
77686 ftrace_bug(ret, ip);
77687- return 0;
77688 }
77689- return 1;
77690+ return ret ? 0 : 1;
77691 }
77692
77693 /*
77694@@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
77695
77696 int
77697 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
77698- void *data)
77699+ void *data)
77700 {
77701 struct ftrace_func_probe *entry;
77702 struct ftrace_page *pg;
77703@@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
77704 if (!count)
77705 return 0;
77706
77707+ pax_open_kernel();
77708 sort(start, count, sizeof(*start),
77709 ftrace_cmp_ips, ftrace_swap_ips);
77710+ pax_close_kernel();
77711
77712 start_pg = ftrace_allocate_pages(count);
77713 if (!start_pg)
77714@@ -4559,8 +4566,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
77715 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
77716
77717 static int ftrace_graph_active;
77718-static struct notifier_block ftrace_suspend_notifier;
77719-
77720 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
77721 {
77722 return 0;
77723@@ -4704,6 +4709,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
77724 return NOTIFY_DONE;
77725 }
77726
77727+static struct notifier_block ftrace_suspend_notifier = {
77728+ .notifier_call = ftrace_suspend_notifier_call
77729+};
77730+
77731 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
77732 trace_func_graph_ent_t entryfunc)
77733 {
77734@@ -4717,7 +4726,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
77735 goto out;
77736 }
77737
77738- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
77739 register_pm_notifier(&ftrace_suspend_notifier);
77740
77741 ftrace_graph_active++;
77742diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
77743index ce8514f..8233573 100644
77744--- a/kernel/trace/ring_buffer.c
77745+++ b/kernel/trace/ring_buffer.c
77746@@ -346,9 +346,9 @@ struct buffer_data_page {
77747 */
77748 struct buffer_page {
77749 struct list_head list; /* list of buffer pages */
77750- local_t write; /* index for next write */
77751+ local_unchecked_t write; /* index for next write */
77752 unsigned read; /* index for next read */
77753- local_t entries; /* entries on this page */
77754+ local_unchecked_t entries; /* entries on this page */
77755 unsigned long real_end; /* real end of data */
77756 struct buffer_data_page *page; /* Actual data page */
77757 };
77758@@ -461,8 +461,8 @@ struct ring_buffer_per_cpu {
77759 unsigned long last_overrun;
77760 local_t entries_bytes;
77761 local_t entries;
77762- local_t overrun;
77763- local_t commit_overrun;
77764+ local_unchecked_t overrun;
77765+ local_unchecked_t commit_overrun;
77766 local_t dropped_events;
77767 local_t committing;
77768 local_t commits;
77769@@ -861,8 +861,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
77770 *
77771 * We add a counter to the write field to denote this.
77772 */
77773- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
77774- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
77775+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
77776+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
77777
77778 /*
77779 * Just make sure we have seen our old_write and synchronize
77780@@ -890,8 +890,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
77781 * cmpxchg to only update if an interrupt did not already
77782 * do it for us. If the cmpxchg fails, we don't care.
77783 */
77784- (void)local_cmpxchg(&next_page->write, old_write, val);
77785- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
77786+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
77787+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
77788
77789 /*
77790 * No need to worry about races with clearing out the commit.
77791@@ -1250,12 +1250,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
77792
77793 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
77794 {
77795- return local_read(&bpage->entries) & RB_WRITE_MASK;
77796+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
77797 }
77798
77799 static inline unsigned long rb_page_write(struct buffer_page *bpage)
77800 {
77801- return local_read(&bpage->write) & RB_WRITE_MASK;
77802+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
77803 }
77804
77805 static int
77806@@ -1350,7 +1350,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
77807 * bytes consumed in ring buffer from here.
77808 * Increment overrun to account for the lost events.
77809 */
77810- local_add(page_entries, &cpu_buffer->overrun);
77811+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
77812 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
77813 }
77814
77815@@ -1906,7 +1906,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
77816 * it is our responsibility to update
77817 * the counters.
77818 */
77819- local_add(entries, &cpu_buffer->overrun);
77820+ local_add_unchecked(entries, &cpu_buffer->overrun);
77821 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
77822
77823 /*
77824@@ -2056,7 +2056,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
77825 if (tail == BUF_PAGE_SIZE)
77826 tail_page->real_end = 0;
77827
77828- local_sub(length, &tail_page->write);
77829+ local_sub_unchecked(length, &tail_page->write);
77830 return;
77831 }
77832
77833@@ -2091,7 +2091,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
77834 rb_event_set_padding(event);
77835
77836 /* Set the write back to the previous setting */
77837- local_sub(length, &tail_page->write);
77838+ local_sub_unchecked(length, &tail_page->write);
77839 return;
77840 }
77841
77842@@ -2103,7 +2103,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
77843
77844 /* Set write to end of buffer */
77845 length = (tail + length) - BUF_PAGE_SIZE;
77846- local_sub(length, &tail_page->write);
77847+ local_sub_unchecked(length, &tail_page->write);
77848 }
77849
77850 /*
77851@@ -2129,7 +2129,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
77852 * about it.
77853 */
77854 if (unlikely(next_page == commit_page)) {
77855- local_inc(&cpu_buffer->commit_overrun);
77856+ local_inc_unchecked(&cpu_buffer->commit_overrun);
77857 goto out_reset;
77858 }
77859
77860@@ -2185,7 +2185,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
77861 cpu_buffer->tail_page) &&
77862 (cpu_buffer->commit_page ==
77863 cpu_buffer->reader_page))) {
77864- local_inc(&cpu_buffer->commit_overrun);
77865+ local_inc_unchecked(&cpu_buffer->commit_overrun);
77866 goto out_reset;
77867 }
77868 }
77869@@ -2233,7 +2233,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
77870 length += RB_LEN_TIME_EXTEND;
77871
77872 tail_page = cpu_buffer->tail_page;
77873- write = local_add_return(length, &tail_page->write);
77874+ write = local_add_return_unchecked(length, &tail_page->write);
77875
77876 /* set write to only the index of the write */
77877 write &= RB_WRITE_MASK;
77878@@ -2250,7 +2250,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
77879 kmemcheck_annotate_bitfield(event, bitfield);
77880 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
77881
77882- local_inc(&tail_page->entries);
77883+ local_inc_unchecked(&tail_page->entries);
77884
77885 /*
77886 * If this is the first commit on the page, then update
77887@@ -2283,7 +2283,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
77888
77889 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
77890 unsigned long write_mask =
77891- local_read(&bpage->write) & ~RB_WRITE_MASK;
77892+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
77893 unsigned long event_length = rb_event_length(event);
77894 /*
77895 * This is on the tail page. It is possible that
77896@@ -2293,7 +2293,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
77897 */
77898 old_index += write_mask;
77899 new_index += write_mask;
77900- index = local_cmpxchg(&bpage->write, old_index, new_index);
77901+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
77902 if (index == old_index) {
77903 /* update counters */
77904 local_sub(event_length, &cpu_buffer->entries_bytes);
77905@@ -2632,7 +2632,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
77906
77907 /* Do the likely case first */
77908 if (likely(bpage->page == (void *)addr)) {
77909- local_dec(&bpage->entries);
77910+ local_dec_unchecked(&bpage->entries);
77911 return;
77912 }
77913
77914@@ -2644,7 +2644,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
77915 start = bpage;
77916 do {
77917 if (bpage->page == (void *)addr) {
77918- local_dec(&bpage->entries);
77919+ local_dec_unchecked(&bpage->entries);
77920 return;
77921 }
77922 rb_inc_page(cpu_buffer, &bpage);
77923@@ -2926,7 +2926,7 @@ static inline unsigned long
77924 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
77925 {
77926 return local_read(&cpu_buffer->entries) -
77927- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
77928+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
77929 }
77930
77931 /**
77932@@ -3015,7 +3015,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
77933 return 0;
77934
77935 cpu_buffer = buffer->buffers[cpu];
77936- ret = local_read(&cpu_buffer->overrun);
77937+ ret = local_read_unchecked(&cpu_buffer->overrun);
77938
77939 return ret;
77940 }
77941@@ -3038,7 +3038,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
77942 return 0;
77943
77944 cpu_buffer = buffer->buffers[cpu];
77945- ret = local_read(&cpu_buffer->commit_overrun);
77946+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
77947
77948 return ret;
77949 }
77950@@ -3105,7 +3105,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
77951 /* if you care about this being correct, lock the buffer */
77952 for_each_buffer_cpu(buffer, cpu) {
77953 cpu_buffer = buffer->buffers[cpu];
77954- overruns += local_read(&cpu_buffer->overrun);
77955+ overruns += local_read_unchecked(&cpu_buffer->overrun);
77956 }
77957
77958 return overruns;
77959@@ -3281,8 +3281,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
77960 /*
77961 * Reset the reader page to size zero.
77962 */
77963- local_set(&cpu_buffer->reader_page->write, 0);
77964- local_set(&cpu_buffer->reader_page->entries, 0);
77965+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
77966+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
77967 local_set(&cpu_buffer->reader_page->page->commit, 0);
77968 cpu_buffer->reader_page->real_end = 0;
77969
77970@@ -3316,7 +3316,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
77971 * want to compare with the last_overrun.
77972 */
77973 smp_mb();
77974- overwrite = local_read(&(cpu_buffer->overrun));
77975+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
77976
77977 /*
77978 * Here's the tricky part.
77979@@ -3886,8 +3886,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
77980
77981 cpu_buffer->head_page
77982 = list_entry(cpu_buffer->pages, struct buffer_page, list);
77983- local_set(&cpu_buffer->head_page->write, 0);
77984- local_set(&cpu_buffer->head_page->entries, 0);
77985+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
77986+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
77987 local_set(&cpu_buffer->head_page->page->commit, 0);
77988
77989 cpu_buffer->head_page->read = 0;
77990@@ -3897,14 +3897,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
77991
77992 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
77993 INIT_LIST_HEAD(&cpu_buffer->new_pages);
77994- local_set(&cpu_buffer->reader_page->write, 0);
77995- local_set(&cpu_buffer->reader_page->entries, 0);
77996+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
77997+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
77998 local_set(&cpu_buffer->reader_page->page->commit, 0);
77999 cpu_buffer->reader_page->read = 0;
78000
78001 local_set(&cpu_buffer->entries_bytes, 0);
78002- local_set(&cpu_buffer->overrun, 0);
78003- local_set(&cpu_buffer->commit_overrun, 0);
78004+ local_set_unchecked(&cpu_buffer->overrun, 0);
78005+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
78006 local_set(&cpu_buffer->dropped_events, 0);
78007 local_set(&cpu_buffer->entries, 0);
78008 local_set(&cpu_buffer->committing, 0);
78009@@ -4308,8 +4308,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
78010 rb_init_page(bpage);
78011 bpage = reader->page;
78012 reader->page = *data_page;
78013- local_set(&reader->write, 0);
78014- local_set(&reader->entries, 0);
78015+ local_set_unchecked(&reader->write, 0);
78016+ local_set_unchecked(&reader->entries, 0);
78017 reader->read = 0;
78018 *data_page = bpage;
78019
78020diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
78021index 3c13e46..883d039 100644
78022--- a/kernel/trace/trace.c
78023+++ b/kernel/trace/trace.c
78024@@ -4465,10 +4465,9 @@ static const struct file_operations tracing_dyn_info_fops = {
78025 };
78026 #endif
78027
78028-static struct dentry *d_tracer;
78029-
78030 struct dentry *tracing_init_dentry(void)
78031 {
78032+ static struct dentry *d_tracer;
78033 static int once;
78034
78035 if (d_tracer)
78036@@ -4488,10 +4487,9 @@ struct dentry *tracing_init_dentry(void)
78037 return d_tracer;
78038 }
78039
78040-static struct dentry *d_percpu;
78041-
78042 struct dentry *tracing_dentry_percpu(void)
78043 {
78044+ static struct dentry *d_percpu;
78045 static int once;
78046 struct dentry *d_tracer;
78047
78048diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
78049index 880073d..42db7c3 100644
78050--- a/kernel/trace/trace_events.c
78051+++ b/kernel/trace/trace_events.c
78052@@ -1330,10 +1330,6 @@ static LIST_HEAD(ftrace_module_file_list);
78053 struct ftrace_module_file_ops {
78054 struct list_head list;
78055 struct module *mod;
78056- struct file_operations id;
78057- struct file_operations enable;
78058- struct file_operations format;
78059- struct file_operations filter;
78060 };
78061
78062 static struct ftrace_module_file_ops *
78063@@ -1354,17 +1350,12 @@ trace_create_file_ops(struct module *mod)
78064
78065 file_ops->mod = mod;
78066
78067- file_ops->id = ftrace_event_id_fops;
78068- file_ops->id.owner = mod;
78069-
78070- file_ops->enable = ftrace_enable_fops;
78071- file_ops->enable.owner = mod;
78072-
78073- file_ops->filter = ftrace_event_filter_fops;
78074- file_ops->filter.owner = mod;
78075-
78076- file_ops->format = ftrace_event_format_fops;
78077- file_ops->format.owner = mod;
78078+ pax_open_kernel();
78079+ mod->trace_id.owner = mod;
78080+ mod->trace_enable.owner = mod;
78081+ mod->trace_filter.owner = mod;
78082+ mod->trace_format.owner = mod;
78083+ pax_close_kernel();
78084
78085 list_add(&file_ops->list, &ftrace_module_file_list);
78086
78087@@ -1388,8 +1379,8 @@ static void trace_module_add_events(struct module *mod)
78088
78089 for_each_event(call, start, end) {
78090 __trace_add_event_call(*call, mod,
78091- &file_ops->id, &file_ops->enable,
78092- &file_ops->filter, &file_ops->format);
78093+ &mod->trace_id, &mod->trace_enable,
78094+ &mod->trace_filter, &mod->trace_format);
78095 }
78096 }
78097
78098diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
78099index fd3c8aa..5f324a6 100644
78100--- a/kernel/trace/trace_mmiotrace.c
78101+++ b/kernel/trace/trace_mmiotrace.c
78102@@ -24,7 +24,7 @@ struct header_iter {
78103 static struct trace_array *mmio_trace_array;
78104 static bool overrun_detected;
78105 static unsigned long prev_overruns;
78106-static atomic_t dropped_count;
78107+static atomic_unchecked_t dropped_count;
78108
78109 static void mmio_reset_data(struct trace_array *tr)
78110 {
78111@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
78112
78113 static unsigned long count_overruns(struct trace_iterator *iter)
78114 {
78115- unsigned long cnt = atomic_xchg(&dropped_count, 0);
78116+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
78117 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
78118
78119 if (over > prev_overruns)
78120@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
78121 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
78122 sizeof(*entry), 0, pc);
78123 if (!event) {
78124- atomic_inc(&dropped_count);
78125+ atomic_inc_unchecked(&dropped_count);
78126 return;
78127 }
78128 entry = ring_buffer_event_data(event);
78129@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
78130 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
78131 sizeof(*entry), 0, pc);
78132 if (!event) {
78133- atomic_inc(&dropped_count);
78134+ atomic_inc_unchecked(&dropped_count);
78135 return;
78136 }
78137 entry = ring_buffer_event_data(event);
78138diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
78139index 194d796..76edb8f 100644
78140--- a/kernel/trace/trace_output.c
78141+++ b/kernel/trace/trace_output.c
78142@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
78143
78144 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
78145 if (!IS_ERR(p)) {
78146- p = mangle_path(s->buffer + s->len, p, "\n");
78147+ p = mangle_path(s->buffer + s->len, p, "\n\\");
78148 if (p) {
78149 s->len = p - s->buffer;
78150 return 1;
78151@@ -852,14 +852,16 @@ int register_ftrace_event(struct trace_event *event)
78152 goto out;
78153 }
78154
78155+ pax_open_kernel();
78156 if (event->funcs->trace == NULL)
78157- event->funcs->trace = trace_nop_print;
78158+ *(void **)&event->funcs->trace = trace_nop_print;
78159 if (event->funcs->raw == NULL)
78160- event->funcs->raw = trace_nop_print;
78161+ *(void **)&event->funcs->raw = trace_nop_print;
78162 if (event->funcs->hex == NULL)
78163- event->funcs->hex = trace_nop_print;
78164+ *(void **)&event->funcs->hex = trace_nop_print;
78165 if (event->funcs->binary == NULL)
78166- event->funcs->binary = trace_nop_print;
78167+ *(void **)&event->funcs->binary = trace_nop_print;
78168+ pax_close_kernel();
78169
78170 key = event->type & (EVENT_HASHSIZE - 1);
78171
78172diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
78173index 42ca822..cdcacc6 100644
78174--- a/kernel/trace/trace_stack.c
78175+++ b/kernel/trace/trace_stack.c
78176@@ -52,7 +52,7 @@ static inline void check_stack(void)
78177 return;
78178
78179 /* we do not handle interrupt stacks yet */
78180- if (!object_is_on_stack(&this_size))
78181+ if (!object_starts_on_stack(&this_size))
78182 return;
78183
78184 local_irq_save(flags);
78185diff --git a/kernel/user.c b/kernel/user.c
78186index 33acb5e..57ebfd4 100644
78187--- a/kernel/user.c
78188+++ b/kernel/user.c
78189@@ -47,9 +47,7 @@ struct user_namespace init_user_ns = {
78190 .count = 4294967295U,
78191 },
78192 },
78193- .kref = {
78194- .refcount = ATOMIC_INIT(3),
78195- },
78196+ .count = ATOMIC_INIT(3),
78197 .owner = GLOBAL_ROOT_UID,
78198 .group = GLOBAL_ROOT_GID,
78199 .proc_inum = PROC_USER_INIT_INO,
78200diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
78201index 2b042c4..24f8ec3 100644
78202--- a/kernel/user_namespace.c
78203+++ b/kernel/user_namespace.c
78204@@ -78,7 +78,7 @@ int create_user_ns(struct cred *new)
78205 return ret;
78206 }
78207
78208- kref_init(&ns->kref);
78209+ atomic_set(&ns->count, 1);
78210 /* Leave the new->user_ns reference with the new user namespace. */
78211 ns->parent = parent_ns;
78212 ns->owner = owner;
78213@@ -104,15 +104,16 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
78214 return create_user_ns(cred);
78215 }
78216
78217-void free_user_ns(struct kref *kref)
78218+void free_user_ns(struct user_namespace *ns)
78219 {
78220- struct user_namespace *parent, *ns =
78221- container_of(kref, struct user_namespace, kref);
78222+ struct user_namespace *parent;
78223
78224- parent = ns->parent;
78225- proc_free_inum(ns->proc_inum);
78226- kmem_cache_free(user_ns_cachep, ns);
78227- put_user_ns(parent);
78228+ do {
78229+ parent = ns->parent;
78230+ proc_free_inum(ns->proc_inum);
78231+ kmem_cache_free(user_ns_cachep, ns);
78232+ ns = parent;
78233+ } while (atomic_dec_and_test(&parent->count));
78234 }
78235 EXPORT_SYMBOL(free_user_ns);
78236
78237diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
78238index 63da38c..639904e 100644
78239--- a/kernel/utsname_sysctl.c
78240+++ b/kernel/utsname_sysctl.c
78241@@ -46,7 +46,7 @@ static void put_uts(ctl_table *table, int write, void *which)
78242 static int proc_do_uts_string(ctl_table *table, int write,
78243 void __user *buffer, size_t *lenp, loff_t *ppos)
78244 {
78245- struct ctl_table uts_table;
78246+ ctl_table_no_const uts_table;
78247 int r;
78248 memcpy(&uts_table, table, sizeof(uts_table));
78249 uts_table.data = get_uts(table, write);
78250diff --git a/kernel/watchdog.c b/kernel/watchdog.c
78251index 75a2ab3..5961da7 100644
78252--- a/kernel/watchdog.c
78253+++ b/kernel/watchdog.c
78254@@ -527,7 +527,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
78255 }
78256 #endif /* CONFIG_SYSCTL */
78257
78258-static struct smp_hotplug_thread watchdog_threads = {
78259+static struct smp_hotplug_thread watchdog_threads __read_only = {
78260 .store = &softlockup_watchdog,
78261 .thread_should_run = watchdog_should_run,
78262 .thread_fn = watchdog,
78263diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
78264index 67604e5..fe94fb1 100644
78265--- a/lib/Kconfig.debug
78266+++ b/lib/Kconfig.debug
78267@@ -550,7 +550,7 @@ config DEBUG_MUTEXES
78268
78269 config DEBUG_LOCK_ALLOC
78270 bool "Lock debugging: detect incorrect freeing of live locks"
78271- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
78272+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
78273 select DEBUG_SPINLOCK
78274 select DEBUG_MUTEXES
78275 select LOCKDEP
78276@@ -564,7 +564,7 @@ config DEBUG_LOCK_ALLOC
78277
78278 config PROVE_LOCKING
78279 bool "Lock debugging: prove locking correctness"
78280- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
78281+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
78282 select LOCKDEP
78283 select DEBUG_SPINLOCK
78284 select DEBUG_MUTEXES
78285@@ -670,7 +670,7 @@ config LOCKDEP
78286
78287 config LOCK_STAT
78288 bool "Lock usage statistics"
78289- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
78290+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
78291 select LOCKDEP
78292 select DEBUG_SPINLOCK
78293 select DEBUG_MUTEXES
78294@@ -1278,6 +1278,7 @@ config LATENCYTOP
78295 depends on DEBUG_KERNEL
78296 depends on STACKTRACE_SUPPORT
78297 depends on PROC_FS
78298+ depends on !GRKERNSEC_HIDESYM
78299 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
78300 select KALLSYMS
78301 select KALLSYMS_ALL
78302@@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
78303
78304 config PROVIDE_OHCI1394_DMA_INIT
78305 bool "Remote debugging over FireWire early on boot"
78306- depends on PCI && X86
78307+ depends on PCI && X86 && !GRKERNSEC
78308 help
78309 If you want to debug problems which hang or crash the kernel early
78310 on boot and the crashing machine has a FireWire port, you can use
78311@@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
78312
78313 config FIREWIRE_OHCI_REMOTE_DMA
78314 bool "Remote debugging over FireWire with firewire-ohci"
78315- depends on FIREWIRE_OHCI
78316+ depends on FIREWIRE_OHCI && !GRKERNSEC
78317 help
78318 This option lets you use the FireWire bus for remote debugging
78319 with help of the firewire-ohci driver. It enables unfiltered
78320diff --git a/lib/Makefile b/lib/Makefile
78321index 02ed6c0..bd243da 100644
78322--- a/lib/Makefile
78323+++ b/lib/Makefile
78324@@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
78325
78326 obj-$(CONFIG_BTREE) += btree.o
78327 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
78328-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
78329+obj-y += list_debug.o
78330 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
78331
78332 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
78333diff --git a/lib/bitmap.c b/lib/bitmap.c
78334index 06f7e4f..f3cf2b0 100644
78335--- a/lib/bitmap.c
78336+++ b/lib/bitmap.c
78337@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
78338 {
78339 int c, old_c, totaldigits, ndigits, nchunks, nbits;
78340 u32 chunk;
78341- const char __user __force *ubuf = (const char __user __force *)buf;
78342+ const char __user *ubuf = (const char __force_user *)buf;
78343
78344 bitmap_zero(maskp, nmaskbits);
78345
78346@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
78347 {
78348 if (!access_ok(VERIFY_READ, ubuf, ulen))
78349 return -EFAULT;
78350- return __bitmap_parse((const char __force *)ubuf,
78351+ return __bitmap_parse((const char __force_kernel *)ubuf,
78352 ulen, 1, maskp, nmaskbits);
78353
78354 }
78355@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
78356 {
78357 unsigned a, b;
78358 int c, old_c, totaldigits;
78359- const char __user __force *ubuf = (const char __user __force *)buf;
78360+ const char __user *ubuf = (const char __force_user *)buf;
78361 int exp_digit, in_range;
78362
78363 totaldigits = c = 0;
78364@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
78365 {
78366 if (!access_ok(VERIFY_READ, ubuf, ulen))
78367 return -EFAULT;
78368- return __bitmap_parselist((const char __force *)ubuf,
78369+ return __bitmap_parselist((const char __force_kernel *)ubuf,
78370 ulen, 1, maskp, nmaskbits);
78371 }
78372 EXPORT_SYMBOL(bitmap_parselist_user);
78373diff --git a/lib/bug.c b/lib/bug.c
78374index d0cdf14..4d07bd2 100644
78375--- a/lib/bug.c
78376+++ b/lib/bug.c
78377@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
78378 return BUG_TRAP_TYPE_NONE;
78379
78380 bug = find_bug(bugaddr);
78381+ if (!bug)
78382+ return BUG_TRAP_TYPE_NONE;
78383
78384 file = NULL;
78385 line = 0;
78386diff --git a/lib/debugobjects.c b/lib/debugobjects.c
78387index d11808c..dc2d6f8 100644
78388--- a/lib/debugobjects.c
78389+++ b/lib/debugobjects.c
78390@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
78391 if (limit > 4)
78392 return;
78393
78394- is_on_stack = object_is_on_stack(addr);
78395+ is_on_stack = object_starts_on_stack(addr);
78396 if (is_on_stack == onstack)
78397 return;
78398
78399diff --git a/lib/devres.c b/lib/devres.c
78400index 80b9c76..9e32279 100644
78401--- a/lib/devres.c
78402+++ b/lib/devres.c
78403@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
78404 void devm_iounmap(struct device *dev, void __iomem *addr)
78405 {
78406 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
78407- (void *)addr));
78408+ (void __force *)addr));
78409 iounmap(addr);
78410 }
78411 EXPORT_SYMBOL(devm_iounmap);
78412@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
78413 {
78414 ioport_unmap(addr);
78415 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
78416- devm_ioport_map_match, (void *)addr));
78417+ devm_ioport_map_match, (void __force *)addr));
78418 }
78419 EXPORT_SYMBOL(devm_ioport_unmap);
78420
78421diff --git a/lib/dma-debug.c b/lib/dma-debug.c
78422index 5e396ac..58d5de1 100644
78423--- a/lib/dma-debug.c
78424+++ b/lib/dma-debug.c
78425@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
78426
78427 void dma_debug_add_bus(struct bus_type *bus)
78428 {
78429- struct notifier_block *nb;
78430+ notifier_block_no_const *nb;
78431
78432 if (global_disable)
78433 return;
78434@@ -942,7 +942,7 @@ out:
78435
78436 static void check_for_stack(struct device *dev, void *addr)
78437 {
78438- if (object_is_on_stack(addr))
78439+ if (object_starts_on_stack(addr))
78440 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
78441 "stack [addr=%p]\n", addr);
78442 }
78443diff --git a/lib/inflate.c b/lib/inflate.c
78444index 013a761..c28f3fc 100644
78445--- a/lib/inflate.c
78446+++ b/lib/inflate.c
78447@@ -269,7 +269,7 @@ static void free(void *where)
78448 malloc_ptr = free_mem_ptr;
78449 }
78450 #else
78451-#define malloc(a) kmalloc(a, GFP_KERNEL)
78452+#define malloc(a) kmalloc((a), GFP_KERNEL)
78453 #define free(a) kfree(a)
78454 #endif
78455
78456diff --git a/lib/ioremap.c b/lib/ioremap.c
78457index 0c9216c..863bd89 100644
78458--- a/lib/ioremap.c
78459+++ b/lib/ioremap.c
78460@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
78461 unsigned long next;
78462
78463 phys_addr -= addr;
78464- pmd = pmd_alloc(&init_mm, pud, addr);
78465+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
78466 if (!pmd)
78467 return -ENOMEM;
78468 do {
78469@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
78470 unsigned long next;
78471
78472 phys_addr -= addr;
78473- pud = pud_alloc(&init_mm, pgd, addr);
78474+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
78475 if (!pud)
78476 return -ENOMEM;
78477 do {
78478diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
78479index bd2bea9..6b3c95e 100644
78480--- a/lib/is_single_threaded.c
78481+++ b/lib/is_single_threaded.c
78482@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
78483 struct task_struct *p, *t;
78484 bool ret;
78485
78486+ if (!mm)
78487+ return true;
78488+
78489 if (atomic_read(&task->signal->live) != 1)
78490 return false;
78491
78492diff --git a/lib/kobject.c b/lib/kobject.c
78493index e07ee1f..998489d 100644
78494--- a/lib/kobject.c
78495+++ b/lib/kobject.c
78496@@ -852,9 +852,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
78497
78498
78499 static DEFINE_SPINLOCK(kobj_ns_type_lock);
78500-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
78501+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
78502
78503-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
78504+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
78505 {
78506 enum kobj_ns_type type = ops->type;
78507 int error;
78508diff --git a/lib/list_debug.c b/lib/list_debug.c
78509index c24c2f7..0475b78 100644
78510--- a/lib/list_debug.c
78511+++ b/lib/list_debug.c
78512@@ -11,7 +11,9 @@
78513 #include <linux/bug.h>
78514 #include <linux/kernel.h>
78515 #include <linux/rculist.h>
78516+#include <linux/mm.h>
78517
78518+#ifdef CONFIG_DEBUG_LIST
78519 /*
78520 * Insert a new entry between two known consecutive entries.
78521 *
78522@@ -19,21 +21,32 @@
78523 * the prev/next entries already!
78524 */
78525
78526-void __list_add(struct list_head *new,
78527- struct list_head *prev,
78528- struct list_head *next)
78529+static bool __list_add_debug(struct list_head *new,
78530+ struct list_head *prev,
78531+ struct list_head *next)
78532 {
78533- WARN(next->prev != prev,
78534+ if (WARN(next->prev != prev,
78535 "list_add corruption. next->prev should be "
78536 "prev (%p), but was %p. (next=%p).\n",
78537- prev, next->prev, next);
78538- WARN(prev->next != next,
78539+ prev, next->prev, next) ||
78540+ WARN(prev->next != next,
78541 "list_add corruption. prev->next should be "
78542 "next (%p), but was %p. (prev=%p).\n",
78543- next, prev->next, prev);
78544- WARN(new == prev || new == next,
78545+ next, prev->next, prev) ||
78546+ WARN(new == prev || new == next,
78547 "list_add double add: new=%p, prev=%p, next=%p.\n",
78548- new, prev, next);
78549+ new, prev, next))
78550+ return false;
78551+ return true;
78552+}
78553+
78554+void __list_add(struct list_head *new,
78555+ struct list_head *prev,
78556+ struct list_head *next)
78557+{
78558+ if (!__list_add_debug(new, prev, next))
78559+ return;
78560+
78561 next->prev = new;
78562 new->next = next;
78563 new->prev = prev;
78564@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
78565 }
78566 EXPORT_SYMBOL(__list_add);
78567
78568-void __list_del_entry(struct list_head *entry)
78569+static bool __list_del_entry_debug(struct list_head *entry)
78570 {
78571 struct list_head *prev, *next;
78572
78573@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
78574 WARN(next->prev != entry,
78575 "list_del corruption. next->prev should be %p, "
78576 "but was %p\n", entry, next->prev))
78577+ return false;
78578+ return true;
78579+}
78580+
78581+void __list_del_entry(struct list_head *entry)
78582+{
78583+ if (!__list_del_entry_debug(entry))
78584 return;
78585
78586- __list_del(prev, next);
78587+ __list_del(entry->prev, entry->next);
78588 }
78589 EXPORT_SYMBOL(__list_del_entry);
78590
78591@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
78592 void __list_add_rcu(struct list_head *new,
78593 struct list_head *prev, struct list_head *next)
78594 {
78595- WARN(next->prev != prev,
78596- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
78597- prev, next->prev, next);
78598- WARN(prev->next != next,
78599- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
78600- next, prev->next, prev);
78601+ if (!__list_add_debug(new, prev, next))
78602+ return;
78603+
78604 new->next = next;
78605 new->prev = prev;
78606 rcu_assign_pointer(list_next_rcu(prev), new);
78607 next->prev = new;
78608 }
78609 EXPORT_SYMBOL(__list_add_rcu);
78610+#endif
78611+
78612+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
78613+{
78614+#ifdef CONFIG_DEBUG_LIST
78615+ if (!__list_add_debug(new, prev, next))
78616+ return;
78617+#endif
78618+
78619+ pax_open_kernel();
78620+ next->prev = new;
78621+ new->next = next;
78622+ new->prev = prev;
78623+ prev->next = new;
78624+ pax_close_kernel();
78625+}
78626+EXPORT_SYMBOL(__pax_list_add);
78627+
78628+void pax_list_del(struct list_head *entry)
78629+{
78630+#ifdef CONFIG_DEBUG_LIST
78631+ if (!__list_del_entry_debug(entry))
78632+ return;
78633+#endif
78634+
78635+ pax_open_kernel();
78636+ __list_del(entry->prev, entry->next);
78637+ entry->next = LIST_POISON1;
78638+ entry->prev = LIST_POISON2;
78639+ pax_close_kernel();
78640+}
78641+EXPORT_SYMBOL(pax_list_del);
78642+
78643+void pax_list_del_init(struct list_head *entry)
78644+{
78645+ pax_open_kernel();
78646+ __list_del(entry->prev, entry->next);
78647+ INIT_LIST_HEAD(entry);
78648+ pax_close_kernel();
78649+}
78650+EXPORT_SYMBOL(pax_list_del_init);
78651+
78652+void __pax_list_add_rcu(struct list_head *new,
78653+ struct list_head *prev, struct list_head *next)
78654+{
78655+#ifdef CONFIG_DEBUG_LIST
78656+ if (!__list_add_debug(new, prev, next))
78657+ return;
78658+#endif
78659+
78660+ pax_open_kernel();
78661+ new->next = next;
78662+ new->prev = prev;
78663+ rcu_assign_pointer(list_next_rcu(prev), new);
78664+ next->prev = new;
78665+ pax_close_kernel();
78666+}
78667+EXPORT_SYMBOL(__pax_list_add_rcu);
78668+
78669+void pax_list_del_rcu(struct list_head *entry)
78670+{
78671+#ifdef CONFIG_DEBUG_LIST
78672+ if (!__list_del_entry_debug(entry))
78673+ return;
78674+#endif
78675+
78676+ pax_open_kernel();
78677+ __list_del(entry->prev, entry->next);
78678+ entry->next = LIST_POISON1;
78679+ entry->prev = LIST_POISON2;
78680+ pax_close_kernel();
78681+}
78682+EXPORT_SYMBOL(pax_list_del_rcu);
78683diff --git a/lib/radix-tree.c b/lib/radix-tree.c
78684index e796429..6e38f9f 100644
78685--- a/lib/radix-tree.c
78686+++ b/lib/radix-tree.c
78687@@ -92,7 +92,7 @@ struct radix_tree_preload {
78688 int nr;
78689 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
78690 };
78691-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
78692+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
78693
78694 static inline void *ptr_to_indirect(void *ptr)
78695 {
78696diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
78697index bb2b201..46abaf9 100644
78698--- a/lib/strncpy_from_user.c
78699+++ b/lib/strncpy_from_user.c
78700@@ -21,7 +21,7 @@
78701 */
78702 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
78703 {
78704- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
78705+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
78706 long res = 0;
78707
78708 /*
78709diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
78710index a28df52..3d55877 100644
78711--- a/lib/strnlen_user.c
78712+++ b/lib/strnlen_user.c
78713@@ -26,7 +26,7 @@
78714 */
78715 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
78716 {
78717- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
78718+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
78719 long align, res = 0;
78720 unsigned long c;
78721
78722diff --git a/lib/swiotlb.c b/lib/swiotlb.c
78723index 196b069..358f342 100644
78724--- a/lib/swiotlb.c
78725+++ b/lib/swiotlb.c
78726@@ -642,7 +642,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
78727
78728 void
78729 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
78730- dma_addr_t dev_addr)
78731+ dma_addr_t dev_addr, struct dma_attrs *attrs)
78732 {
78733 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
78734
78735diff --git a/lib/vsprintf.c b/lib/vsprintf.c
78736index fab33a9..3b5fe68 100644
78737--- a/lib/vsprintf.c
78738+++ b/lib/vsprintf.c
78739@@ -16,6 +16,9 @@
78740 * - scnprintf and vscnprintf
78741 */
78742
78743+#ifdef CONFIG_GRKERNSEC_HIDESYM
78744+#define __INCLUDED_BY_HIDESYM 1
78745+#endif
78746 #include <stdarg.h>
78747 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
78748 #include <linux/types.h>
78749@@ -541,7 +544,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
78750 char sym[KSYM_SYMBOL_LEN];
78751 if (ext == 'B')
78752 sprint_backtrace(sym, value);
78753- else if (ext != 'f' && ext != 's')
78754+ else if (ext != 'f' && ext != 's' && ext != 'a')
78755 sprint_symbol(sym, value);
78756 else
78757 sprint_symbol_no_offset(sym, value);
78758@@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
78759 return number(buf, end, *(const netdev_features_t *)addr, spec);
78760 }
78761
78762+#ifdef CONFIG_GRKERNSEC_HIDESYM
78763+int kptr_restrict __read_mostly = 2;
78764+#else
78765 int kptr_restrict __read_mostly;
78766+#endif
78767
78768 /*
78769 * Show a '%p' thing. A kernel extension is that the '%p' is followed
78770@@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
78771 * - 'S' For symbolic direct pointers with offset
78772 * - 's' For symbolic direct pointers without offset
78773 * - 'B' For backtraced symbolic direct pointers with offset
78774+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
78775+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
78776 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
78777 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
78778 * - 'M' For a 6-byte MAC address, it prints the address in the
78779@@ -1043,12 +1052,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
78780
78781 if (!ptr && *fmt != 'K') {
78782 /*
78783- * Print (null) with the same width as a pointer so it makes
78784+ * Print (nil) with the same width as a pointer so it makes
78785 * tabular output look nice.
78786 */
78787 if (spec.field_width == -1)
78788 spec.field_width = default_width;
78789- return string(buf, end, "(null)", spec);
78790+ return string(buf, end, "(nil)", spec);
78791 }
78792
78793 switch (*fmt) {
78794@@ -1058,6 +1067,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
78795 /* Fallthrough */
78796 case 'S':
78797 case 's':
78798+#ifdef CONFIG_GRKERNSEC_HIDESYM
78799+ break;
78800+#else
78801+ return symbol_string(buf, end, ptr, spec, *fmt);
78802+#endif
78803+ case 'A':
78804+ case 'a':
78805 case 'B':
78806 return symbol_string(buf, end, ptr, spec, *fmt);
78807 case 'R':
78808@@ -1098,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
78809 va_end(va);
78810 return buf;
78811 }
78812+ case 'P':
78813+ break;
78814 case 'K':
78815 /*
78816 * %pK cannot be used in IRQ context because its test
78817@@ -1121,6 +1139,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
78818 }
78819 break;
78820 }
78821+
78822+#ifdef CONFIG_GRKERNSEC_HIDESYM
78823+ /* 'P' = approved pointers to copy to userland,
78824+ as in the /proc/kallsyms case, as we make it display nothing
78825+ for non-root users, and the real contents for root users
78826+ Also ignore 'K' pointers, since we force their NULLing for non-root users
78827+ above
78828+ */
78829+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
78830+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
78831+ dump_stack();
78832+ ptr = NULL;
78833+ }
78834+#endif
78835+
78836 spec.flags |= SMALL;
78837 if (spec.field_width == -1) {
78838 spec.field_width = default_width;
78839@@ -1842,11 +1875,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
78840 typeof(type) value; \
78841 if (sizeof(type) == 8) { \
78842 args = PTR_ALIGN(args, sizeof(u32)); \
78843- *(u32 *)&value = *(u32 *)args; \
78844- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
78845+ *(u32 *)&value = *(const u32 *)args; \
78846+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
78847 } else { \
78848 args = PTR_ALIGN(args, sizeof(type)); \
78849- value = *(typeof(type) *)args; \
78850+ value = *(const typeof(type) *)args; \
78851 } \
78852 args += sizeof(type); \
78853 value; \
78854@@ -1909,7 +1942,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
78855 case FORMAT_TYPE_STR: {
78856 const char *str_arg = args;
78857 args += strlen(str_arg) + 1;
78858- str = string(str, end, (char *)str_arg, spec);
78859+ str = string(str, end, str_arg, spec);
78860 break;
78861 }
78862
78863diff --git a/localversion-grsec b/localversion-grsec
78864new file mode 100644
78865index 0000000..7cd6065
78866--- /dev/null
78867+++ b/localversion-grsec
78868@@ -0,0 +1 @@
78869+-grsec
78870diff --git a/mm/Kconfig b/mm/Kconfig
78871index 278e3ab..87c384d 100644
78872--- a/mm/Kconfig
78873+++ b/mm/Kconfig
78874@@ -286,10 +286,10 @@ config KSM
78875 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
78876
78877 config DEFAULT_MMAP_MIN_ADDR
78878- int "Low address space to protect from user allocation"
78879+ int "Low address space to protect from user allocation"
78880 depends on MMU
78881- default 4096
78882- help
78883+ default 65536
78884+ help
78885 This is the portion of low virtual memory which should be protected
78886 from userspace allocation. Keeping a user from writing to low pages
78887 can help reduce the impact of kernel NULL pointer bugs.
78888@@ -320,7 +320,7 @@ config MEMORY_FAILURE
78889
78890 config HWPOISON_INJECT
78891 tristate "HWPoison pages injector"
78892- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
78893+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
78894 select PROC_PAGE_MONITOR
78895
78896 config NOMMU_INITIAL_TRIM_EXCESS
78897diff --git a/mm/filemap.c b/mm/filemap.c
78898index 83efee7..3f99381 100644
78899--- a/mm/filemap.c
78900+++ b/mm/filemap.c
78901@@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
78902 struct address_space *mapping = file->f_mapping;
78903
78904 if (!mapping->a_ops->readpage)
78905- return -ENOEXEC;
78906+ return -ENODEV;
78907 file_accessed(file);
78908 vma->vm_ops = &generic_file_vm_ops;
78909 return 0;
78910@@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
78911 *pos = i_size_read(inode);
78912
78913 if (limit != RLIM_INFINITY) {
78914+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
78915 if (*pos >= limit) {
78916 send_sig(SIGXFSZ, current, 0);
78917 return -EFBIG;
78918diff --git a/mm/fremap.c b/mm/fremap.c
78919index a0aaf0e..20325c3 100644
78920--- a/mm/fremap.c
78921+++ b/mm/fremap.c
78922@@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
78923 retry:
78924 vma = find_vma(mm, start);
78925
78926+#ifdef CONFIG_PAX_SEGMEXEC
78927+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
78928+ goto out;
78929+#endif
78930+
78931 /*
78932 * Make sure the vma is shared, that it supports prefaulting,
78933 * and that the remapped range is valid and fully within
78934diff --git a/mm/highmem.c b/mm/highmem.c
78935index b32b70c..e512eb0 100644
78936--- a/mm/highmem.c
78937+++ b/mm/highmem.c
78938@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
78939 * So no dangers, even with speculative execution.
78940 */
78941 page = pte_page(pkmap_page_table[i]);
78942+ pax_open_kernel();
78943 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
78944-
78945+ pax_close_kernel();
78946 set_page_address(page, NULL);
78947 need_flush = 1;
78948 }
78949@@ -198,9 +199,11 @@ start:
78950 }
78951 }
78952 vaddr = PKMAP_ADDR(last_pkmap_nr);
78953+
78954+ pax_open_kernel();
78955 set_pte_at(&init_mm, vaddr,
78956 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
78957-
78958+ pax_close_kernel();
78959 pkmap_count[last_pkmap_nr] = 1;
78960 set_page_address(page, (void *)vaddr);
78961
78962diff --git a/mm/hugetlb.c b/mm/hugetlb.c
78963index 546db81..34830af 100644
78964--- a/mm/hugetlb.c
78965+++ b/mm/hugetlb.c
78966@@ -2008,15 +2008,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
78967 struct hstate *h = &default_hstate;
78968 unsigned long tmp;
78969 int ret;
78970+ ctl_table_no_const hugetlb_table;
78971
78972 tmp = h->max_huge_pages;
78973
78974 if (write && h->order >= MAX_ORDER)
78975 return -EINVAL;
78976
78977- table->data = &tmp;
78978- table->maxlen = sizeof(unsigned long);
78979- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
78980+ hugetlb_table = *table;
78981+ hugetlb_table.data = &tmp;
78982+ hugetlb_table.maxlen = sizeof(unsigned long);
78983+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
78984 if (ret)
78985 goto out;
78986
78987@@ -2073,15 +2075,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
78988 struct hstate *h = &default_hstate;
78989 unsigned long tmp;
78990 int ret;
78991+ ctl_table_no_const hugetlb_table;
78992
78993 tmp = h->nr_overcommit_huge_pages;
78994
78995 if (write && h->order >= MAX_ORDER)
78996 return -EINVAL;
78997
78998- table->data = &tmp;
78999- table->maxlen = sizeof(unsigned long);
79000- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
79001+ hugetlb_table = *table;
79002+ hugetlb_table.data = &tmp;
79003+ hugetlb_table.maxlen = sizeof(unsigned long);
79004+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
79005 if (ret)
79006 goto out;
79007
79008@@ -2511,6 +2515,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
79009 return 1;
79010 }
79011
79012+#ifdef CONFIG_PAX_SEGMEXEC
79013+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
79014+{
79015+ struct mm_struct *mm = vma->vm_mm;
79016+ struct vm_area_struct *vma_m;
79017+ unsigned long address_m;
79018+ pte_t *ptep_m;
79019+
79020+ vma_m = pax_find_mirror_vma(vma);
79021+ if (!vma_m)
79022+ return;
79023+
79024+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
79025+ address_m = address + SEGMEXEC_TASK_SIZE;
79026+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
79027+ get_page(page_m);
79028+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
79029+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
79030+}
79031+#endif
79032+
79033 /*
79034 * Hugetlb_cow() should be called with page lock of the original hugepage held.
79035 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
79036@@ -2629,6 +2654,11 @@ retry_avoidcopy:
79037 make_huge_pte(vma, new_page, 1));
79038 page_remove_rmap(old_page);
79039 hugepage_add_new_anon_rmap(new_page, vma, address);
79040+
79041+#ifdef CONFIG_PAX_SEGMEXEC
79042+ pax_mirror_huge_pte(vma, address, new_page);
79043+#endif
79044+
79045 /* Make the old page be freed below */
79046 new_page = old_page;
79047 }
79048@@ -2788,6 +2818,10 @@ retry:
79049 && (vma->vm_flags & VM_SHARED)));
79050 set_huge_pte_at(mm, address, ptep, new_pte);
79051
79052+#ifdef CONFIG_PAX_SEGMEXEC
79053+ pax_mirror_huge_pte(vma, address, page);
79054+#endif
79055+
79056 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
79057 /* Optimization, do the COW without a second fault */
79058 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
79059@@ -2817,6 +2851,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79060 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
79061 struct hstate *h = hstate_vma(vma);
79062
79063+#ifdef CONFIG_PAX_SEGMEXEC
79064+ struct vm_area_struct *vma_m;
79065+#endif
79066+
79067 address &= huge_page_mask(h);
79068
79069 ptep = huge_pte_offset(mm, address);
79070@@ -2830,6 +2868,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79071 VM_FAULT_SET_HINDEX(hstate_index(h));
79072 }
79073
79074+#ifdef CONFIG_PAX_SEGMEXEC
79075+ vma_m = pax_find_mirror_vma(vma);
79076+ if (vma_m) {
79077+ unsigned long address_m;
79078+
79079+ if (vma->vm_start > vma_m->vm_start) {
79080+ address_m = address;
79081+ address -= SEGMEXEC_TASK_SIZE;
79082+ vma = vma_m;
79083+ h = hstate_vma(vma);
79084+ } else
79085+ address_m = address + SEGMEXEC_TASK_SIZE;
79086+
79087+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
79088+ return VM_FAULT_OOM;
79089+ address_m &= HPAGE_MASK;
79090+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
79091+ }
79092+#endif
79093+
79094 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
79095 if (!ptep)
79096 return VM_FAULT_OOM;
79097diff --git a/mm/internal.h b/mm/internal.h
79098index 9ba2110..eaf0674 100644
79099--- a/mm/internal.h
79100+++ b/mm/internal.h
79101@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
79102 * in mm/page_alloc.c
79103 */
79104 extern void __free_pages_bootmem(struct page *page, unsigned int order);
79105+extern void free_compound_page(struct page *page);
79106 extern void prep_compound_page(struct page *page, unsigned long order);
79107 #ifdef CONFIG_MEMORY_FAILURE
79108 extern bool is_free_buddy_page(struct page *page);
79109diff --git a/mm/kmemleak.c b/mm/kmemleak.c
79110index 752a705..6c3102e 100644
79111--- a/mm/kmemleak.c
79112+++ b/mm/kmemleak.c
79113@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
79114
79115 for (i = 0; i < object->trace_len; i++) {
79116 void *ptr = (void *)object->trace[i];
79117- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
79118+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
79119 }
79120 }
79121
79122@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
79123 return -ENOMEM;
79124 }
79125
79126- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
79127+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
79128 &kmemleak_fops);
79129 if (!dentry)
79130 pr_warning("Failed to create the debugfs kmemleak file\n");
79131diff --git a/mm/maccess.c b/mm/maccess.c
79132index d53adf9..03a24bf 100644
79133--- a/mm/maccess.c
79134+++ b/mm/maccess.c
79135@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
79136 set_fs(KERNEL_DS);
79137 pagefault_disable();
79138 ret = __copy_from_user_inatomic(dst,
79139- (__force const void __user *)src, size);
79140+ (const void __force_user *)src, size);
79141 pagefault_enable();
79142 set_fs(old_fs);
79143
79144@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
79145
79146 set_fs(KERNEL_DS);
79147 pagefault_disable();
79148- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
79149+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
79150 pagefault_enable();
79151 set_fs(old_fs);
79152
79153diff --git a/mm/madvise.c b/mm/madvise.c
79154index 03dfa5c..b032917 100644
79155--- a/mm/madvise.c
79156+++ b/mm/madvise.c
79157@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
79158 pgoff_t pgoff;
79159 unsigned long new_flags = vma->vm_flags;
79160
79161+#ifdef CONFIG_PAX_SEGMEXEC
79162+ struct vm_area_struct *vma_m;
79163+#endif
79164+
79165 switch (behavior) {
79166 case MADV_NORMAL:
79167 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
79168@@ -123,6 +127,13 @@ success:
79169 /*
79170 * vm_flags is protected by the mmap_sem held in write mode.
79171 */
79172+
79173+#ifdef CONFIG_PAX_SEGMEXEC
79174+ vma_m = pax_find_mirror_vma(vma);
79175+ if (vma_m)
79176+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
79177+#endif
79178+
79179 vma->vm_flags = new_flags;
79180
79181 out:
79182@@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
79183 struct vm_area_struct ** prev,
79184 unsigned long start, unsigned long end)
79185 {
79186+
79187+#ifdef CONFIG_PAX_SEGMEXEC
79188+ struct vm_area_struct *vma_m;
79189+#endif
79190+
79191 *prev = vma;
79192 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
79193 return -EINVAL;
79194@@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
79195 zap_page_range(vma, start, end - start, &details);
79196 } else
79197 zap_page_range(vma, start, end - start, NULL);
79198+
79199+#ifdef CONFIG_PAX_SEGMEXEC
79200+ vma_m = pax_find_mirror_vma(vma);
79201+ if (vma_m) {
79202+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
79203+ struct zap_details details = {
79204+ .nonlinear_vma = vma_m,
79205+ .last_index = ULONG_MAX,
79206+ };
79207+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
79208+ } else
79209+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
79210+ }
79211+#endif
79212+
79213 return 0;
79214 }
79215
79216@@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
79217 if (end < start)
79218 goto out;
79219
79220+#ifdef CONFIG_PAX_SEGMEXEC
79221+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79222+ if (end > SEGMEXEC_TASK_SIZE)
79223+ goto out;
79224+ } else
79225+#endif
79226+
79227+ if (end > TASK_SIZE)
79228+ goto out;
79229+
79230 error = 0;
79231 if (end == start)
79232 goto out;
79233diff --git a/mm/memory-failure.c b/mm/memory-failure.c
79234index c6e4dd3..1f41988 100644
79235--- a/mm/memory-failure.c
79236+++ b/mm/memory-failure.c
79237@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
79238
79239 int sysctl_memory_failure_recovery __read_mostly = 1;
79240
79241-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
79242+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
79243
79244 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
79245
79246@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
79247 pfn, t->comm, t->pid);
79248 si.si_signo = SIGBUS;
79249 si.si_errno = 0;
79250- si.si_addr = (void *)addr;
79251+ si.si_addr = (void __user *)addr;
79252 #ifdef __ARCH_SI_TRAPNO
79253 si.si_trapno = trapno;
79254 #endif
79255@@ -760,7 +760,7 @@ static struct page_state {
79256 unsigned long res;
79257 char *msg;
79258 int (*action)(struct page *p, unsigned long pfn);
79259-} error_states[] = {
79260+} __do_const error_states[] = {
79261 { reserved, reserved, "reserved kernel", me_kernel },
79262 /*
79263 * free pages are specially detected outside this table:
79264@@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
79265 }
79266
79267 nr_pages = 1 << compound_trans_order(hpage);
79268- atomic_long_add(nr_pages, &mce_bad_pages);
79269+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
79270
79271 /*
79272 * We need/can do nothing about count=0 pages.
79273@@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
79274 if (!PageHWPoison(hpage)
79275 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
79276 || (p != hpage && TestSetPageHWPoison(hpage))) {
79277- atomic_long_sub(nr_pages, &mce_bad_pages);
79278+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
79279 return 0;
79280 }
79281 set_page_hwpoison_huge_page(hpage);
79282@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
79283 }
79284 if (hwpoison_filter(p)) {
79285 if (TestClearPageHWPoison(p))
79286- atomic_long_sub(nr_pages, &mce_bad_pages);
79287+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
79288 unlock_page(hpage);
79289 put_page(hpage);
79290 return 0;
79291@@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
79292 return 0;
79293 }
79294 if (TestClearPageHWPoison(p))
79295- atomic_long_sub(nr_pages, &mce_bad_pages);
79296+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
79297 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
79298 return 0;
79299 }
79300@@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
79301 */
79302 if (TestClearPageHWPoison(page)) {
79303 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
79304- atomic_long_sub(nr_pages, &mce_bad_pages);
79305+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
79306 freeit = 1;
79307 if (PageHuge(page))
79308 clear_page_hwpoison_huge_page(page);
79309@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
79310 }
79311 done:
79312 if (!PageHWPoison(hpage))
79313- atomic_long_add(1 << compound_trans_order(hpage),
79314+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
79315 &mce_bad_pages);
79316 set_page_hwpoison_huge_page(hpage);
79317 dequeue_hwpoisoned_huge_page(hpage);
79318@@ -1583,7 +1583,7 @@ int soft_offline_page(struct page *page, int flags)
79319 return ret;
79320
79321 done:
79322- atomic_long_add(1, &mce_bad_pages);
79323+ atomic_long_add_unchecked(1, &mce_bad_pages);
79324 SetPageHWPoison(page);
79325 /* keep elevated page count for bad page */
79326 return ret;
79327diff --git a/mm/memory.c b/mm/memory.c
79328index bb1369f..efb96b5 100644
79329--- a/mm/memory.c
79330+++ b/mm/memory.c
79331@@ -433,6 +433,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
79332 free_pte_range(tlb, pmd, addr);
79333 } while (pmd++, addr = next, addr != end);
79334
79335+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
79336 start &= PUD_MASK;
79337 if (start < floor)
79338 return;
79339@@ -447,6 +448,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
79340 pmd = pmd_offset(pud, start);
79341 pud_clear(pud);
79342 pmd_free_tlb(tlb, pmd, start);
79343+#endif
79344+
79345 }
79346
79347 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
79348@@ -466,6 +469,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
79349 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
79350 } while (pud++, addr = next, addr != end);
79351
79352+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
79353 start &= PGDIR_MASK;
79354 if (start < floor)
79355 return;
79356@@ -480,6 +484,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
79357 pud = pud_offset(pgd, start);
79358 pgd_clear(pgd);
79359 pud_free_tlb(tlb, pud, start);
79360+#endif
79361+
79362 }
79363
79364 /*
79365@@ -1618,12 +1624,6 @@ no_page_table:
79366 return page;
79367 }
79368
79369-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
79370-{
79371- return stack_guard_page_start(vma, addr) ||
79372- stack_guard_page_end(vma, addr+PAGE_SIZE);
79373-}
79374-
79375 /**
79376 * __get_user_pages() - pin user pages in memory
79377 * @tsk: task_struct of target task
79378@@ -1709,10 +1709,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
79379
79380 i = 0;
79381
79382- do {
79383+ while (nr_pages) {
79384 struct vm_area_struct *vma;
79385
79386- vma = find_extend_vma(mm, start);
79387+ vma = find_vma(mm, start);
79388 if (!vma && in_gate_area(mm, start)) {
79389 unsigned long pg = start & PAGE_MASK;
79390 pgd_t *pgd;
79391@@ -1760,7 +1760,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
79392 goto next_page;
79393 }
79394
79395- if (!vma ||
79396+ if (!vma || start < vma->vm_start ||
79397 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
79398 !(vm_flags & vma->vm_flags))
79399 return i ? : -EFAULT;
79400@@ -1787,11 +1787,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
79401 int ret;
79402 unsigned int fault_flags = 0;
79403
79404- /* For mlock, just skip the stack guard page. */
79405- if (foll_flags & FOLL_MLOCK) {
79406- if (stack_guard_page(vma, start))
79407- goto next_page;
79408- }
79409 if (foll_flags & FOLL_WRITE)
79410 fault_flags |= FAULT_FLAG_WRITE;
79411 if (nonblocking)
79412@@ -1865,7 +1860,7 @@ next_page:
79413 start += PAGE_SIZE;
79414 nr_pages--;
79415 } while (nr_pages && start < vma->vm_end);
79416- } while (nr_pages);
79417+ }
79418 return i;
79419 }
79420 EXPORT_SYMBOL(__get_user_pages);
79421@@ -2072,6 +2067,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
79422 page_add_file_rmap(page);
79423 set_pte_at(mm, addr, pte, mk_pte(page, prot));
79424
79425+#ifdef CONFIG_PAX_SEGMEXEC
79426+ pax_mirror_file_pte(vma, addr, page, ptl);
79427+#endif
79428+
79429 retval = 0;
79430 pte_unmap_unlock(pte, ptl);
79431 return retval;
79432@@ -2116,9 +2115,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
79433 if (!page_count(page))
79434 return -EINVAL;
79435 if (!(vma->vm_flags & VM_MIXEDMAP)) {
79436+
79437+#ifdef CONFIG_PAX_SEGMEXEC
79438+ struct vm_area_struct *vma_m;
79439+#endif
79440+
79441 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
79442 BUG_ON(vma->vm_flags & VM_PFNMAP);
79443 vma->vm_flags |= VM_MIXEDMAP;
79444+
79445+#ifdef CONFIG_PAX_SEGMEXEC
79446+ vma_m = pax_find_mirror_vma(vma);
79447+ if (vma_m)
79448+ vma_m->vm_flags |= VM_MIXEDMAP;
79449+#endif
79450+
79451 }
79452 return insert_page(vma, addr, page, vma->vm_page_prot);
79453 }
79454@@ -2201,6 +2212,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
79455 unsigned long pfn)
79456 {
79457 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
79458+ BUG_ON(vma->vm_mirror);
79459
79460 if (addr < vma->vm_start || addr >= vma->vm_end)
79461 return -EFAULT;
79462@@ -2401,7 +2413,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
79463
79464 BUG_ON(pud_huge(*pud));
79465
79466- pmd = pmd_alloc(mm, pud, addr);
79467+ pmd = (mm == &init_mm) ?
79468+ pmd_alloc_kernel(mm, pud, addr) :
79469+ pmd_alloc(mm, pud, addr);
79470 if (!pmd)
79471 return -ENOMEM;
79472 do {
79473@@ -2421,7 +2435,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
79474 unsigned long next;
79475 int err;
79476
79477- pud = pud_alloc(mm, pgd, addr);
79478+ pud = (mm == &init_mm) ?
79479+ pud_alloc_kernel(mm, pgd, addr) :
79480+ pud_alloc(mm, pgd, addr);
79481 if (!pud)
79482 return -ENOMEM;
79483 do {
79484@@ -2509,6 +2525,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
79485 copy_user_highpage(dst, src, va, vma);
79486 }
79487
79488+#ifdef CONFIG_PAX_SEGMEXEC
79489+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
79490+{
79491+ struct mm_struct *mm = vma->vm_mm;
79492+ spinlock_t *ptl;
79493+ pte_t *pte, entry;
79494+
79495+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
79496+ entry = *pte;
79497+ if (!pte_present(entry)) {
79498+ if (!pte_none(entry)) {
79499+ BUG_ON(pte_file(entry));
79500+ free_swap_and_cache(pte_to_swp_entry(entry));
79501+ pte_clear_not_present_full(mm, address, pte, 0);
79502+ }
79503+ } else {
79504+ struct page *page;
79505+
79506+ flush_cache_page(vma, address, pte_pfn(entry));
79507+ entry = ptep_clear_flush(vma, address, pte);
79508+ BUG_ON(pte_dirty(entry));
79509+ page = vm_normal_page(vma, address, entry);
79510+ if (page) {
79511+ update_hiwater_rss(mm);
79512+ if (PageAnon(page))
79513+ dec_mm_counter_fast(mm, MM_ANONPAGES);
79514+ else
79515+ dec_mm_counter_fast(mm, MM_FILEPAGES);
79516+ page_remove_rmap(page);
79517+ page_cache_release(page);
79518+ }
79519+ }
79520+ pte_unmap_unlock(pte, ptl);
79521+}
79522+
79523+/* PaX: if vma is mirrored, synchronize the mirror's PTE
79524+ *
79525+ * the ptl of the lower mapped page is held on entry and is not released on exit
79526+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
79527+ */
79528+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
79529+{
79530+ struct mm_struct *mm = vma->vm_mm;
79531+ unsigned long address_m;
79532+ spinlock_t *ptl_m;
79533+ struct vm_area_struct *vma_m;
79534+ pmd_t *pmd_m;
79535+ pte_t *pte_m, entry_m;
79536+
79537+ BUG_ON(!page_m || !PageAnon(page_m));
79538+
79539+ vma_m = pax_find_mirror_vma(vma);
79540+ if (!vma_m)
79541+ return;
79542+
79543+ BUG_ON(!PageLocked(page_m));
79544+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
79545+ address_m = address + SEGMEXEC_TASK_SIZE;
79546+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
79547+ pte_m = pte_offset_map(pmd_m, address_m);
79548+ ptl_m = pte_lockptr(mm, pmd_m);
79549+ if (ptl != ptl_m) {
79550+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
79551+ if (!pte_none(*pte_m))
79552+ goto out;
79553+ }
79554+
79555+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
79556+ page_cache_get(page_m);
79557+ page_add_anon_rmap(page_m, vma_m, address_m);
79558+ inc_mm_counter_fast(mm, MM_ANONPAGES);
79559+ set_pte_at(mm, address_m, pte_m, entry_m);
79560+ update_mmu_cache(vma_m, address_m, entry_m);
79561+out:
79562+ if (ptl != ptl_m)
79563+ spin_unlock(ptl_m);
79564+ pte_unmap(pte_m);
79565+ unlock_page(page_m);
79566+}
79567+
79568+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
79569+{
79570+ struct mm_struct *mm = vma->vm_mm;
79571+ unsigned long address_m;
79572+ spinlock_t *ptl_m;
79573+ struct vm_area_struct *vma_m;
79574+ pmd_t *pmd_m;
79575+ pte_t *pte_m, entry_m;
79576+
79577+ BUG_ON(!page_m || PageAnon(page_m));
79578+
79579+ vma_m = pax_find_mirror_vma(vma);
79580+ if (!vma_m)
79581+ return;
79582+
79583+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
79584+ address_m = address + SEGMEXEC_TASK_SIZE;
79585+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
79586+ pte_m = pte_offset_map(pmd_m, address_m);
79587+ ptl_m = pte_lockptr(mm, pmd_m);
79588+ if (ptl != ptl_m) {
79589+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
79590+ if (!pte_none(*pte_m))
79591+ goto out;
79592+ }
79593+
79594+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
79595+ page_cache_get(page_m);
79596+ page_add_file_rmap(page_m);
79597+ inc_mm_counter_fast(mm, MM_FILEPAGES);
79598+ set_pte_at(mm, address_m, pte_m, entry_m);
79599+ update_mmu_cache(vma_m, address_m, entry_m);
79600+out:
79601+ if (ptl != ptl_m)
79602+ spin_unlock(ptl_m);
79603+ pte_unmap(pte_m);
79604+}
79605+
79606+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
79607+{
79608+ struct mm_struct *mm = vma->vm_mm;
79609+ unsigned long address_m;
79610+ spinlock_t *ptl_m;
79611+ struct vm_area_struct *vma_m;
79612+ pmd_t *pmd_m;
79613+ pte_t *pte_m, entry_m;
79614+
79615+ vma_m = pax_find_mirror_vma(vma);
79616+ if (!vma_m)
79617+ return;
79618+
79619+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
79620+ address_m = address + SEGMEXEC_TASK_SIZE;
79621+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
79622+ pte_m = pte_offset_map(pmd_m, address_m);
79623+ ptl_m = pte_lockptr(mm, pmd_m);
79624+ if (ptl != ptl_m) {
79625+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
79626+ if (!pte_none(*pte_m))
79627+ goto out;
79628+ }
79629+
79630+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
79631+ set_pte_at(mm, address_m, pte_m, entry_m);
79632+out:
79633+ if (ptl != ptl_m)
79634+ spin_unlock(ptl_m);
79635+ pte_unmap(pte_m);
79636+}
79637+
79638+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
79639+{
79640+ struct page *page_m;
79641+ pte_t entry;
79642+
79643+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
79644+ goto out;
79645+
79646+ entry = *pte;
79647+ page_m = vm_normal_page(vma, address, entry);
79648+ if (!page_m)
79649+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
79650+ else if (PageAnon(page_m)) {
79651+ if (pax_find_mirror_vma(vma)) {
79652+ pte_unmap_unlock(pte, ptl);
79653+ lock_page(page_m);
79654+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
79655+ if (pte_same(entry, *pte))
79656+ pax_mirror_anon_pte(vma, address, page_m, ptl);
79657+ else
79658+ unlock_page(page_m);
79659+ }
79660+ } else
79661+ pax_mirror_file_pte(vma, address, page_m, ptl);
79662+
79663+out:
79664+ pte_unmap_unlock(pte, ptl);
79665+}
79666+#endif
79667+
79668 /*
79669 * This routine handles present pages, when users try to write
79670 * to a shared page. It is done by copying the page to a new address
79671@@ -2725,6 +2921,12 @@ gotten:
79672 */
79673 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
79674 if (likely(pte_same(*page_table, orig_pte))) {
79675+
79676+#ifdef CONFIG_PAX_SEGMEXEC
79677+ if (pax_find_mirror_vma(vma))
79678+ BUG_ON(!trylock_page(new_page));
79679+#endif
79680+
79681 if (old_page) {
79682 if (!PageAnon(old_page)) {
79683 dec_mm_counter_fast(mm, MM_FILEPAGES);
79684@@ -2776,6 +2978,10 @@ gotten:
79685 page_remove_rmap(old_page);
79686 }
79687
79688+#ifdef CONFIG_PAX_SEGMEXEC
79689+ pax_mirror_anon_pte(vma, address, new_page, ptl);
79690+#endif
79691+
79692 /* Free the old page.. */
79693 new_page = old_page;
79694 ret |= VM_FAULT_WRITE;
79695@@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
79696 swap_free(entry);
79697 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
79698 try_to_free_swap(page);
79699+
79700+#ifdef CONFIG_PAX_SEGMEXEC
79701+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
79702+#endif
79703+
79704 unlock_page(page);
79705 if (swapcache) {
79706 /*
79707@@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
79708
79709 /* No need to invalidate - it was non-present before */
79710 update_mmu_cache(vma, address, page_table);
79711+
79712+#ifdef CONFIG_PAX_SEGMEXEC
79713+ pax_mirror_anon_pte(vma, address, page, ptl);
79714+#endif
79715+
79716 unlock:
79717 pte_unmap_unlock(page_table, ptl);
79718 out:
79719@@ -3093,40 +3309,6 @@ out_release:
79720 }
79721
79722 /*
79723- * This is like a special single-page "expand_{down|up}wards()",
79724- * except we must first make sure that 'address{-|+}PAGE_SIZE'
79725- * doesn't hit another vma.
79726- */
79727-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
79728-{
79729- address &= PAGE_MASK;
79730- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
79731- struct vm_area_struct *prev = vma->vm_prev;
79732-
79733- /*
79734- * Is there a mapping abutting this one below?
79735- *
79736- * That's only ok if it's the same stack mapping
79737- * that has gotten split..
79738- */
79739- if (prev && prev->vm_end == address)
79740- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
79741-
79742- expand_downwards(vma, address - PAGE_SIZE);
79743- }
79744- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
79745- struct vm_area_struct *next = vma->vm_next;
79746-
79747- /* As VM_GROWSDOWN but s/below/above/ */
79748- if (next && next->vm_start == address + PAGE_SIZE)
79749- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
79750-
79751- expand_upwards(vma, address + PAGE_SIZE);
79752- }
79753- return 0;
79754-}
79755-
79756-/*
79757 * We enter with non-exclusive mmap_sem (to exclude vma changes,
79758 * but allow concurrent faults), and pte mapped but not yet locked.
79759 * We return with mmap_sem still held, but pte unmapped and unlocked.
79760@@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
79761 unsigned long address, pte_t *page_table, pmd_t *pmd,
79762 unsigned int flags)
79763 {
79764- struct page *page;
79765+ struct page *page = NULL;
79766 spinlock_t *ptl;
79767 pte_t entry;
79768
79769- pte_unmap(page_table);
79770-
79771- /* Check if we need to add a guard page to the stack */
79772- if (check_stack_guard_page(vma, address) < 0)
79773- return VM_FAULT_SIGBUS;
79774-
79775- /* Use the zero-page for reads */
79776 if (!(flags & FAULT_FLAG_WRITE)) {
79777 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
79778 vma->vm_page_prot));
79779- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
79780+ ptl = pte_lockptr(mm, pmd);
79781+ spin_lock(ptl);
79782 if (!pte_none(*page_table))
79783 goto unlock;
79784 goto setpte;
79785 }
79786
79787 /* Allocate our own private page. */
79788+ pte_unmap(page_table);
79789+
79790 if (unlikely(anon_vma_prepare(vma)))
79791 goto oom;
79792 page = alloc_zeroed_user_highpage_movable(vma, address);
79793@@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
79794 if (!pte_none(*page_table))
79795 goto release;
79796
79797+#ifdef CONFIG_PAX_SEGMEXEC
79798+ if (pax_find_mirror_vma(vma))
79799+ BUG_ON(!trylock_page(page));
79800+#endif
79801+
79802 inc_mm_counter_fast(mm, MM_ANONPAGES);
79803 page_add_new_anon_rmap(page, vma, address);
79804 setpte:
79805@@ -3181,6 +3364,12 @@ setpte:
79806
79807 /* No need to invalidate - it was non-present before */
79808 update_mmu_cache(vma, address, page_table);
79809+
79810+#ifdef CONFIG_PAX_SEGMEXEC
79811+ if (page)
79812+ pax_mirror_anon_pte(vma, address, page, ptl);
79813+#endif
79814+
79815 unlock:
79816 pte_unmap_unlock(page_table, ptl);
79817 return 0;
79818@@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79819 */
79820 /* Only go through if we didn't race with anybody else... */
79821 if (likely(pte_same(*page_table, orig_pte))) {
79822+
79823+#ifdef CONFIG_PAX_SEGMEXEC
79824+ if (anon && pax_find_mirror_vma(vma))
79825+ BUG_ON(!trylock_page(page));
79826+#endif
79827+
79828 flush_icache_page(vma, page);
79829 entry = mk_pte(page, vma->vm_page_prot);
79830 if (flags & FAULT_FLAG_WRITE)
79831@@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79832
79833 /* no need to invalidate: a not-present page won't be cached */
79834 update_mmu_cache(vma, address, page_table);
79835+
79836+#ifdef CONFIG_PAX_SEGMEXEC
79837+ if (anon)
79838+ pax_mirror_anon_pte(vma, address, page, ptl);
79839+ else
79840+ pax_mirror_file_pte(vma, address, page, ptl);
79841+#endif
79842+
79843 } else {
79844 if (cow_page)
79845 mem_cgroup_uncharge_page(cow_page);
79846@@ -3664,6 +3867,12 @@ int handle_pte_fault(struct mm_struct *mm,
79847 if (flags & FAULT_FLAG_WRITE)
79848 flush_tlb_fix_spurious_fault(vma, address);
79849 }
79850+
79851+#ifdef CONFIG_PAX_SEGMEXEC
79852+ pax_mirror_pte(vma, address, pte, pmd, ptl);
79853+ return 0;
79854+#endif
79855+
79856 unlock:
79857 pte_unmap_unlock(pte, ptl);
79858 return 0;
79859@@ -3680,6 +3889,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79860 pmd_t *pmd;
79861 pte_t *pte;
79862
79863+#ifdef CONFIG_PAX_SEGMEXEC
79864+ struct vm_area_struct *vma_m;
79865+#endif
79866+
79867 __set_current_state(TASK_RUNNING);
79868
79869 count_vm_event(PGFAULT);
79870@@ -3691,6 +3904,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79871 if (unlikely(is_vm_hugetlb_page(vma)))
79872 return hugetlb_fault(mm, vma, address, flags);
79873
79874+#ifdef CONFIG_PAX_SEGMEXEC
79875+ vma_m = pax_find_mirror_vma(vma);
79876+ if (vma_m) {
79877+ unsigned long address_m;
79878+ pgd_t *pgd_m;
79879+ pud_t *pud_m;
79880+ pmd_t *pmd_m;
79881+
79882+ if (vma->vm_start > vma_m->vm_start) {
79883+ address_m = address;
79884+ address -= SEGMEXEC_TASK_SIZE;
79885+ vma = vma_m;
79886+ } else
79887+ address_m = address + SEGMEXEC_TASK_SIZE;
79888+
79889+ pgd_m = pgd_offset(mm, address_m);
79890+ pud_m = pud_alloc(mm, pgd_m, address_m);
79891+ if (!pud_m)
79892+ return VM_FAULT_OOM;
79893+ pmd_m = pmd_alloc(mm, pud_m, address_m);
79894+ if (!pmd_m)
79895+ return VM_FAULT_OOM;
79896+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
79897+ return VM_FAULT_OOM;
79898+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
79899+ }
79900+#endif
79901+
79902 retry:
79903 pgd = pgd_offset(mm, address);
79904 pud = pud_alloc(mm, pgd, address);
79905@@ -3789,6 +4030,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
79906 spin_unlock(&mm->page_table_lock);
79907 return 0;
79908 }
79909+
79910+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
79911+{
79912+ pud_t *new = pud_alloc_one(mm, address);
79913+ if (!new)
79914+ return -ENOMEM;
79915+
79916+ smp_wmb(); /* See comment in __pte_alloc */
79917+
79918+ spin_lock(&mm->page_table_lock);
79919+ if (pgd_present(*pgd)) /* Another has populated it */
79920+ pud_free(mm, new);
79921+ else
79922+ pgd_populate_kernel(mm, pgd, new);
79923+ spin_unlock(&mm->page_table_lock);
79924+ return 0;
79925+}
79926 #endif /* __PAGETABLE_PUD_FOLDED */
79927
79928 #ifndef __PAGETABLE_PMD_FOLDED
79929@@ -3819,6 +4077,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
79930 spin_unlock(&mm->page_table_lock);
79931 return 0;
79932 }
79933+
79934+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
79935+{
79936+ pmd_t *new = pmd_alloc_one(mm, address);
79937+ if (!new)
79938+ return -ENOMEM;
79939+
79940+ smp_wmb(); /* See comment in __pte_alloc */
79941+
79942+ spin_lock(&mm->page_table_lock);
79943+#ifndef __ARCH_HAS_4LEVEL_HACK
79944+ if (pud_present(*pud)) /* Another has populated it */
79945+ pmd_free(mm, new);
79946+ else
79947+ pud_populate_kernel(mm, pud, new);
79948+#else
79949+ if (pgd_present(*pud)) /* Another has populated it */
79950+ pmd_free(mm, new);
79951+ else
79952+ pgd_populate_kernel(mm, pud, new);
79953+#endif /* __ARCH_HAS_4LEVEL_HACK */
79954+ spin_unlock(&mm->page_table_lock);
79955+ return 0;
79956+}
79957 #endif /* __PAGETABLE_PMD_FOLDED */
79958
79959 int make_pages_present(unsigned long addr, unsigned long end)
79960@@ -3856,7 +4138,7 @@ static int __init gate_vma_init(void)
79961 gate_vma.vm_start = FIXADDR_USER_START;
79962 gate_vma.vm_end = FIXADDR_USER_END;
79963 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
79964- gate_vma.vm_page_prot = __P101;
79965+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
79966
79967 return 0;
79968 }
79969diff --git a/mm/mempolicy.c b/mm/mempolicy.c
79970index e2df1c1..1e31d57 100644
79971--- a/mm/mempolicy.c
79972+++ b/mm/mempolicy.c
79973@@ -721,6 +721,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
79974 unsigned long vmstart;
79975 unsigned long vmend;
79976
79977+#ifdef CONFIG_PAX_SEGMEXEC
79978+ struct vm_area_struct *vma_m;
79979+#endif
79980+
79981 vma = find_vma(mm, start);
79982 if (!vma || vma->vm_start > start)
79983 return -EFAULT;
79984@@ -757,9 +761,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
79985 if (err)
79986 goto out;
79987 }
79988+
79989 err = vma_replace_policy(vma, new_pol);
79990 if (err)
79991 goto out;
79992+
79993+#ifdef CONFIG_PAX_SEGMEXEC
79994+ vma_m = pax_find_mirror_vma(vma);
79995+ if (vma_m) {
79996+ err = vma_replace_policy(vma_m, new_pol);
79997+ if (err)
79998+ goto out;
79999+ }
80000+#endif
80001+
80002 }
80003
80004 out:
80005@@ -1216,6 +1231,17 @@ static long do_mbind(unsigned long start, unsigned long len,
80006
80007 if (end < start)
80008 return -EINVAL;
80009+
80010+#ifdef CONFIG_PAX_SEGMEXEC
80011+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
80012+ if (end > SEGMEXEC_TASK_SIZE)
80013+ return -EINVAL;
80014+ } else
80015+#endif
80016+
80017+ if (end > TASK_SIZE)
80018+ return -EINVAL;
80019+
80020 if (end == start)
80021 return 0;
80022
80023@@ -1445,8 +1471,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
80024 */
80025 tcred = __task_cred(task);
80026 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
80027- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
80028- !capable(CAP_SYS_NICE)) {
80029+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
80030 rcu_read_unlock();
80031 err = -EPERM;
80032 goto out_put;
80033@@ -1477,6 +1502,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
80034 goto out;
80035 }
80036
80037+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
80038+ if (mm != current->mm &&
80039+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
80040+ mmput(mm);
80041+ err = -EPERM;
80042+ goto out;
80043+ }
80044+#endif
80045+
80046 err = do_migrate_pages(mm, old, new,
80047 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
80048
80049diff --git a/mm/migrate.c b/mm/migrate.c
80050index 2fd8b4a..d70358f 100644
80051--- a/mm/migrate.c
80052+++ b/mm/migrate.c
80053@@ -1401,8 +1401,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
80054 */
80055 tcred = __task_cred(task);
80056 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
80057- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
80058- !capable(CAP_SYS_NICE)) {
80059+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
80060 rcu_read_unlock();
80061 err = -EPERM;
80062 goto out;
80063diff --git a/mm/mlock.c b/mm/mlock.c
80064index c9bd528..da8d069 100644
80065--- a/mm/mlock.c
80066+++ b/mm/mlock.c
80067@@ -13,6 +13,7 @@
80068 #include <linux/pagemap.h>
80069 #include <linux/mempolicy.h>
80070 #include <linux/syscalls.h>
80071+#include <linux/security.h>
80072 #include <linux/sched.h>
80073 #include <linux/export.h>
80074 #include <linux/rmap.h>
80075@@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
80076 {
80077 unsigned long nstart, end, tmp;
80078 struct vm_area_struct * vma, * prev;
80079- int error;
80080+ int error = 0;
80081
80082 VM_BUG_ON(start & ~PAGE_MASK);
80083 VM_BUG_ON(len != PAGE_ALIGN(len));
80084@@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
80085 return -EINVAL;
80086 if (end == start)
80087 return 0;
80088+ if (end > TASK_SIZE)
80089+ return -EINVAL;
80090+
80091 vma = find_vma(current->mm, start);
80092 if (!vma || vma->vm_start > start)
80093 return -ENOMEM;
80094@@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
80095 for (nstart = start ; ; ) {
80096 vm_flags_t newflags;
80097
80098+#ifdef CONFIG_PAX_SEGMEXEC
80099+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
80100+ break;
80101+#endif
80102+
80103 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
80104
80105 newflags = vma->vm_flags | VM_LOCKED;
80106@@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
80107 lock_limit >>= PAGE_SHIFT;
80108
80109 /* check against resource limits */
80110+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
80111 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
80112 error = do_mlock(start, len, 1);
80113 up_write(&current->mm->mmap_sem);
80114@@ -528,6 +538,12 @@ static int do_mlockall(int flags)
80115 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
80116 vm_flags_t newflags;
80117
80118+#ifdef CONFIG_PAX_SEGMEXEC
80119+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
80120+ break;
80121+#endif
80122+
80123+ BUG_ON(vma->vm_end > TASK_SIZE);
80124 newflags = vma->vm_flags | VM_LOCKED;
80125 if (!(flags & MCL_CURRENT))
80126 newflags &= ~VM_LOCKED;
80127@@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
80128 lock_limit >>= PAGE_SHIFT;
80129
80130 ret = -ENOMEM;
80131+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
80132 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
80133 capable(CAP_IPC_LOCK))
80134 ret = do_mlockall(flags);
80135diff --git a/mm/mmap.c b/mm/mmap.c
80136index 8832b87..7d36e4f 100644
80137--- a/mm/mmap.c
80138+++ b/mm/mmap.c
80139@@ -32,6 +32,7 @@
80140 #include <linux/khugepaged.h>
80141 #include <linux/uprobes.h>
80142 #include <linux/rbtree_augmented.h>
80143+#include <linux/random.h>
80144
80145 #include <asm/uaccess.h>
80146 #include <asm/cacheflush.h>
80147@@ -48,6 +49,16 @@
80148 #define arch_rebalance_pgtables(addr, len) (addr)
80149 #endif
80150
80151+static inline void verify_mm_writelocked(struct mm_struct *mm)
80152+{
80153+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
80154+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
80155+ up_read(&mm->mmap_sem);
80156+ BUG();
80157+ }
80158+#endif
80159+}
80160+
80161 static void unmap_region(struct mm_struct *mm,
80162 struct vm_area_struct *vma, struct vm_area_struct *prev,
80163 unsigned long start, unsigned long end);
80164@@ -67,22 +78,32 @@ static void unmap_region(struct mm_struct *mm,
80165 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
80166 *
80167 */
80168-pgprot_t protection_map[16] = {
80169+pgprot_t protection_map[16] __read_only = {
80170 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
80171 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
80172 };
80173
80174-pgprot_t vm_get_page_prot(unsigned long vm_flags)
80175+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
80176 {
80177- return __pgprot(pgprot_val(protection_map[vm_flags &
80178+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
80179 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
80180 pgprot_val(arch_vm_get_page_prot(vm_flags)));
80181+
80182+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
80183+ if (!(__supported_pte_mask & _PAGE_NX) &&
80184+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
80185+ (vm_flags & (VM_READ | VM_WRITE)))
80186+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
80187+#endif
80188+
80189+ return prot;
80190 }
80191 EXPORT_SYMBOL(vm_get_page_prot);
80192
80193 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
80194 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
80195 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
80196+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
80197 /*
80198 * Make sure vm_committed_as in one cacheline and not cacheline shared with
80199 * other variables. It can be updated by several CPUs frequently.
80200@@ -238,6 +259,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
80201 struct vm_area_struct *next = vma->vm_next;
80202
80203 might_sleep();
80204+ BUG_ON(vma->vm_mirror);
80205 if (vma->vm_ops && vma->vm_ops->close)
80206 vma->vm_ops->close(vma);
80207 if (vma->vm_file)
80208@@ -281,6 +303,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
80209 * not page aligned -Ram Gupta
80210 */
80211 rlim = rlimit(RLIMIT_DATA);
80212+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
80213 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
80214 (mm->end_data - mm->start_data) > rlim)
80215 goto out;
80216@@ -888,6 +911,12 @@ static int
80217 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
80218 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
80219 {
80220+
80221+#ifdef CONFIG_PAX_SEGMEXEC
80222+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
80223+ return 0;
80224+#endif
80225+
80226 if (is_mergeable_vma(vma, file, vm_flags) &&
80227 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
80228 if (vma->vm_pgoff == vm_pgoff)
80229@@ -907,6 +936,12 @@ static int
80230 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
80231 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
80232 {
80233+
80234+#ifdef CONFIG_PAX_SEGMEXEC
80235+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
80236+ return 0;
80237+#endif
80238+
80239 if (is_mergeable_vma(vma, file, vm_flags) &&
80240 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
80241 pgoff_t vm_pglen;
80242@@ -949,13 +984,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
80243 struct vm_area_struct *vma_merge(struct mm_struct *mm,
80244 struct vm_area_struct *prev, unsigned long addr,
80245 unsigned long end, unsigned long vm_flags,
80246- struct anon_vma *anon_vma, struct file *file,
80247+ struct anon_vma *anon_vma, struct file *file,
80248 pgoff_t pgoff, struct mempolicy *policy)
80249 {
80250 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
80251 struct vm_area_struct *area, *next;
80252 int err;
80253
80254+#ifdef CONFIG_PAX_SEGMEXEC
80255+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
80256+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
80257+
80258+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
80259+#endif
80260+
80261 /*
80262 * We later require that vma->vm_flags == vm_flags,
80263 * so this tests vma->vm_flags & VM_SPECIAL, too.
80264@@ -971,6 +1013,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
80265 if (next && next->vm_end == end) /* cases 6, 7, 8 */
80266 next = next->vm_next;
80267
80268+#ifdef CONFIG_PAX_SEGMEXEC
80269+ if (prev)
80270+ prev_m = pax_find_mirror_vma(prev);
80271+ if (area)
80272+ area_m = pax_find_mirror_vma(area);
80273+ if (next)
80274+ next_m = pax_find_mirror_vma(next);
80275+#endif
80276+
80277 /*
80278 * Can it merge with the predecessor?
80279 */
80280@@ -990,9 +1041,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
80281 /* cases 1, 6 */
80282 err = vma_adjust(prev, prev->vm_start,
80283 next->vm_end, prev->vm_pgoff, NULL);
80284- } else /* cases 2, 5, 7 */
80285+
80286+#ifdef CONFIG_PAX_SEGMEXEC
80287+ if (!err && prev_m)
80288+ err = vma_adjust(prev_m, prev_m->vm_start,
80289+ next_m->vm_end, prev_m->vm_pgoff, NULL);
80290+#endif
80291+
80292+ } else { /* cases 2, 5, 7 */
80293 err = vma_adjust(prev, prev->vm_start,
80294 end, prev->vm_pgoff, NULL);
80295+
80296+#ifdef CONFIG_PAX_SEGMEXEC
80297+ if (!err && prev_m)
80298+ err = vma_adjust(prev_m, prev_m->vm_start,
80299+ end_m, prev_m->vm_pgoff, NULL);
80300+#endif
80301+
80302+ }
80303 if (err)
80304 return NULL;
80305 khugepaged_enter_vma_merge(prev);
80306@@ -1006,12 +1072,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
80307 mpol_equal(policy, vma_policy(next)) &&
80308 can_vma_merge_before(next, vm_flags,
80309 anon_vma, file, pgoff+pglen)) {
80310- if (prev && addr < prev->vm_end) /* case 4 */
80311+ if (prev && addr < prev->vm_end) { /* case 4 */
80312 err = vma_adjust(prev, prev->vm_start,
80313 addr, prev->vm_pgoff, NULL);
80314- else /* cases 3, 8 */
80315+
80316+#ifdef CONFIG_PAX_SEGMEXEC
80317+ if (!err && prev_m)
80318+ err = vma_adjust(prev_m, prev_m->vm_start,
80319+ addr_m, prev_m->vm_pgoff, NULL);
80320+#endif
80321+
80322+ } else { /* cases 3, 8 */
80323 err = vma_adjust(area, addr, next->vm_end,
80324 next->vm_pgoff - pglen, NULL);
80325+
80326+#ifdef CONFIG_PAX_SEGMEXEC
80327+ if (!err && area_m)
80328+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
80329+ next_m->vm_pgoff - pglen, NULL);
80330+#endif
80331+
80332+ }
80333 if (err)
80334 return NULL;
80335 khugepaged_enter_vma_merge(area);
80336@@ -1120,16 +1201,13 @@ none:
80337 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
80338 struct file *file, long pages)
80339 {
80340- const unsigned long stack_flags
80341- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
80342-
80343 mm->total_vm += pages;
80344
80345 if (file) {
80346 mm->shared_vm += pages;
80347 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
80348 mm->exec_vm += pages;
80349- } else if (flags & stack_flags)
80350+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
80351 mm->stack_vm += pages;
80352 }
80353 #endif /* CONFIG_PROC_FS */
80354@@ -1165,7 +1243,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80355 * (the exception is when the underlying filesystem is noexec
80356 * mounted, in which case we dont add PROT_EXEC.)
80357 */
80358- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
80359+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
80360 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
80361 prot |= PROT_EXEC;
80362
80363@@ -1191,7 +1269,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80364 /* Obtain the address to map to. we verify (or select) it and ensure
80365 * that it represents a valid section of the address space.
80366 */
80367- addr = get_unmapped_area(file, addr, len, pgoff, flags);
80368+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
80369 if (addr & ~PAGE_MASK)
80370 return addr;
80371
80372@@ -1202,6 +1280,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80373 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
80374 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
80375
80376+#ifdef CONFIG_PAX_MPROTECT
80377+ if (mm->pax_flags & MF_PAX_MPROTECT) {
80378+#ifndef CONFIG_PAX_MPROTECT_COMPAT
80379+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
80380+ gr_log_rwxmmap(file);
80381+
80382+#ifdef CONFIG_PAX_EMUPLT
80383+ vm_flags &= ~VM_EXEC;
80384+#else
80385+ return -EPERM;
80386+#endif
80387+
80388+ }
80389+
80390+ if (!(vm_flags & VM_EXEC))
80391+ vm_flags &= ~VM_MAYEXEC;
80392+#else
80393+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
80394+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
80395+#endif
80396+ else
80397+ vm_flags &= ~VM_MAYWRITE;
80398+ }
80399+#endif
80400+
80401+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
80402+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
80403+ vm_flags &= ~VM_PAGEEXEC;
80404+#endif
80405+
80406 if (flags & MAP_LOCKED)
80407 if (!can_do_mlock())
80408 return -EPERM;
80409@@ -1213,6 +1321,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80410 locked += mm->locked_vm;
80411 lock_limit = rlimit(RLIMIT_MEMLOCK);
80412 lock_limit >>= PAGE_SHIFT;
80413+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
80414 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
80415 return -EAGAIN;
80416 }
80417@@ -1279,6 +1388,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80418 }
80419 }
80420
80421+ if (!gr_acl_handle_mmap(file, prot))
80422+ return -EACCES;
80423+
80424 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
80425 }
80426
80427@@ -1356,7 +1468,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
80428 vm_flags_t vm_flags = vma->vm_flags;
80429
80430 /* If it was private or non-writable, the write bit is already clear */
80431- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
80432+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
80433 return 0;
80434
80435 /* The backer wishes to know when pages are first written to? */
80436@@ -1405,13 +1517,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
80437 unsigned long charged = 0;
80438 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
80439
80440+#ifdef CONFIG_PAX_SEGMEXEC
80441+ struct vm_area_struct *vma_m = NULL;
80442+#endif
80443+
80444+ /*
80445+ * mm->mmap_sem is required to protect against another thread
80446+ * changing the mappings in case we sleep.
80447+ */
80448+ verify_mm_writelocked(mm);
80449+
80450 /* Clear old maps */
80451 error = -ENOMEM;
80452-munmap_back:
80453 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
80454 if (do_munmap(mm, addr, len))
80455 return -ENOMEM;
80456- goto munmap_back;
80457+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
80458 }
80459
80460 /* Check against address space limit. */
80461@@ -1460,6 +1581,16 @@ munmap_back:
80462 goto unacct_error;
80463 }
80464
80465+#ifdef CONFIG_PAX_SEGMEXEC
80466+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
80467+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
80468+ if (!vma_m) {
80469+ error = -ENOMEM;
80470+ goto free_vma;
80471+ }
80472+ }
80473+#endif
80474+
80475 vma->vm_mm = mm;
80476 vma->vm_start = addr;
80477 vma->vm_end = addr + len;
80478@@ -1484,6 +1615,13 @@ munmap_back:
80479 if (error)
80480 goto unmap_and_free_vma;
80481
80482+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
80483+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
80484+ vma->vm_flags |= VM_PAGEEXEC;
80485+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
80486+ }
80487+#endif
80488+
80489 /* Can addr have changed??
80490 *
80491 * Answer: Yes, several device drivers can do it in their
80492@@ -1522,6 +1660,11 @@ munmap_back:
80493 vma_link(mm, vma, prev, rb_link, rb_parent);
80494 file = vma->vm_file;
80495
80496+#ifdef CONFIG_PAX_SEGMEXEC
80497+ if (vma_m)
80498+ BUG_ON(pax_mirror_vma(vma_m, vma));
80499+#endif
80500+
80501 /* Once vma denies write, undo our temporary denial count */
80502 if (correct_wcount)
80503 atomic_inc(&inode->i_writecount);
80504@@ -1529,6 +1672,7 @@ out:
80505 perf_event_mmap(vma);
80506
80507 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
80508+ track_exec_limit(mm, addr, addr + len, vm_flags);
80509 if (vm_flags & VM_LOCKED) {
80510 if (!mlock_vma_pages_range(vma, addr, addr + len))
80511 mm->locked_vm += (len >> PAGE_SHIFT);
80512@@ -1550,6 +1694,12 @@ unmap_and_free_vma:
80513 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
80514 charged = 0;
80515 free_vma:
80516+
80517+#ifdef CONFIG_PAX_SEGMEXEC
80518+ if (vma_m)
80519+ kmem_cache_free(vm_area_cachep, vma_m);
80520+#endif
80521+
80522 kmem_cache_free(vm_area_cachep, vma);
80523 unacct_error:
80524 if (charged)
80525@@ -1557,6 +1707,62 @@ unacct_error:
80526 return error;
80527 }
80528
80529+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
80530+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
80531+{
80532+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
80533+ return (random32() & 0xFF) << PAGE_SHIFT;
80534+
80535+ return 0;
80536+}
80537+#endif
80538+
80539+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
80540+{
80541+ if (!vma) {
80542+#ifdef CONFIG_STACK_GROWSUP
80543+ if (addr > sysctl_heap_stack_gap)
80544+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
80545+ else
80546+ vma = find_vma(current->mm, 0);
80547+ if (vma && (vma->vm_flags & VM_GROWSUP))
80548+ return false;
80549+#endif
80550+ return true;
80551+ }
80552+
80553+ if (addr + len > vma->vm_start)
80554+ return false;
80555+
80556+ if (vma->vm_flags & VM_GROWSDOWN)
80557+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
80558+#ifdef CONFIG_STACK_GROWSUP
80559+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
80560+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
80561+#endif
80562+ else if (offset)
80563+ return offset <= vma->vm_start - addr - len;
80564+
80565+ return true;
80566+}
80567+
80568+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
80569+{
80570+ if (vma->vm_start < len)
80571+ return -ENOMEM;
80572+
80573+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
80574+ if (offset <= vma->vm_start - len)
80575+ return vma->vm_start - len - offset;
80576+ else
80577+ return -ENOMEM;
80578+ }
80579+
80580+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
80581+ return vma->vm_start - len - sysctl_heap_stack_gap;
80582+ return -ENOMEM;
80583+}
80584+
80585 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
80586 {
80587 /*
80588@@ -1776,6 +1982,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
80589 struct mm_struct *mm = current->mm;
80590 struct vm_area_struct *vma;
80591 struct vm_unmapped_area_info info;
80592+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
80593
80594 if (len > TASK_SIZE)
80595 return -ENOMEM;
80596@@ -1783,17 +1990,26 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
80597 if (flags & MAP_FIXED)
80598 return addr;
80599
80600+#ifdef CONFIG_PAX_RANDMMAP
80601+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
80602+#endif
80603+
80604 if (addr) {
80605 addr = PAGE_ALIGN(addr);
80606 vma = find_vma(mm, addr);
80607- if (TASK_SIZE - len >= addr &&
80608- (!vma || addr + len <= vma->vm_start))
80609+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
80610 return addr;
80611 }
80612
80613 info.flags = 0;
80614 info.length = len;
80615 info.low_limit = TASK_UNMAPPED_BASE;
80616+
80617+#ifdef CONFIG_PAX_RANDMMAP
80618+ if (mm->pax_flags & MF_PAX_RANDMMAP)
80619+ info.low_limit += mm->delta_mmap;
80620+#endif
80621+
80622 info.high_limit = TASK_SIZE;
80623 info.align_mask = 0;
80624 return vm_unmapped_area(&info);
80625@@ -1802,10 +2018,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
80626
80627 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
80628 {
80629+
80630+#ifdef CONFIG_PAX_SEGMEXEC
80631+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
80632+ return;
80633+#endif
80634+
80635 /*
80636 * Is this a new hole at the lowest possible address?
80637 */
80638- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
80639+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
80640 mm->free_area_cache = addr;
80641 }
80642
80643@@ -1823,6 +2045,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
80644 struct mm_struct *mm = current->mm;
80645 unsigned long addr = addr0;
80646 struct vm_unmapped_area_info info;
80647+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
80648
80649 /* requested length too big for entire address space */
80650 if (len > TASK_SIZE)
80651@@ -1831,12 +2054,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
80652 if (flags & MAP_FIXED)
80653 return addr;
80654
80655+#ifdef CONFIG_PAX_RANDMMAP
80656+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
80657+#endif
80658+
80659 /* requesting a specific address */
80660 if (addr) {
80661 addr = PAGE_ALIGN(addr);
80662 vma = find_vma(mm, addr);
80663- if (TASK_SIZE - len >= addr &&
80664- (!vma || addr + len <= vma->vm_start))
80665+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
80666 return addr;
80667 }
80668
80669@@ -1857,6 +2083,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
80670 VM_BUG_ON(addr != -ENOMEM);
80671 info.flags = 0;
80672 info.low_limit = TASK_UNMAPPED_BASE;
80673+
80674+#ifdef CONFIG_PAX_RANDMMAP
80675+ if (mm->pax_flags & MF_PAX_RANDMMAP)
80676+ info.low_limit += mm->delta_mmap;
80677+#endif
80678+
80679 info.high_limit = TASK_SIZE;
80680 addr = vm_unmapped_area(&info);
80681 }
80682@@ -1867,6 +2099,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
80683
80684 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
80685 {
80686+
80687+#ifdef CONFIG_PAX_SEGMEXEC
80688+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
80689+ return;
80690+#endif
80691+
80692 /*
80693 * Is this a new hole at the highest possible address?
80694 */
80695@@ -1874,8 +2112,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
80696 mm->free_area_cache = addr;
80697
80698 /* dont allow allocations above current base */
80699- if (mm->free_area_cache > mm->mmap_base)
80700+ if (mm->free_area_cache > mm->mmap_base) {
80701 mm->free_area_cache = mm->mmap_base;
80702+ mm->cached_hole_size = ~0UL;
80703+ }
80704 }
80705
80706 unsigned long
80707@@ -1974,6 +2214,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
80708 return vma;
80709 }
80710
80711+#ifdef CONFIG_PAX_SEGMEXEC
80712+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
80713+{
80714+ struct vm_area_struct *vma_m;
80715+
80716+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
80717+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
80718+ BUG_ON(vma->vm_mirror);
80719+ return NULL;
80720+ }
80721+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
80722+ vma_m = vma->vm_mirror;
80723+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
80724+ BUG_ON(vma->vm_file != vma_m->vm_file);
80725+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
80726+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
80727+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
80728+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
80729+ return vma_m;
80730+}
80731+#endif
80732+
80733 /*
80734 * Verify that the stack growth is acceptable and
80735 * update accounting. This is shared with both the
80736@@ -1990,6 +2252,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
80737 return -ENOMEM;
80738
80739 /* Stack limit test */
80740+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
80741 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
80742 return -ENOMEM;
80743
80744@@ -2000,6 +2263,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
80745 locked = mm->locked_vm + grow;
80746 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
80747 limit >>= PAGE_SHIFT;
80748+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
80749 if (locked > limit && !capable(CAP_IPC_LOCK))
80750 return -ENOMEM;
80751 }
80752@@ -2029,37 +2293,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
80753 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
80754 * vma is the last one with address > vma->vm_end. Have to extend vma.
80755 */
80756+#ifndef CONFIG_IA64
80757+static
80758+#endif
80759 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
80760 {
80761 int error;
80762+ bool locknext;
80763
80764 if (!(vma->vm_flags & VM_GROWSUP))
80765 return -EFAULT;
80766
80767+ /* Also guard against wrapping around to address 0. */
80768+ if (address < PAGE_ALIGN(address+1))
80769+ address = PAGE_ALIGN(address+1);
80770+ else
80771+ return -ENOMEM;
80772+
80773 /*
80774 * We must make sure the anon_vma is allocated
80775 * so that the anon_vma locking is not a noop.
80776 */
80777 if (unlikely(anon_vma_prepare(vma)))
80778 return -ENOMEM;
80779+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
80780+ if (locknext && anon_vma_prepare(vma->vm_next))
80781+ return -ENOMEM;
80782 vma_lock_anon_vma(vma);
80783+ if (locknext)
80784+ vma_lock_anon_vma(vma->vm_next);
80785
80786 /*
80787 * vma->vm_start/vm_end cannot change under us because the caller
80788 * is required to hold the mmap_sem in read mode. We need the
80789- * anon_vma lock to serialize against concurrent expand_stacks.
80790- * Also guard against wrapping around to address 0.
80791+ * anon_vma locks to serialize against concurrent expand_stacks
80792+ * and expand_upwards.
80793 */
80794- if (address < PAGE_ALIGN(address+4))
80795- address = PAGE_ALIGN(address+4);
80796- else {
80797- vma_unlock_anon_vma(vma);
80798- return -ENOMEM;
80799- }
80800 error = 0;
80801
80802 /* Somebody else might have raced and expanded it already */
80803- if (address > vma->vm_end) {
80804+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
80805+ error = -ENOMEM;
80806+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
80807 unsigned long size, grow;
80808
80809 size = address - vma->vm_start;
80810@@ -2094,6 +2369,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
80811 }
80812 }
80813 }
80814+ if (locknext)
80815+ vma_unlock_anon_vma(vma->vm_next);
80816 vma_unlock_anon_vma(vma);
80817 khugepaged_enter_vma_merge(vma);
80818 validate_mm(vma->vm_mm);
80819@@ -2108,6 +2385,8 @@ int expand_downwards(struct vm_area_struct *vma,
80820 unsigned long address)
80821 {
80822 int error;
80823+ bool lockprev = false;
80824+ struct vm_area_struct *prev;
80825
80826 /*
80827 * We must make sure the anon_vma is allocated
80828@@ -2121,6 +2400,15 @@ int expand_downwards(struct vm_area_struct *vma,
80829 if (error)
80830 return error;
80831
80832+ prev = vma->vm_prev;
80833+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
80834+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
80835+#endif
80836+ if (lockprev && anon_vma_prepare(prev))
80837+ return -ENOMEM;
80838+ if (lockprev)
80839+ vma_lock_anon_vma(prev);
80840+
80841 vma_lock_anon_vma(vma);
80842
80843 /*
80844@@ -2130,9 +2418,17 @@ int expand_downwards(struct vm_area_struct *vma,
80845 */
80846
80847 /* Somebody else might have raced and expanded it already */
80848- if (address < vma->vm_start) {
80849+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
80850+ error = -ENOMEM;
80851+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
80852 unsigned long size, grow;
80853
80854+#ifdef CONFIG_PAX_SEGMEXEC
80855+ struct vm_area_struct *vma_m;
80856+
80857+ vma_m = pax_find_mirror_vma(vma);
80858+#endif
80859+
80860 size = vma->vm_end - address;
80861 grow = (vma->vm_start - address) >> PAGE_SHIFT;
80862
80863@@ -2157,6 +2453,18 @@ int expand_downwards(struct vm_area_struct *vma,
80864 vma->vm_pgoff -= grow;
80865 anon_vma_interval_tree_post_update_vma(vma);
80866 vma_gap_update(vma);
80867+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
80868+
80869+#ifdef CONFIG_PAX_SEGMEXEC
80870+ if (vma_m) {
80871+ anon_vma_interval_tree_pre_update_vma(vma_m);
80872+ vma_m->vm_start -= grow << PAGE_SHIFT;
80873+ vma_m->vm_pgoff -= grow;
80874+ anon_vma_interval_tree_post_update_vma(vma_m);
80875+ vma_gap_update(vma_m);
80876+ }
80877+#endif
80878+
80879 spin_unlock(&vma->vm_mm->page_table_lock);
80880
80881 perf_event_mmap(vma);
80882@@ -2263,6 +2571,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
80883 do {
80884 long nrpages = vma_pages(vma);
80885
80886+#ifdef CONFIG_PAX_SEGMEXEC
80887+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
80888+ vma = remove_vma(vma);
80889+ continue;
80890+ }
80891+#endif
80892+
80893 if (vma->vm_flags & VM_ACCOUNT)
80894 nr_accounted += nrpages;
80895 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
80896@@ -2308,6 +2623,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
80897 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
80898 vma->vm_prev = NULL;
80899 do {
80900+
80901+#ifdef CONFIG_PAX_SEGMEXEC
80902+ if (vma->vm_mirror) {
80903+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
80904+ vma->vm_mirror->vm_mirror = NULL;
80905+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
80906+ vma->vm_mirror = NULL;
80907+ }
80908+#endif
80909+
80910 vma_rb_erase(vma, &mm->mm_rb);
80911 mm->map_count--;
80912 tail_vma = vma;
80913@@ -2339,14 +2664,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
80914 struct vm_area_struct *new;
80915 int err = -ENOMEM;
80916
80917+#ifdef CONFIG_PAX_SEGMEXEC
80918+ struct vm_area_struct *vma_m, *new_m = NULL;
80919+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
80920+#endif
80921+
80922 if (is_vm_hugetlb_page(vma) && (addr &
80923 ~(huge_page_mask(hstate_vma(vma)))))
80924 return -EINVAL;
80925
80926+#ifdef CONFIG_PAX_SEGMEXEC
80927+ vma_m = pax_find_mirror_vma(vma);
80928+#endif
80929+
80930 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
80931 if (!new)
80932 goto out_err;
80933
80934+#ifdef CONFIG_PAX_SEGMEXEC
80935+ if (vma_m) {
80936+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
80937+ if (!new_m) {
80938+ kmem_cache_free(vm_area_cachep, new);
80939+ goto out_err;
80940+ }
80941+ }
80942+#endif
80943+
80944 /* most fields are the same, copy all, and then fixup */
80945 *new = *vma;
80946
80947@@ -2359,6 +2703,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
80948 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
80949 }
80950
80951+#ifdef CONFIG_PAX_SEGMEXEC
80952+ if (vma_m) {
80953+ *new_m = *vma_m;
80954+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
80955+ new_m->vm_mirror = new;
80956+ new->vm_mirror = new_m;
80957+
80958+ if (new_below)
80959+ new_m->vm_end = addr_m;
80960+ else {
80961+ new_m->vm_start = addr_m;
80962+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
80963+ }
80964+ }
80965+#endif
80966+
80967 pol = mpol_dup(vma_policy(vma));
80968 if (IS_ERR(pol)) {
80969 err = PTR_ERR(pol);
80970@@ -2381,6 +2741,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
80971 else
80972 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
80973
80974+#ifdef CONFIG_PAX_SEGMEXEC
80975+ if (!err && vma_m) {
80976+ if (anon_vma_clone(new_m, vma_m))
80977+ goto out_free_mpol;
80978+
80979+ mpol_get(pol);
80980+ vma_set_policy(new_m, pol);
80981+
80982+ if (new_m->vm_file)
80983+ get_file(new_m->vm_file);
80984+
80985+ if (new_m->vm_ops && new_m->vm_ops->open)
80986+ new_m->vm_ops->open(new_m);
80987+
80988+ if (new_below)
80989+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
80990+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
80991+ else
80992+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
80993+
80994+ if (err) {
80995+ if (new_m->vm_ops && new_m->vm_ops->close)
80996+ new_m->vm_ops->close(new_m);
80997+ if (new_m->vm_file)
80998+ fput(new_m->vm_file);
80999+ mpol_put(pol);
81000+ }
81001+ }
81002+#endif
81003+
81004 /* Success. */
81005 if (!err)
81006 return 0;
81007@@ -2390,10 +2780,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81008 new->vm_ops->close(new);
81009 if (new->vm_file)
81010 fput(new->vm_file);
81011- unlink_anon_vmas(new);
81012 out_free_mpol:
81013 mpol_put(pol);
81014 out_free_vma:
81015+
81016+#ifdef CONFIG_PAX_SEGMEXEC
81017+ if (new_m) {
81018+ unlink_anon_vmas(new_m);
81019+ kmem_cache_free(vm_area_cachep, new_m);
81020+ }
81021+#endif
81022+
81023+ unlink_anon_vmas(new);
81024 kmem_cache_free(vm_area_cachep, new);
81025 out_err:
81026 return err;
81027@@ -2406,6 +2804,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81028 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
81029 unsigned long addr, int new_below)
81030 {
81031+
81032+#ifdef CONFIG_PAX_SEGMEXEC
81033+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
81034+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
81035+ if (mm->map_count >= sysctl_max_map_count-1)
81036+ return -ENOMEM;
81037+ } else
81038+#endif
81039+
81040 if (mm->map_count >= sysctl_max_map_count)
81041 return -ENOMEM;
81042
81043@@ -2417,11 +2824,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
81044 * work. This now handles partial unmappings.
81045 * Jeremy Fitzhardinge <jeremy@goop.org>
81046 */
81047+#ifdef CONFIG_PAX_SEGMEXEC
81048 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
81049 {
81050+ int ret = __do_munmap(mm, start, len);
81051+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
81052+ return ret;
81053+
81054+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
81055+}
81056+
81057+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
81058+#else
81059+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
81060+#endif
81061+{
81062 unsigned long end;
81063 struct vm_area_struct *vma, *prev, *last;
81064
81065+ /*
81066+ * mm->mmap_sem is required to protect against another thread
81067+ * changing the mappings in case we sleep.
81068+ */
81069+ verify_mm_writelocked(mm);
81070+
81071 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
81072 return -EINVAL;
81073
81074@@ -2496,6 +2922,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
81075 /* Fix up all other VM information */
81076 remove_vma_list(mm, vma);
81077
81078+ track_exec_limit(mm, start, end, 0UL);
81079+
81080 return 0;
81081 }
81082
81083@@ -2504,6 +2932,13 @@ int vm_munmap(unsigned long start, size_t len)
81084 int ret;
81085 struct mm_struct *mm = current->mm;
81086
81087+
81088+#ifdef CONFIG_PAX_SEGMEXEC
81089+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
81090+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
81091+ return -EINVAL;
81092+#endif
81093+
81094 down_write(&mm->mmap_sem);
81095 ret = do_munmap(mm, start, len);
81096 up_write(&mm->mmap_sem);
81097@@ -2517,16 +2952,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
81098 return vm_munmap(addr, len);
81099 }
81100
81101-static inline void verify_mm_writelocked(struct mm_struct *mm)
81102-{
81103-#ifdef CONFIG_DEBUG_VM
81104- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
81105- WARN_ON(1);
81106- up_read(&mm->mmap_sem);
81107- }
81108-#endif
81109-}
81110-
81111 /*
81112 * this is really a simplified "do_mmap". it only handles
81113 * anonymous maps. eventually we may be able to do some
81114@@ -2540,6 +2965,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
81115 struct rb_node ** rb_link, * rb_parent;
81116 pgoff_t pgoff = addr >> PAGE_SHIFT;
81117 int error;
81118+ unsigned long charged;
81119
81120 len = PAGE_ALIGN(len);
81121 if (!len)
81122@@ -2547,16 +2973,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
81123
81124 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
81125
81126+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
81127+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
81128+ flags &= ~VM_EXEC;
81129+
81130+#ifdef CONFIG_PAX_MPROTECT
81131+ if (mm->pax_flags & MF_PAX_MPROTECT)
81132+ flags &= ~VM_MAYEXEC;
81133+#endif
81134+
81135+ }
81136+#endif
81137+
81138 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
81139 if (error & ~PAGE_MASK)
81140 return error;
81141
81142+ charged = len >> PAGE_SHIFT;
81143+
81144 /*
81145 * mlock MCL_FUTURE?
81146 */
81147 if (mm->def_flags & VM_LOCKED) {
81148 unsigned long locked, lock_limit;
81149- locked = len >> PAGE_SHIFT;
81150+ locked = charged;
81151 locked += mm->locked_vm;
81152 lock_limit = rlimit(RLIMIT_MEMLOCK);
81153 lock_limit >>= PAGE_SHIFT;
81154@@ -2573,21 +3013,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
81155 /*
81156 * Clear old maps. this also does some error checking for us
81157 */
81158- munmap_back:
81159 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
81160 if (do_munmap(mm, addr, len))
81161 return -ENOMEM;
81162- goto munmap_back;
81163+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
81164 }
81165
81166 /* Check against address space limits *after* clearing old maps... */
81167- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
81168+ if (!may_expand_vm(mm, charged))
81169 return -ENOMEM;
81170
81171 if (mm->map_count > sysctl_max_map_count)
81172 return -ENOMEM;
81173
81174- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
81175+ if (security_vm_enough_memory_mm(mm, charged))
81176 return -ENOMEM;
81177
81178 /* Can we just expand an old private anonymous mapping? */
81179@@ -2601,7 +3040,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
81180 */
81181 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
81182 if (!vma) {
81183- vm_unacct_memory(len >> PAGE_SHIFT);
81184+ vm_unacct_memory(charged);
81185 return -ENOMEM;
81186 }
81187
81188@@ -2615,11 +3054,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
81189 vma_link(mm, vma, prev, rb_link, rb_parent);
81190 out:
81191 perf_event_mmap(vma);
81192- mm->total_vm += len >> PAGE_SHIFT;
81193+ mm->total_vm += charged;
81194 if (flags & VM_LOCKED) {
81195 if (!mlock_vma_pages_range(vma, addr, addr + len))
81196- mm->locked_vm += (len >> PAGE_SHIFT);
81197+ mm->locked_vm += charged;
81198 }
81199+ track_exec_limit(mm, addr, addr + len, flags);
81200 return addr;
81201 }
81202
81203@@ -2677,6 +3117,7 @@ void exit_mmap(struct mm_struct *mm)
81204 while (vma) {
81205 if (vma->vm_flags & VM_ACCOUNT)
81206 nr_accounted += vma_pages(vma);
81207+ vma->vm_mirror = NULL;
81208 vma = remove_vma(vma);
81209 }
81210 vm_unacct_memory(nr_accounted);
81211@@ -2693,6 +3134,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
81212 struct vm_area_struct *prev;
81213 struct rb_node **rb_link, *rb_parent;
81214
81215+#ifdef CONFIG_PAX_SEGMEXEC
81216+ struct vm_area_struct *vma_m = NULL;
81217+#endif
81218+
81219+ if (security_mmap_addr(vma->vm_start))
81220+ return -EPERM;
81221+
81222 /*
81223 * The vm_pgoff of a purely anonymous vma should be irrelevant
81224 * until its first write fault, when page's anon_vma and index
81225@@ -2716,7 +3164,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
81226 security_vm_enough_memory_mm(mm, vma_pages(vma)))
81227 return -ENOMEM;
81228
81229+#ifdef CONFIG_PAX_SEGMEXEC
81230+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
81231+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
81232+ if (!vma_m)
81233+ return -ENOMEM;
81234+ }
81235+#endif
81236+
81237 vma_link(mm, vma, prev, rb_link, rb_parent);
81238+
81239+#ifdef CONFIG_PAX_SEGMEXEC
81240+ if (vma_m)
81241+ BUG_ON(pax_mirror_vma(vma_m, vma));
81242+#endif
81243+
81244 return 0;
81245 }
81246
81247@@ -2736,6 +3198,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
81248 struct mempolicy *pol;
81249 bool faulted_in_anon_vma = true;
81250
81251+ BUG_ON(vma->vm_mirror);
81252+
81253 /*
81254 * If anonymous vma has not yet been faulted, update new pgoff
81255 * to match new location, to increase its chance of merging.
81256@@ -2802,6 +3266,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
81257 return NULL;
81258 }
81259
81260+#ifdef CONFIG_PAX_SEGMEXEC
81261+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
81262+{
81263+ struct vm_area_struct *prev_m;
81264+ struct rb_node **rb_link_m, *rb_parent_m;
81265+ struct mempolicy *pol_m;
81266+
81267+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
81268+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
81269+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
81270+ *vma_m = *vma;
81271+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
81272+ if (anon_vma_clone(vma_m, vma))
81273+ return -ENOMEM;
81274+ pol_m = vma_policy(vma_m);
81275+ mpol_get(pol_m);
81276+ vma_set_policy(vma_m, pol_m);
81277+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
81278+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
81279+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
81280+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
81281+ if (vma_m->vm_file)
81282+ get_file(vma_m->vm_file);
81283+ if (vma_m->vm_ops && vma_m->vm_ops->open)
81284+ vma_m->vm_ops->open(vma_m);
81285+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
81286+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
81287+ vma_m->vm_mirror = vma;
81288+ vma->vm_mirror = vma_m;
81289+ return 0;
81290+}
81291+#endif
81292+
81293 /*
81294 * Return true if the calling process may expand its vm space by the passed
81295 * number of pages
81296@@ -2813,6 +3310,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
81297
81298 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
81299
81300+#ifdef CONFIG_PAX_RANDMMAP
81301+ if (mm->pax_flags & MF_PAX_RANDMMAP)
81302+ cur -= mm->brk_gap;
81303+#endif
81304+
81305+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
81306 if (cur + npages > lim)
81307 return 0;
81308 return 1;
81309@@ -2883,6 +3386,22 @@ int install_special_mapping(struct mm_struct *mm,
81310 vma->vm_start = addr;
81311 vma->vm_end = addr + len;
81312
81313+#ifdef CONFIG_PAX_MPROTECT
81314+ if (mm->pax_flags & MF_PAX_MPROTECT) {
81315+#ifndef CONFIG_PAX_MPROTECT_COMPAT
81316+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
81317+ return -EPERM;
81318+ if (!(vm_flags & VM_EXEC))
81319+ vm_flags &= ~VM_MAYEXEC;
81320+#else
81321+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
81322+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
81323+#endif
81324+ else
81325+ vm_flags &= ~VM_MAYWRITE;
81326+ }
81327+#endif
81328+
81329 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
81330 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
81331
81332diff --git a/mm/mprotect.c b/mm/mprotect.c
81333index 94722a4..9837984 100644
81334--- a/mm/mprotect.c
81335+++ b/mm/mprotect.c
81336@@ -23,10 +23,17 @@
81337 #include <linux/mmu_notifier.h>
81338 #include <linux/migrate.h>
81339 #include <linux/perf_event.h>
81340+
81341+#ifdef CONFIG_PAX_MPROTECT
81342+#include <linux/elf.h>
81343+#include <linux/binfmts.h>
81344+#endif
81345+
81346 #include <asm/uaccess.h>
81347 #include <asm/pgtable.h>
81348 #include <asm/cacheflush.h>
81349 #include <asm/tlbflush.h>
81350+#include <asm/mmu_context.h>
81351
81352 #ifndef pgprot_modify
81353 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
81354@@ -233,6 +240,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
81355 return pages;
81356 }
81357
81358+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
81359+/* called while holding the mmap semaphor for writing except stack expansion */
81360+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
81361+{
81362+ unsigned long oldlimit, newlimit = 0UL;
81363+
81364+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
81365+ return;
81366+
81367+ spin_lock(&mm->page_table_lock);
81368+ oldlimit = mm->context.user_cs_limit;
81369+ if ((prot & VM_EXEC) && oldlimit < end)
81370+ /* USER_CS limit moved up */
81371+ newlimit = end;
81372+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
81373+ /* USER_CS limit moved down */
81374+ newlimit = start;
81375+
81376+ if (newlimit) {
81377+ mm->context.user_cs_limit = newlimit;
81378+
81379+#ifdef CONFIG_SMP
81380+ wmb();
81381+ cpus_clear(mm->context.cpu_user_cs_mask);
81382+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
81383+#endif
81384+
81385+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
81386+ }
81387+ spin_unlock(&mm->page_table_lock);
81388+ if (newlimit == end) {
81389+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
81390+
81391+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
81392+ if (is_vm_hugetlb_page(vma))
81393+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
81394+ else
81395+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
81396+ }
81397+}
81398+#endif
81399+
81400 int
81401 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
81402 unsigned long start, unsigned long end, unsigned long newflags)
81403@@ -245,11 +294,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
81404 int error;
81405 int dirty_accountable = 0;
81406
81407+#ifdef CONFIG_PAX_SEGMEXEC
81408+ struct vm_area_struct *vma_m = NULL;
81409+ unsigned long start_m, end_m;
81410+
81411+ start_m = start + SEGMEXEC_TASK_SIZE;
81412+ end_m = end + SEGMEXEC_TASK_SIZE;
81413+#endif
81414+
81415 if (newflags == oldflags) {
81416 *pprev = vma;
81417 return 0;
81418 }
81419
81420+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
81421+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
81422+
81423+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
81424+ return -ENOMEM;
81425+
81426+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
81427+ return -ENOMEM;
81428+ }
81429+
81430 /*
81431 * If we make a private mapping writable we increase our commit;
81432 * but (without finer accounting) cannot reduce our commit if we
81433@@ -266,6 +333,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
81434 }
81435 }
81436
81437+#ifdef CONFIG_PAX_SEGMEXEC
81438+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
81439+ if (start != vma->vm_start) {
81440+ error = split_vma(mm, vma, start, 1);
81441+ if (error)
81442+ goto fail;
81443+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
81444+ *pprev = (*pprev)->vm_next;
81445+ }
81446+
81447+ if (end != vma->vm_end) {
81448+ error = split_vma(mm, vma, end, 0);
81449+ if (error)
81450+ goto fail;
81451+ }
81452+
81453+ if (pax_find_mirror_vma(vma)) {
81454+ error = __do_munmap(mm, start_m, end_m - start_m);
81455+ if (error)
81456+ goto fail;
81457+ } else {
81458+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
81459+ if (!vma_m) {
81460+ error = -ENOMEM;
81461+ goto fail;
81462+ }
81463+ vma->vm_flags = newflags;
81464+ error = pax_mirror_vma(vma_m, vma);
81465+ if (error) {
81466+ vma->vm_flags = oldflags;
81467+ goto fail;
81468+ }
81469+ }
81470+ }
81471+#endif
81472+
81473 /*
81474 * First try to merge with previous and/or next vma.
81475 */
81476@@ -296,9 +399,21 @@ success:
81477 * vm_flags and vm_page_prot are protected by the mmap_sem
81478 * held in write mode.
81479 */
81480+
81481+#ifdef CONFIG_PAX_SEGMEXEC
81482+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
81483+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
81484+#endif
81485+
81486 vma->vm_flags = newflags;
81487+
81488+#ifdef CONFIG_PAX_MPROTECT
81489+ if (mm->binfmt && mm->binfmt->handle_mprotect)
81490+ mm->binfmt->handle_mprotect(vma, newflags);
81491+#endif
81492+
81493 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
81494- vm_get_page_prot(newflags));
81495+ vm_get_page_prot(vma->vm_flags));
81496
81497 if (vma_wants_writenotify(vma)) {
81498 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
81499@@ -337,6 +452,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
81500 end = start + len;
81501 if (end <= start)
81502 return -ENOMEM;
81503+
81504+#ifdef CONFIG_PAX_SEGMEXEC
81505+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
81506+ if (end > SEGMEXEC_TASK_SIZE)
81507+ return -EINVAL;
81508+ } else
81509+#endif
81510+
81511+ if (end > TASK_SIZE)
81512+ return -EINVAL;
81513+
81514 if (!arch_validate_prot(prot))
81515 return -EINVAL;
81516
81517@@ -344,7 +470,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
81518 /*
81519 * Does the application expect PROT_READ to imply PROT_EXEC:
81520 */
81521- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
81522+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
81523 prot |= PROT_EXEC;
81524
81525 vm_flags = calc_vm_prot_bits(prot);
81526@@ -376,6 +502,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
81527 if (start > vma->vm_start)
81528 prev = vma;
81529
81530+#ifdef CONFIG_PAX_MPROTECT
81531+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
81532+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
81533+#endif
81534+
81535 for (nstart = start ; ; ) {
81536 unsigned long newflags;
81537
81538@@ -386,6 +517,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
81539
81540 /* newflags >> 4 shift VM_MAY% in place of VM_% */
81541 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
81542+ if (prot & (PROT_WRITE | PROT_EXEC))
81543+ gr_log_rwxmprotect(vma->vm_file);
81544+
81545+ error = -EACCES;
81546+ goto out;
81547+ }
81548+
81549+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
81550 error = -EACCES;
81551 goto out;
81552 }
81553@@ -400,6 +539,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
81554 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
81555 if (error)
81556 goto out;
81557+
81558+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
81559+
81560 nstart = tmp;
81561
81562 if (nstart < prev->vm_end)
81563diff --git a/mm/mremap.c b/mm/mremap.c
81564index e1031e1..1f2a0a1 100644
81565--- a/mm/mremap.c
81566+++ b/mm/mremap.c
81567@@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
81568 continue;
81569 pte = ptep_get_and_clear(mm, old_addr, old_pte);
81570 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
81571+
81572+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
81573+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
81574+ pte = pte_exprotect(pte);
81575+#endif
81576+
81577 set_pte_at(mm, new_addr, new_pte, pte);
81578 }
81579
81580@@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
81581 if (is_vm_hugetlb_page(vma))
81582 goto Einval;
81583
81584+#ifdef CONFIG_PAX_SEGMEXEC
81585+ if (pax_find_mirror_vma(vma))
81586+ goto Einval;
81587+#endif
81588+
81589 /* We can't remap across vm area boundaries */
81590 if (old_len > vma->vm_end - addr)
81591 goto Efault;
81592@@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
81593 unsigned long ret = -EINVAL;
81594 unsigned long charged = 0;
81595 unsigned long map_flags;
81596+ unsigned long pax_task_size = TASK_SIZE;
81597
81598 if (new_addr & ~PAGE_MASK)
81599 goto out;
81600
81601- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
81602+#ifdef CONFIG_PAX_SEGMEXEC
81603+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
81604+ pax_task_size = SEGMEXEC_TASK_SIZE;
81605+#endif
81606+
81607+ pax_task_size -= PAGE_SIZE;
81608+
81609+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
81610 goto out;
81611
81612 /* Check if the location we're moving into overlaps the
81613 * old location at all, and fail if it does.
81614 */
81615- if ((new_addr <= addr) && (new_addr+new_len) > addr)
81616- goto out;
81617-
81618- if ((addr <= new_addr) && (addr+old_len) > new_addr)
81619+ if (addr + old_len > new_addr && new_addr + new_len > addr)
81620 goto out;
81621
81622 ret = do_munmap(mm, new_addr, new_len);
81623@@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
81624 struct vm_area_struct *vma;
81625 unsigned long ret = -EINVAL;
81626 unsigned long charged = 0;
81627+ unsigned long pax_task_size = TASK_SIZE;
81628
81629 down_write(&current->mm->mmap_sem);
81630
81631@@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
81632 if (!new_len)
81633 goto out;
81634
81635+#ifdef CONFIG_PAX_SEGMEXEC
81636+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
81637+ pax_task_size = SEGMEXEC_TASK_SIZE;
81638+#endif
81639+
81640+ pax_task_size -= PAGE_SIZE;
81641+
81642+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
81643+ old_len > pax_task_size || addr > pax_task_size-old_len)
81644+ goto out;
81645+
81646 if (flags & MREMAP_FIXED) {
81647 if (flags & MREMAP_MAYMOVE)
81648 ret = mremap_to(addr, old_len, new_addr, new_len);
81649@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
81650 addr + new_len);
81651 }
81652 ret = addr;
81653+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
81654 goto out;
81655 }
81656 }
81657@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
81658 goto out;
81659 }
81660
81661+ map_flags = vma->vm_flags;
81662 ret = move_vma(vma, addr, old_len, new_len, new_addr);
81663+ if (!(ret & ~PAGE_MASK)) {
81664+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
81665+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
81666+ }
81667 }
81668 out:
81669 if (ret & ~PAGE_MASK)
81670diff --git a/mm/nommu.c b/mm/nommu.c
81671index 79c3cac..4d357e0 100644
81672--- a/mm/nommu.c
81673+++ b/mm/nommu.c
81674@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
81675 int sysctl_overcommit_ratio = 50; /* default is 50% */
81676 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
81677 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
81678-int heap_stack_gap = 0;
81679
81680 atomic_long_t mmap_pages_allocated;
81681
81682@@ -839,15 +838,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
81683 EXPORT_SYMBOL(find_vma);
81684
81685 /*
81686- * find a VMA
81687- * - we don't extend stack VMAs under NOMMU conditions
81688- */
81689-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
81690-{
81691- return find_vma(mm, addr);
81692-}
81693-
81694-/*
81695 * expand a stack to a given address
81696 * - not supported under NOMMU conditions
81697 */
81698@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
81699
81700 /* most fields are the same, copy all, and then fixup */
81701 *new = *vma;
81702+ INIT_LIST_HEAD(&new->anon_vma_chain);
81703 *region = *vma->vm_region;
81704 new->vm_region = region;
81705
81706diff --git a/mm/page-writeback.c b/mm/page-writeback.c
81707index 0713bfb..e3774e0 100644
81708--- a/mm/page-writeback.c
81709+++ b/mm/page-writeback.c
81710@@ -1630,7 +1630,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
81711 }
81712 }
81713
81714-static struct notifier_block __cpuinitdata ratelimit_nb = {
81715+static struct notifier_block ratelimit_nb = {
81716 .notifier_call = ratelimit_handler,
81717 .next = NULL,
81718 };
81719diff --git a/mm/page_alloc.c b/mm/page_alloc.c
81720index 6a83cd3..bc2dcb6 100644
81721--- a/mm/page_alloc.c
81722+++ b/mm/page_alloc.c
81723@@ -338,7 +338,7 @@ out:
81724 * This usage means that zero-order pages may not be compound.
81725 */
81726
81727-static void free_compound_page(struct page *page)
81728+void free_compound_page(struct page *page)
81729 {
81730 __free_pages_ok(page, compound_order(page));
81731 }
81732@@ -693,6 +693,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
81733 int i;
81734 int bad = 0;
81735
81736+#ifdef CONFIG_PAX_MEMORY_SANITIZE
81737+ unsigned long index = 1UL << order;
81738+#endif
81739+
81740 trace_mm_page_free(page, order);
81741 kmemcheck_free_shadow(page, order);
81742
81743@@ -708,6 +712,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
81744 debug_check_no_obj_freed(page_address(page),
81745 PAGE_SIZE << order);
81746 }
81747+
81748+#ifdef CONFIG_PAX_MEMORY_SANITIZE
81749+ for (; index; --index)
81750+ sanitize_highpage(page + index - 1);
81751+#endif
81752+
81753 arch_free_page(page, order);
81754 kernel_map_pages(page, 1 << order, 0);
81755
81756@@ -861,8 +871,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
81757 arch_alloc_page(page, order);
81758 kernel_map_pages(page, 1 << order, 1);
81759
81760+#ifndef CONFIG_PAX_MEMORY_SANITIZE
81761 if (gfp_flags & __GFP_ZERO)
81762 prep_zero_page(page, order, gfp_flags);
81763+#endif
81764
81765 if (order && (gfp_flags & __GFP_COMP))
81766 prep_compound_page(page, order);
81767@@ -3752,7 +3764,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
81768 unsigned long pfn;
81769
81770 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
81771+#ifdef CONFIG_X86_32
81772+ /* boot failures in VMware 8 on 32bit vanilla since
81773+ this change */
81774+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
81775+#else
81776 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
81777+#endif
81778 return 1;
81779 }
81780 return 0;
81781diff --git a/mm/percpu.c b/mm/percpu.c
81782index 8c8e08f..73a5cda 100644
81783--- a/mm/percpu.c
81784+++ b/mm/percpu.c
81785@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
81786 static unsigned int pcpu_high_unit_cpu __read_mostly;
81787
81788 /* the address of the first chunk which starts with the kernel static area */
81789-void *pcpu_base_addr __read_mostly;
81790+void *pcpu_base_addr __read_only;
81791 EXPORT_SYMBOL_GPL(pcpu_base_addr);
81792
81793 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
81794diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
81795index 926b466..b23df53 100644
81796--- a/mm/process_vm_access.c
81797+++ b/mm/process_vm_access.c
81798@@ -13,6 +13,7 @@
81799 #include <linux/uio.h>
81800 #include <linux/sched.h>
81801 #include <linux/highmem.h>
81802+#include <linux/security.h>
81803 #include <linux/ptrace.h>
81804 #include <linux/slab.h>
81805 #include <linux/syscalls.h>
81806@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
81807 size_t iov_l_curr_offset = 0;
81808 ssize_t iov_len;
81809
81810+ return -ENOSYS; // PaX: until properly audited
81811+
81812 /*
81813 * Work out how many pages of struct pages we're going to need
81814 * when eventually calling get_user_pages
81815 */
81816 for (i = 0; i < riovcnt; i++) {
81817 iov_len = rvec[i].iov_len;
81818- if (iov_len > 0) {
81819- nr_pages_iov = ((unsigned long)rvec[i].iov_base
81820- + iov_len)
81821- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
81822- / PAGE_SIZE + 1;
81823- nr_pages = max(nr_pages, nr_pages_iov);
81824- }
81825+ if (iov_len <= 0)
81826+ continue;
81827+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
81828+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
81829+ nr_pages = max(nr_pages, nr_pages_iov);
81830 }
81831
81832 if (nr_pages == 0)
81833@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
81834 goto free_proc_pages;
81835 }
81836
81837+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
81838+ rc = -EPERM;
81839+ goto put_task_struct;
81840+ }
81841+
81842 mm = mm_access(task, PTRACE_MODE_ATTACH);
81843 if (!mm || IS_ERR(mm)) {
81844 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
81845diff --git a/mm/rmap.c b/mm/rmap.c
81846index 2c78f8c..9e9c624 100644
81847--- a/mm/rmap.c
81848+++ b/mm/rmap.c
81849@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
81850 struct anon_vma *anon_vma = vma->anon_vma;
81851 struct anon_vma_chain *avc;
81852
81853+#ifdef CONFIG_PAX_SEGMEXEC
81854+ struct anon_vma_chain *avc_m = NULL;
81855+#endif
81856+
81857 might_sleep();
81858 if (unlikely(!anon_vma)) {
81859 struct mm_struct *mm = vma->vm_mm;
81860@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
81861 if (!avc)
81862 goto out_enomem;
81863
81864+#ifdef CONFIG_PAX_SEGMEXEC
81865+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
81866+ if (!avc_m)
81867+ goto out_enomem_free_avc;
81868+#endif
81869+
81870 anon_vma = find_mergeable_anon_vma(vma);
81871 allocated = NULL;
81872 if (!anon_vma) {
81873@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
81874 /* page_table_lock to protect against threads */
81875 spin_lock(&mm->page_table_lock);
81876 if (likely(!vma->anon_vma)) {
81877+
81878+#ifdef CONFIG_PAX_SEGMEXEC
81879+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
81880+
81881+ if (vma_m) {
81882+ BUG_ON(vma_m->anon_vma);
81883+ vma_m->anon_vma = anon_vma;
81884+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
81885+ avc_m = NULL;
81886+ }
81887+#endif
81888+
81889 vma->anon_vma = anon_vma;
81890 anon_vma_chain_link(vma, avc, anon_vma);
81891 allocated = NULL;
81892@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
81893
81894 if (unlikely(allocated))
81895 put_anon_vma(allocated);
81896+
81897+#ifdef CONFIG_PAX_SEGMEXEC
81898+ if (unlikely(avc_m))
81899+ anon_vma_chain_free(avc_m);
81900+#endif
81901+
81902 if (unlikely(avc))
81903 anon_vma_chain_free(avc);
81904 }
81905 return 0;
81906
81907 out_enomem_free_avc:
81908+
81909+#ifdef CONFIG_PAX_SEGMEXEC
81910+ if (avc_m)
81911+ anon_vma_chain_free(avc_m);
81912+#endif
81913+
81914 anon_vma_chain_free(avc);
81915 out_enomem:
81916 return -ENOMEM;
81917@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
81918 * Attach the anon_vmas from src to dst.
81919 * Returns 0 on success, -ENOMEM on failure.
81920 */
81921-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
81922+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
81923 {
81924 struct anon_vma_chain *avc, *pavc;
81925 struct anon_vma *root = NULL;
81926@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
81927 * the corresponding VMA in the parent process is attached to.
81928 * Returns 0 on success, non-zero on failure.
81929 */
81930-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
81931+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
81932 {
81933 struct anon_vma_chain *avc;
81934 struct anon_vma *anon_vma;
81935diff --git a/mm/shmem.c b/mm/shmem.c
81936index efd0b3a..994b702 100644
81937--- a/mm/shmem.c
81938+++ b/mm/shmem.c
81939@@ -31,7 +31,7 @@
81940 #include <linux/export.h>
81941 #include <linux/swap.h>
81942
81943-static struct vfsmount *shm_mnt;
81944+struct vfsmount *shm_mnt;
81945
81946 #ifdef CONFIG_SHMEM
81947 /*
81948@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
81949 #define BOGO_DIRENT_SIZE 20
81950
81951 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
81952-#define SHORT_SYMLINK_LEN 128
81953+#define SHORT_SYMLINK_LEN 64
81954
81955 /*
81956 * shmem_fallocate and shmem_writepage communicate via inode->i_private
81957@@ -2202,6 +2202,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
81958 static int shmem_xattr_validate(const char *name)
81959 {
81960 struct { const char *prefix; size_t len; } arr[] = {
81961+
81962+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
81963+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
81964+#endif
81965+
81966 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
81967 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
81968 };
81969@@ -2257,6 +2262,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
81970 if (err)
81971 return err;
81972
81973+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
81974+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
81975+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
81976+ return -EOPNOTSUPP;
81977+ if (size > 8)
81978+ return -EINVAL;
81979+ }
81980+#endif
81981+
81982 return simple_xattr_set(&info->xattrs, name, value, size, flags);
81983 }
81984
81985@@ -2562,8 +2576,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
81986 int err = -ENOMEM;
81987
81988 /* Round up to L1_CACHE_BYTES to resist false sharing */
81989- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
81990- L1_CACHE_BYTES), GFP_KERNEL);
81991+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
81992 if (!sbinfo)
81993 return -ENOMEM;
81994
81995diff --git a/mm/slab.c b/mm/slab.c
81996index e7667a3..b62c169 100644
81997--- a/mm/slab.c
81998+++ b/mm/slab.c
81999@@ -306,7 +306,7 @@ struct kmem_list3 {
82000 * Need this for bootstrapping a per node allocator.
82001 */
82002 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
82003-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
82004+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
82005 #define CACHE_CACHE 0
82006 #define SIZE_AC MAX_NUMNODES
82007 #define SIZE_L3 (2 * MAX_NUMNODES)
82008@@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
82009 if ((x)->max_freeable < i) \
82010 (x)->max_freeable = i; \
82011 } while (0)
82012-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
82013-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
82014-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
82015-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
82016+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
82017+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
82018+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
82019+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
82020 #else
82021 #define STATS_INC_ACTIVE(x) do { } while (0)
82022 #define STATS_DEC_ACTIVE(x) do { } while (0)
82023@@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
82024 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
82025 */
82026 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
82027- const struct slab *slab, void *obj)
82028+ const struct slab *slab, const void *obj)
82029 {
82030 u32 offset = (obj - slab->s_mem);
82031 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
82032@@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
82033 struct cache_names {
82034 char *name;
82035 char *name_dma;
82036+ char *name_usercopy;
82037 };
82038
82039 static struct cache_names __initdata cache_names[] = {
82040-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
82041+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
82042 #include <linux/kmalloc_sizes.h>
82043- {NULL,}
82044+ {NULL}
82045 #undef CACHE
82046 };
82047
82048@@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
82049 if (unlikely(gfpflags & GFP_DMA))
82050 return csizep->cs_dmacachep;
82051 #endif
82052+
82053+#ifdef CONFIG_PAX_USERCOPY_SLABS
82054+ if (unlikely(gfpflags & GFP_USERCOPY))
82055+ return csizep->cs_usercopycachep;
82056+#endif
82057+
82058 return csizep->cs_cachep;
82059 }
82060
82061@@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
82062 return notifier_from_errno(err);
82063 }
82064
82065-static struct notifier_block __cpuinitdata cpucache_notifier = {
82066+static struct notifier_block cpucache_notifier = {
82067 &cpuup_callback, NULL, 0
82068 };
82069
82070@@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
82071 */
82072
82073 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
82074- sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
82075+ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
82076
82077 if (INDEX_AC != INDEX_L3)
82078 sizes[INDEX_L3].cs_cachep =
82079 create_kmalloc_cache(names[INDEX_L3].name,
82080- sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
82081+ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
82082
82083 slab_early_init = 0;
82084
82085@@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
82086 */
82087 if (!sizes->cs_cachep)
82088 sizes->cs_cachep = create_kmalloc_cache(names->name,
82089- sizes->cs_size, ARCH_KMALLOC_FLAGS);
82090+ sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
82091
82092 #ifdef CONFIG_ZONE_DMA
82093 sizes->cs_dmacachep = create_kmalloc_cache(
82094 names->name_dma, sizes->cs_size,
82095 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
82096 #endif
82097+
82098+#ifdef CONFIG_PAX_USERCOPY_SLABS
82099+ sizes->cs_usercopycachep = create_kmalloc_cache(
82100+ names->name_usercopy, sizes->cs_size,
82101+ ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
82102+#endif
82103+
82104 sizes++;
82105 names++;
82106 }
82107@@ -4365,10 +4379,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
82108 }
82109 /* cpu stats */
82110 {
82111- unsigned long allochit = atomic_read(&cachep->allochit);
82112- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
82113- unsigned long freehit = atomic_read(&cachep->freehit);
82114- unsigned long freemiss = atomic_read(&cachep->freemiss);
82115+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
82116+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
82117+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
82118+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
82119
82120 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
82121 allochit, allocmiss, freehit, freemiss);
82122@@ -4600,13 +4614,71 @@ static const struct file_operations proc_slabstats_operations = {
82123 static int __init slab_proc_init(void)
82124 {
82125 #ifdef CONFIG_DEBUG_SLAB_LEAK
82126- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
82127+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
82128 #endif
82129 return 0;
82130 }
82131 module_init(slab_proc_init);
82132 #endif
82133
82134+bool is_usercopy_object(const void *ptr)
82135+{
82136+ struct page *page;
82137+ struct kmem_cache *cachep;
82138+
82139+ if (ZERO_OR_NULL_PTR(ptr))
82140+ return false;
82141+
82142+ if (!slab_is_available())
82143+ return false;
82144+
82145+ if (!virt_addr_valid(ptr))
82146+ return false;
82147+
82148+ page = virt_to_head_page(ptr);
82149+
82150+ if (!PageSlab(page))
82151+ return false;
82152+
82153+ cachep = page->slab_cache;
82154+ return cachep->flags & SLAB_USERCOPY;
82155+}
82156+
82157+#ifdef CONFIG_PAX_USERCOPY
82158+const char *check_heap_object(const void *ptr, unsigned long n)
82159+{
82160+ struct page *page;
82161+ struct kmem_cache *cachep;
82162+ struct slab *slabp;
82163+ unsigned int objnr;
82164+ unsigned long offset;
82165+
82166+ if (ZERO_OR_NULL_PTR(ptr))
82167+ return "<null>";
82168+
82169+ if (!virt_addr_valid(ptr))
82170+ return NULL;
82171+
82172+ page = virt_to_head_page(ptr);
82173+
82174+ if (!PageSlab(page))
82175+ return NULL;
82176+
82177+ cachep = page->slab_cache;
82178+ if (!(cachep->flags & SLAB_USERCOPY))
82179+ return cachep->name;
82180+
82181+ slabp = page->slab_page;
82182+ objnr = obj_to_index(cachep, slabp, ptr);
82183+ BUG_ON(objnr >= cachep->num);
82184+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
82185+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
82186+ return NULL;
82187+
82188+ return cachep->name;
82189+}
82190+#endif
82191+
82192 /**
82193 * ksize - get the actual amount of memory allocated for a given object
82194 * @objp: Pointer to the object
82195diff --git a/mm/slab.h b/mm/slab.h
82196index 34a98d6..73633d1 100644
82197--- a/mm/slab.h
82198+++ b/mm/slab.h
82199@@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
82200
82201 /* Legal flag mask for kmem_cache_create(), for various configurations */
82202 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
82203- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
82204+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
82205
82206 #if defined(CONFIG_DEBUG_SLAB)
82207 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
82208@@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
82209 return s;
82210
82211 page = virt_to_head_page(x);
82212+
82213+ BUG_ON(!PageSlab(page));
82214+
82215 cachep = page->slab_cache;
82216 if (slab_equal_or_root(cachep, s))
82217 return cachep;
82218diff --git a/mm/slab_common.c b/mm/slab_common.c
82219index 3f3cd97..93b0236 100644
82220--- a/mm/slab_common.c
82221+++ b/mm/slab_common.c
82222@@ -22,7 +22,7 @@
82223
82224 #include "slab.h"
82225
82226-enum slab_state slab_state;
82227+enum slab_state slab_state __read_only;
82228 LIST_HEAD(slab_caches);
82229 DEFINE_MUTEX(slab_mutex);
82230 struct kmem_cache *kmem_cache;
82231@@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
82232
82233 err = __kmem_cache_create(s, flags);
82234 if (!err) {
82235- s->refcount = 1;
82236+ atomic_set(&s->refcount, 1);
82237 list_add(&s->list, &slab_caches);
82238 memcg_cache_list_add(memcg, s);
82239 } else {
82240@@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
82241
82242 get_online_cpus();
82243 mutex_lock(&slab_mutex);
82244- s->refcount--;
82245- if (!s->refcount) {
82246+ if (atomic_dec_and_test(&s->refcount)) {
82247 list_del(&s->list);
82248
82249 if (!__kmem_cache_shutdown(s)) {
82250@@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
82251 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
82252 name, size, err);
82253
82254- s->refcount = -1; /* Exempt from merging for now */
82255+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
82256 }
82257
82258 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
82259@@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
82260
82261 create_boot_cache(s, name, size, flags);
82262 list_add(&s->list, &slab_caches);
82263- s->refcount = 1;
82264+ atomic_set(&s->refcount, 1);
82265 return s;
82266 }
82267
82268diff --git a/mm/slob.c b/mm/slob.c
82269index a99fdf7..f5b6577 100644
82270--- a/mm/slob.c
82271+++ b/mm/slob.c
82272@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
82273 /*
82274 * Return the size of a slob block.
82275 */
82276-static slobidx_t slob_units(slob_t *s)
82277+static slobidx_t slob_units(const slob_t *s)
82278 {
82279 if (s->units > 0)
82280 return s->units;
82281@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
82282 /*
82283 * Return the next free slob block pointer after this one.
82284 */
82285-static slob_t *slob_next(slob_t *s)
82286+static slob_t *slob_next(const slob_t *s)
82287 {
82288 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
82289 slobidx_t next;
82290@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
82291 /*
82292 * Returns true if s is the last free block in its page.
82293 */
82294-static int slob_last(slob_t *s)
82295+static int slob_last(const slob_t *s)
82296 {
82297 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
82298 }
82299
82300-static void *slob_new_pages(gfp_t gfp, int order, int node)
82301+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
82302 {
82303- void *page;
82304+ struct page *page;
82305
82306 #ifdef CONFIG_NUMA
82307 if (node != NUMA_NO_NODE)
82308@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
82309 if (!page)
82310 return NULL;
82311
82312- return page_address(page);
82313+ __SetPageSlab(page);
82314+ return page;
82315 }
82316
82317-static void slob_free_pages(void *b, int order)
82318+static void slob_free_pages(struct page *sp, int order)
82319 {
82320 if (current->reclaim_state)
82321 current->reclaim_state->reclaimed_slab += 1 << order;
82322- free_pages((unsigned long)b, order);
82323+ __ClearPageSlab(sp);
82324+ reset_page_mapcount(sp);
82325+ sp->private = 0;
82326+ __free_pages(sp, order);
82327 }
82328
82329 /*
82330@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
82331
82332 /* Not enough space: must allocate a new page */
82333 if (!b) {
82334- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
82335- if (!b)
82336+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
82337+ if (!sp)
82338 return NULL;
82339- sp = virt_to_page(b);
82340- __SetPageSlab(sp);
82341+ b = page_address(sp);
82342
82343 spin_lock_irqsave(&slob_lock, flags);
82344 sp->units = SLOB_UNITS(PAGE_SIZE);
82345 sp->freelist = b;
82346+ sp->private = 0;
82347 INIT_LIST_HEAD(&sp->list);
82348 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
82349 set_slob_page_free(sp, slob_list);
82350@@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
82351 if (slob_page_free(sp))
82352 clear_slob_page_free(sp);
82353 spin_unlock_irqrestore(&slob_lock, flags);
82354- __ClearPageSlab(sp);
82355- reset_page_mapcount(sp);
82356- slob_free_pages(b, 0);
82357+ slob_free_pages(sp, 0);
82358 return;
82359 }
82360
82361@@ -424,11 +426,10 @@ out:
82362 */
82363
82364 static __always_inline void *
82365-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
82366+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
82367 {
82368- unsigned int *m;
82369- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
82370- void *ret;
82371+ slob_t *m;
82372+ void *ret = NULL;
82373
82374 gfp &= gfp_allowed_mask;
82375
82376@@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
82377
82378 if (!m)
82379 return NULL;
82380- *m = size;
82381+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
82382+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
82383+ m[0].units = size;
82384+ m[1].units = align;
82385 ret = (void *)m + align;
82386
82387 trace_kmalloc_node(caller, ret,
82388 size, size + align, gfp, node);
82389 } else {
82390 unsigned int order = get_order(size);
82391+ struct page *page;
82392
82393 if (likely(order))
82394 gfp |= __GFP_COMP;
82395- ret = slob_new_pages(gfp, order, node);
82396+ page = slob_new_pages(gfp, order, node);
82397+ if (page) {
82398+ ret = page_address(page);
82399+ page->private = size;
82400+ }
82401
82402 trace_kmalloc_node(caller, ret,
82403 size, PAGE_SIZE << order, gfp, node);
82404 }
82405
82406- kmemleak_alloc(ret, size, 1, gfp);
82407+ return ret;
82408+}
82409+
82410+static __always_inline void *
82411+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
82412+{
82413+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
82414+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
82415+
82416+ if (!ZERO_OR_NULL_PTR(ret))
82417+ kmemleak_alloc(ret, size, 1, gfp);
82418 return ret;
82419 }
82420
82421@@ -494,33 +513,110 @@ void kfree(const void *block)
82422 kmemleak_free(block);
82423
82424 sp = virt_to_page(block);
82425- if (PageSlab(sp)) {
82426+ VM_BUG_ON(!PageSlab(sp));
82427+ if (!sp->private) {
82428 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
82429- unsigned int *m = (unsigned int *)(block - align);
82430- slob_free(m, *m + align);
82431- } else
82432+ slob_t *m = (slob_t *)(block - align);
82433+ slob_free(m, m[0].units + align);
82434+ } else {
82435+ __ClearPageSlab(sp);
82436+ reset_page_mapcount(sp);
82437+ sp->private = 0;
82438 __free_pages(sp, compound_order(sp));
82439+ }
82440 }
82441 EXPORT_SYMBOL(kfree);
82442
82443+bool is_usercopy_object(const void *ptr)
82444+{
82445+ if (!slab_is_available())
82446+ return false;
82447+
82448+ // PAX: TODO
82449+
82450+ return false;
82451+}
82452+
82453+#ifdef CONFIG_PAX_USERCOPY
82454+const char *check_heap_object(const void *ptr, unsigned long n)
82455+{
82456+ struct page *page;
82457+ const slob_t *free;
82458+ const void *base;
82459+ unsigned long flags;
82460+
82461+ if (ZERO_OR_NULL_PTR(ptr))
82462+ return "<null>";
82463+
82464+ if (!virt_addr_valid(ptr))
82465+ return NULL;
82466+
82467+ page = virt_to_head_page(ptr);
82468+ if (!PageSlab(page))
82469+ return NULL;
82470+
82471+ if (page->private) {
82472+ base = page;
82473+ if (base <= ptr && n <= page->private - (ptr - base))
82474+ return NULL;
82475+ return "<slob>";
82476+ }
82477+
82478+ /* some tricky double walking to find the chunk */
82479+ spin_lock_irqsave(&slob_lock, flags);
82480+ base = (void *)((unsigned long)ptr & PAGE_MASK);
82481+ free = page->freelist;
82482+
82483+ while (!slob_last(free) && (void *)free <= ptr) {
82484+ base = free + slob_units(free);
82485+ free = slob_next(free);
82486+ }
82487+
82488+ while (base < (void *)free) {
82489+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
82490+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
82491+ int offset;
82492+
82493+ if (ptr < base + align)
82494+ break;
82495+
82496+ offset = ptr - base - align;
82497+ if (offset >= m) {
82498+ base += size;
82499+ continue;
82500+ }
82501+
82502+ if (n > m - offset)
82503+ break;
82504+
82505+ spin_unlock_irqrestore(&slob_lock, flags);
82506+ return NULL;
82507+ }
82508+
82509+ spin_unlock_irqrestore(&slob_lock, flags);
82510+ return "<slob>";
82511+}
82512+#endif
82513+
82514 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
82515 size_t ksize(const void *block)
82516 {
82517 struct page *sp;
82518 int align;
82519- unsigned int *m;
82520+ slob_t *m;
82521
82522 BUG_ON(!block);
82523 if (unlikely(block == ZERO_SIZE_PTR))
82524 return 0;
82525
82526 sp = virt_to_page(block);
82527- if (unlikely(!PageSlab(sp)))
82528- return PAGE_SIZE << compound_order(sp);
82529+ VM_BUG_ON(!PageSlab(sp));
82530+ if (sp->private)
82531+ return sp->private;
82532
82533 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
82534- m = (unsigned int *)(block - align);
82535- return SLOB_UNITS(*m) * SLOB_UNIT;
82536+ m = (slob_t *)(block - align);
82537+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
82538 }
82539 EXPORT_SYMBOL(ksize);
82540
82541@@ -536,23 +632,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
82542
82543 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
82544 {
82545- void *b;
82546+ void *b = NULL;
82547
82548 flags &= gfp_allowed_mask;
82549
82550 lockdep_trace_alloc(flags);
82551
82552+#ifdef CONFIG_PAX_USERCOPY_SLABS
82553+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
82554+#else
82555 if (c->size < PAGE_SIZE) {
82556 b = slob_alloc(c->size, flags, c->align, node);
82557 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
82558 SLOB_UNITS(c->size) * SLOB_UNIT,
82559 flags, node);
82560 } else {
82561- b = slob_new_pages(flags, get_order(c->size), node);
82562+ struct page *sp;
82563+
82564+ sp = slob_new_pages(flags, get_order(c->size), node);
82565+ if (sp) {
82566+ b = page_address(sp);
82567+ sp->private = c->size;
82568+ }
82569 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
82570 PAGE_SIZE << get_order(c->size),
82571 flags, node);
82572 }
82573+#endif
82574
82575 if (c->ctor)
82576 c->ctor(b);
82577@@ -564,10 +670,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
82578
82579 static void __kmem_cache_free(void *b, int size)
82580 {
82581- if (size < PAGE_SIZE)
82582+ struct page *sp;
82583+
82584+ sp = virt_to_page(b);
82585+ BUG_ON(!PageSlab(sp));
82586+ if (!sp->private)
82587 slob_free(b, size);
82588 else
82589- slob_free_pages(b, get_order(size));
82590+ slob_free_pages(sp, get_order(size));
82591 }
82592
82593 static void kmem_rcu_free(struct rcu_head *head)
82594@@ -580,17 +690,31 @@ static void kmem_rcu_free(struct rcu_head *head)
82595
82596 void kmem_cache_free(struct kmem_cache *c, void *b)
82597 {
82598+ int size = c->size;
82599+
82600+#ifdef CONFIG_PAX_USERCOPY_SLABS
82601+ if (size + c->align < PAGE_SIZE) {
82602+ size += c->align;
82603+ b -= c->align;
82604+ }
82605+#endif
82606+
82607 kmemleak_free_recursive(b, c->flags);
82608 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
82609 struct slob_rcu *slob_rcu;
82610- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
82611- slob_rcu->size = c->size;
82612+ slob_rcu = b + (size - sizeof(struct slob_rcu));
82613+ slob_rcu->size = size;
82614 call_rcu(&slob_rcu->head, kmem_rcu_free);
82615 } else {
82616- __kmem_cache_free(b, c->size);
82617+ __kmem_cache_free(b, size);
82618 }
82619
82620+#ifdef CONFIG_PAX_USERCOPY_SLABS
82621+ trace_kfree(_RET_IP_, b);
82622+#else
82623 trace_kmem_cache_free(_RET_IP_, b);
82624+#endif
82625+
82626 }
82627 EXPORT_SYMBOL(kmem_cache_free);
82628
82629diff --git a/mm/slub.c b/mm/slub.c
82630index ba2ca53..00b1f4e 100644
82631--- a/mm/slub.c
82632+++ b/mm/slub.c
82633@@ -197,7 +197,7 @@ struct track {
82634
82635 enum track_item { TRACK_ALLOC, TRACK_FREE };
82636
82637-#ifdef CONFIG_SYSFS
82638+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82639 static int sysfs_slab_add(struct kmem_cache *);
82640 static int sysfs_slab_alias(struct kmem_cache *, const char *);
82641 static void sysfs_slab_remove(struct kmem_cache *);
82642@@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
82643 if (!t->addr)
82644 return;
82645
82646- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
82647+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
82648 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
82649 #ifdef CONFIG_STACKTRACE
82650 {
82651@@ -2653,7 +2653,7 @@ static int slub_min_objects;
82652 * Merge control. If this is set then no merging of slab caches will occur.
82653 * (Could be removed. This was introduced to pacify the merge skeptics.)
82654 */
82655-static int slub_nomerge;
82656+static int slub_nomerge = 1;
82657
82658 /*
82659 * Calculate the order of allocation given an slab object size.
82660@@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
82661 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
82662 #endif
82663
82664+#ifdef CONFIG_PAX_USERCOPY_SLABS
82665+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
82666+#endif
82667+
82668 static int __init setup_slub_min_order(char *str)
82669 {
82670 get_option(&str, &slub_min_order);
82671@@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
82672 return kmalloc_dma_caches[index];
82673
82674 #endif
82675+
82676+#ifdef CONFIG_PAX_USERCOPY_SLABS
82677+ if (flags & SLAB_USERCOPY)
82678+ return kmalloc_usercopy_caches[index];
82679+
82680+#endif
82681+
82682 return kmalloc_caches[index];
82683 }
82684
82685@@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
82686 EXPORT_SYMBOL(__kmalloc_node);
82687 #endif
82688
82689+bool is_usercopy_object(const void *ptr)
82690+{
82691+ struct page *page;
82692+ struct kmem_cache *s;
82693+
82694+ if (ZERO_OR_NULL_PTR(ptr))
82695+ return false;
82696+
82697+ if (!slab_is_available())
82698+ return false;
82699+
82700+ if (!virt_addr_valid(ptr))
82701+ return false;
82702+
82703+ page = virt_to_head_page(ptr);
82704+
82705+ if (!PageSlab(page))
82706+ return false;
82707+
82708+ s = page->slab_cache;
82709+ return s->flags & SLAB_USERCOPY;
82710+}
82711+
82712+#ifdef CONFIG_PAX_USERCOPY
82713+const char *check_heap_object(const void *ptr, unsigned long n)
82714+{
82715+ struct page *page;
82716+ struct kmem_cache *s;
82717+ unsigned long offset;
82718+
82719+ if (ZERO_OR_NULL_PTR(ptr))
82720+ return "<null>";
82721+
82722+ if (!virt_addr_valid(ptr))
82723+ return NULL;
82724+
82725+ page = virt_to_head_page(ptr);
82726+
82727+ if (!PageSlab(page))
82728+ return NULL;
82729+
82730+ s = page->slab_cache;
82731+ if (!(s->flags & SLAB_USERCOPY))
82732+ return s->name;
82733+
82734+ offset = (ptr - page_address(page)) % s->size;
82735+ if (offset <= s->object_size && n <= s->object_size - offset)
82736+ return NULL;
82737+
82738+ return s->name;
82739+}
82740+#endif
82741+
82742 size_t ksize(const void *object)
82743 {
82744 struct page *page;
82745@@ -3712,17 +3776,17 @@ void __init kmem_cache_init(void)
82746
82747 /* Caches that are not of the two-to-the-power-of size */
82748 if (KMALLOC_MIN_SIZE <= 32) {
82749- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
82750+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
82751 caches++;
82752 }
82753
82754 if (KMALLOC_MIN_SIZE <= 64) {
82755- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
82756+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
82757 caches++;
82758 }
82759
82760 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
82761- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
82762+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
82763 caches++;
82764 }
82765
82766@@ -3764,6 +3828,22 @@ void __init kmem_cache_init(void)
82767 }
82768 }
82769 #endif
82770+
82771+#ifdef CONFIG_PAX_USERCOPY_SLABS
82772+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
82773+ struct kmem_cache *s = kmalloc_caches[i];
82774+
82775+ if (s && s->size) {
82776+ char *name = kasprintf(GFP_NOWAIT,
82777+ "usercopy-kmalloc-%d", s->object_size);
82778+
82779+ BUG_ON(!name);
82780+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
82781+ s->object_size, SLAB_USERCOPY);
82782+ }
82783+ }
82784+#endif
82785+
82786 printk(KERN_INFO
82787 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
82788 " CPUs=%d, Nodes=%d\n",
82789@@ -3790,7 +3870,7 @@ static int slab_unmergeable(struct kmem_cache *s)
82790 /*
82791 * We may have set a slab to be unmergeable during bootstrap.
82792 */
82793- if (s->refcount < 0)
82794+ if (atomic_read(&s->refcount) < 0)
82795 return 1;
82796
82797 return 0;
82798@@ -3848,7 +3928,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
82799
82800 s = find_mergeable(memcg, size, align, flags, name, ctor);
82801 if (s) {
82802- s->refcount++;
82803+ atomic_inc(&s->refcount);
82804 /*
82805 * Adjust the object sizes so that we clear
82806 * the complete object on kzalloc.
82807@@ -3857,7 +3937,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
82808 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
82809
82810 if (sysfs_slab_alias(s, name)) {
82811- s->refcount--;
82812+ atomic_dec(&s->refcount);
82813 s = NULL;
82814 }
82815 }
82816@@ -3919,7 +3999,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
82817 return NOTIFY_OK;
82818 }
82819
82820-static struct notifier_block __cpuinitdata slab_notifier = {
82821+static struct notifier_block slab_notifier = {
82822 .notifier_call = slab_cpuup_callback
82823 };
82824
82825@@ -3977,7 +4057,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
82826 }
82827 #endif
82828
82829-#ifdef CONFIG_SYSFS
82830+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82831 static int count_inuse(struct page *page)
82832 {
82833 return page->inuse;
82834@@ -4364,12 +4444,12 @@ static void resiliency_test(void)
82835 validate_slab_cache(kmalloc_caches[9]);
82836 }
82837 #else
82838-#ifdef CONFIG_SYSFS
82839+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82840 static void resiliency_test(void) {};
82841 #endif
82842 #endif
82843
82844-#ifdef CONFIG_SYSFS
82845+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82846 enum slab_stat_type {
82847 SL_ALL, /* All slabs */
82848 SL_PARTIAL, /* Only partially allocated slabs */
82849@@ -4613,7 +4693,7 @@ SLAB_ATTR_RO(ctor);
82850
82851 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
82852 {
82853- return sprintf(buf, "%d\n", s->refcount - 1);
82854+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
82855 }
82856 SLAB_ATTR_RO(aliases);
82857
82858@@ -5266,6 +5346,7 @@ static char *create_unique_id(struct kmem_cache *s)
82859 return name;
82860 }
82861
82862+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82863 static int sysfs_slab_add(struct kmem_cache *s)
82864 {
82865 int err;
82866@@ -5323,6 +5404,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
82867 kobject_del(&s->kobj);
82868 kobject_put(&s->kobj);
82869 }
82870+#endif
82871
82872 /*
82873 * Need to buffer aliases during bootup until sysfs becomes
82874@@ -5336,6 +5418,7 @@ struct saved_alias {
82875
82876 static struct saved_alias *alias_list;
82877
82878+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82879 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
82880 {
82881 struct saved_alias *al;
82882@@ -5358,6 +5441,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
82883 alias_list = al;
82884 return 0;
82885 }
82886+#endif
82887
82888 static int __init slab_sysfs_init(void)
82889 {
82890diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
82891index 1b7e22a..3fcd4f3 100644
82892--- a/mm/sparse-vmemmap.c
82893+++ b/mm/sparse-vmemmap.c
82894@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
82895 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
82896 if (!p)
82897 return NULL;
82898- pud_populate(&init_mm, pud, p);
82899+ pud_populate_kernel(&init_mm, pud, p);
82900 }
82901 return pud;
82902 }
82903@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
82904 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
82905 if (!p)
82906 return NULL;
82907- pgd_populate(&init_mm, pgd, p);
82908+ pgd_populate_kernel(&init_mm, pgd, p);
82909 }
82910 return pgd;
82911 }
82912diff --git a/mm/sparse.c b/mm/sparse.c
82913index 6b5fb76..db0c190 100644
82914--- a/mm/sparse.c
82915+++ b/mm/sparse.c
82916@@ -782,7 +782,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
82917
82918 for (i = 0; i < PAGES_PER_SECTION; i++) {
82919 if (PageHWPoison(&memmap[i])) {
82920- atomic_long_sub(1, &mce_bad_pages);
82921+ atomic_long_sub_unchecked(1, &mce_bad_pages);
82922 ClearPageHWPoison(&memmap[i]);
82923 }
82924 }
82925diff --git a/mm/swap.c b/mm/swap.c
82926index 6310dc2..3662b3f 100644
82927--- a/mm/swap.c
82928+++ b/mm/swap.c
82929@@ -30,6 +30,7 @@
82930 #include <linux/backing-dev.h>
82931 #include <linux/memcontrol.h>
82932 #include <linux/gfp.h>
82933+#include <linux/hugetlb.h>
82934
82935 #include "internal.h"
82936
82937@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
82938
82939 __page_cache_release(page);
82940 dtor = get_compound_page_dtor(page);
82941+ if (!PageHuge(page))
82942+ BUG_ON(dtor != free_compound_page);
82943 (*dtor)(page);
82944 }
82945
82946diff --git a/mm/swapfile.c b/mm/swapfile.c
82947index e97a0e5..b50e796 100644
82948--- a/mm/swapfile.c
82949+++ b/mm/swapfile.c
82950@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
82951
82952 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
82953 /* Activity counter to indicate that a swapon or swapoff has occurred */
82954-static atomic_t proc_poll_event = ATOMIC_INIT(0);
82955+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
82956
82957 static inline unsigned char swap_count(unsigned char ent)
82958 {
82959@@ -1608,7 +1608,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
82960 }
82961 filp_close(swap_file, NULL);
82962 err = 0;
82963- atomic_inc(&proc_poll_event);
82964+ atomic_inc_unchecked(&proc_poll_event);
82965 wake_up_interruptible(&proc_poll_wait);
82966
82967 out_dput:
82968@@ -1625,8 +1625,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
82969
82970 poll_wait(file, &proc_poll_wait, wait);
82971
82972- if (seq->poll_event != atomic_read(&proc_poll_event)) {
82973- seq->poll_event = atomic_read(&proc_poll_event);
82974+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
82975+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
82976 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
82977 }
82978
82979@@ -1724,7 +1724,7 @@ static int swaps_open(struct inode *inode, struct file *file)
82980 return ret;
82981
82982 seq = file->private_data;
82983- seq->poll_event = atomic_read(&proc_poll_event);
82984+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
82985 return 0;
82986 }
82987
82988@@ -2066,7 +2066,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
82989 (frontswap_map) ? "FS" : "");
82990
82991 mutex_unlock(&swapon_mutex);
82992- atomic_inc(&proc_poll_event);
82993+ atomic_inc_unchecked(&proc_poll_event);
82994 wake_up_interruptible(&proc_poll_wait);
82995
82996 if (S_ISREG(inode->i_mode))
82997diff --git a/mm/util.c b/mm/util.c
82998index c55e26b..3f913a9 100644
82999--- a/mm/util.c
83000+++ b/mm/util.c
83001@@ -292,6 +292,12 @@ done:
83002 void arch_pick_mmap_layout(struct mm_struct *mm)
83003 {
83004 mm->mmap_base = TASK_UNMAPPED_BASE;
83005+
83006+#ifdef CONFIG_PAX_RANDMMAP
83007+ if (mm->pax_flags & MF_PAX_RANDMMAP)
83008+ mm->mmap_base += mm->delta_mmap;
83009+#endif
83010+
83011 mm->get_unmapped_area = arch_get_unmapped_area;
83012 mm->unmap_area = arch_unmap_area;
83013 }
83014diff --git a/mm/vmalloc.c b/mm/vmalloc.c
83015index 5123a16..f234a48 100644
83016--- a/mm/vmalloc.c
83017+++ b/mm/vmalloc.c
83018@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
83019
83020 pte = pte_offset_kernel(pmd, addr);
83021 do {
83022- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
83023- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
83024+
83025+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
83026+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
83027+ BUG_ON(!pte_exec(*pte));
83028+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
83029+ continue;
83030+ }
83031+#endif
83032+
83033+ {
83034+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
83035+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
83036+ }
83037 } while (pte++, addr += PAGE_SIZE, addr != end);
83038 }
83039
83040@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
83041 pte = pte_alloc_kernel(pmd, addr);
83042 if (!pte)
83043 return -ENOMEM;
83044+
83045+ pax_open_kernel();
83046 do {
83047 struct page *page = pages[*nr];
83048
83049- if (WARN_ON(!pte_none(*pte)))
83050+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
83051+ if (pgprot_val(prot) & _PAGE_NX)
83052+#endif
83053+
83054+ if (!pte_none(*pte)) {
83055+ pax_close_kernel();
83056+ WARN_ON(1);
83057 return -EBUSY;
83058- if (WARN_ON(!page))
83059+ }
83060+ if (!page) {
83061+ pax_close_kernel();
83062+ WARN_ON(1);
83063 return -ENOMEM;
83064+ }
83065 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
83066 (*nr)++;
83067 } while (pte++, addr += PAGE_SIZE, addr != end);
83068+ pax_close_kernel();
83069 return 0;
83070 }
83071
83072@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
83073 pmd_t *pmd;
83074 unsigned long next;
83075
83076- pmd = pmd_alloc(&init_mm, pud, addr);
83077+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
83078 if (!pmd)
83079 return -ENOMEM;
83080 do {
83081@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
83082 pud_t *pud;
83083 unsigned long next;
83084
83085- pud = pud_alloc(&init_mm, pgd, addr);
83086+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
83087 if (!pud)
83088 return -ENOMEM;
83089 do {
83090@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
83091 * and fall back on vmalloc() if that fails. Others
83092 * just put it in the vmalloc space.
83093 */
83094-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
83095+#ifdef CONFIG_MODULES
83096+#ifdef MODULES_VADDR
83097 unsigned long addr = (unsigned long)x;
83098 if (addr >= MODULES_VADDR && addr < MODULES_END)
83099 return 1;
83100 #endif
83101+
83102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
83103+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
83104+ return 1;
83105+#endif
83106+
83107+#endif
83108+
83109 return is_vmalloc_addr(x);
83110 }
83111
83112@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
83113
83114 if (!pgd_none(*pgd)) {
83115 pud_t *pud = pud_offset(pgd, addr);
83116+#ifdef CONFIG_X86
83117+ if (!pud_large(*pud))
83118+#endif
83119 if (!pud_none(*pud)) {
83120 pmd_t *pmd = pmd_offset(pud, addr);
83121+#ifdef CONFIG_X86
83122+ if (!pmd_large(*pmd))
83123+#endif
83124 if (!pmd_none(*pmd)) {
83125 pte_t *ptep, pte;
83126
83127@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
83128 * Allocate a region of KVA of the specified size and alignment, within the
83129 * vstart and vend.
83130 */
83131-static struct vmap_area *alloc_vmap_area(unsigned long size,
83132+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
83133 unsigned long align,
83134 unsigned long vstart, unsigned long vend,
83135 int node, gfp_t gfp_mask)
83136@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
83137 struct vm_struct *area;
83138
83139 BUG_ON(in_interrupt());
83140+
83141+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
83142+ if (flags & VM_KERNEXEC) {
83143+ if (start != VMALLOC_START || end != VMALLOC_END)
83144+ return NULL;
83145+ start = (unsigned long)MODULES_EXEC_VADDR;
83146+ end = (unsigned long)MODULES_EXEC_END;
83147+ }
83148+#endif
83149+
83150 if (flags & VM_IOREMAP) {
83151 int bit = fls(size);
83152
83153@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
83154 if (count > totalram_pages)
83155 return NULL;
83156
83157+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
83158+ if (!(pgprot_val(prot) & _PAGE_NX))
83159+ flags |= VM_KERNEXEC;
83160+#endif
83161+
83162 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
83163 __builtin_return_address(0));
83164 if (!area)
83165@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
83166 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
83167 goto fail;
83168
83169+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
83170+ if (!(pgprot_val(prot) & _PAGE_NX))
83171+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
83172+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
83173+ else
83174+#endif
83175+
83176 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
83177 start, end, node, gfp_mask, caller);
83178 if (!area)
83179@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
83180 * For tight control over page level allocator and protection flags
83181 * use __vmalloc() instead.
83182 */
83183-
83184 void *vmalloc_exec(unsigned long size)
83185 {
83186- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
83187+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
83188 -1, __builtin_return_address(0));
83189 }
83190
83191@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
83192 unsigned long uaddr = vma->vm_start;
83193 unsigned long usize = vma->vm_end - vma->vm_start;
83194
83195+ BUG_ON(vma->vm_mirror);
83196+
83197 if ((PAGE_SIZE-1) & (unsigned long)addr)
83198 return -EINVAL;
83199
83200@@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
83201 v->addr, v->addr + v->size, v->size);
83202
83203 if (v->caller)
83204+#ifdef CONFIG_GRKERNSEC_HIDESYM
83205+ seq_printf(m, " %pK", v->caller);
83206+#else
83207 seq_printf(m, " %pS", v->caller);
83208+#endif
83209
83210 if (v->nr_pages)
83211 seq_printf(m, " pages=%d", v->nr_pages);
83212diff --git a/mm/vmstat.c b/mm/vmstat.c
83213index 9800306..76b4b27 100644
83214--- a/mm/vmstat.c
83215+++ b/mm/vmstat.c
83216@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
83217 *
83218 * vm_stat contains the global counters
83219 */
83220-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
83221+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
83222 EXPORT_SYMBOL(vm_stat);
83223
83224 #ifdef CONFIG_SMP
83225@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
83226 v = p->vm_stat_diff[i];
83227 p->vm_stat_diff[i] = 0;
83228 local_irq_restore(flags);
83229- atomic_long_add(v, &zone->vm_stat[i]);
83230+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
83231 global_diff[i] += v;
83232 #ifdef CONFIG_NUMA
83233 /* 3 seconds idle till flush */
83234@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
83235
83236 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
83237 if (global_diff[i])
83238- atomic_long_add(global_diff[i], &vm_stat[i]);
83239+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
83240 }
83241
83242 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
83243@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
83244 if (pset->vm_stat_diff[i]) {
83245 int v = pset->vm_stat_diff[i];
83246 pset->vm_stat_diff[i] = 0;
83247- atomic_long_add(v, &zone->vm_stat[i]);
83248- atomic_long_add(v, &vm_stat[i]);
83249+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
83250+ atomic_long_add_unchecked(v, &vm_stat[i]);
83251 }
83252 }
83253 #endif
83254@@ -1223,7 +1223,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
83255 return NOTIFY_OK;
83256 }
83257
83258-static struct notifier_block __cpuinitdata vmstat_notifier =
83259+static struct notifier_block vmstat_notifier =
83260 { &vmstat_cpuup_callback, NULL, 0 };
83261 #endif
83262
83263@@ -1238,10 +1238,20 @@ static int __init setup_vmstat(void)
83264 start_cpu_timer(cpu);
83265 #endif
83266 #ifdef CONFIG_PROC_FS
83267- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
83268- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
83269- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
83270- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
83271+ {
83272+ mode_t gr_mode = S_IRUGO;
83273+#ifdef CONFIG_GRKERNSEC_PROC_ADD
83274+ gr_mode = S_IRUSR;
83275+#endif
83276+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
83277+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
83278+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
83279+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
83280+#else
83281+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
83282+#endif
83283+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
83284+ }
83285 #endif
83286 return 0;
83287 }
83288diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
83289index a292e80..785ee68 100644
83290--- a/net/8021q/vlan.c
83291+++ b/net/8021q/vlan.c
83292@@ -485,7 +485,7 @@ out:
83293 return NOTIFY_DONE;
83294 }
83295
83296-static struct notifier_block vlan_notifier_block __read_mostly = {
83297+static struct notifier_block vlan_notifier_block = {
83298 .notifier_call = vlan_device_event,
83299 };
83300
83301@@ -560,8 +560,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
83302 err = -EPERM;
83303 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
83304 break;
83305- if ((args.u.name_type >= 0) &&
83306- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
83307+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
83308 struct vlan_net *vn;
83309
83310 vn = net_generic(net, vlan_net_id);
83311diff --git a/net/9p/mod.c b/net/9p/mod.c
83312index 6ab36ae..6f1841b 100644
83313--- a/net/9p/mod.c
83314+++ b/net/9p/mod.c
83315@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
83316 void v9fs_register_trans(struct p9_trans_module *m)
83317 {
83318 spin_lock(&v9fs_trans_lock);
83319- list_add_tail(&m->list, &v9fs_trans_list);
83320+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
83321 spin_unlock(&v9fs_trans_lock);
83322 }
83323 EXPORT_SYMBOL(v9fs_register_trans);
83324@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
83325 void v9fs_unregister_trans(struct p9_trans_module *m)
83326 {
83327 spin_lock(&v9fs_trans_lock);
83328- list_del_init(&m->list);
83329+ pax_list_del_init((struct list_head *)&m->list);
83330 spin_unlock(&v9fs_trans_lock);
83331 }
83332 EXPORT_SYMBOL(v9fs_unregister_trans);
83333diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
83334index 02efb25..41541a9 100644
83335--- a/net/9p/trans_fd.c
83336+++ b/net/9p/trans_fd.c
83337@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
83338 oldfs = get_fs();
83339 set_fs(get_ds());
83340 /* The cast to a user pointer is valid due to the set_fs() */
83341- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
83342+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
83343 set_fs(oldfs);
83344
83345 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
83346diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
83347index 876fbe8..8bbea9f 100644
83348--- a/net/atm/atm_misc.c
83349+++ b/net/atm/atm_misc.c
83350@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
83351 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
83352 return 1;
83353 atm_return(vcc, truesize);
83354- atomic_inc(&vcc->stats->rx_drop);
83355+ atomic_inc_unchecked(&vcc->stats->rx_drop);
83356 return 0;
83357 }
83358 EXPORT_SYMBOL(atm_charge);
83359@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
83360 }
83361 }
83362 atm_return(vcc, guess);
83363- atomic_inc(&vcc->stats->rx_drop);
83364+ atomic_inc_unchecked(&vcc->stats->rx_drop);
83365 return NULL;
83366 }
83367 EXPORT_SYMBOL(atm_alloc_charge);
83368@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
83369
83370 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
83371 {
83372-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
83373+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
83374 __SONET_ITEMS
83375 #undef __HANDLE_ITEM
83376 }
83377@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
83378
83379 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
83380 {
83381-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
83382+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
83383 __SONET_ITEMS
83384 #undef __HANDLE_ITEM
83385 }
83386diff --git a/net/atm/lec.h b/net/atm/lec.h
83387index a86aff9..3a0d6f6 100644
83388--- a/net/atm/lec.h
83389+++ b/net/atm/lec.h
83390@@ -48,7 +48,7 @@ struct lane2_ops {
83391 const u8 *tlvs, u32 sizeoftlvs);
83392 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
83393 const u8 *tlvs, u32 sizeoftlvs);
83394-};
83395+} __no_const;
83396
83397 /*
83398 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
83399diff --git a/net/atm/proc.c b/net/atm/proc.c
83400index 0d020de..011c7bb 100644
83401--- a/net/atm/proc.c
83402+++ b/net/atm/proc.c
83403@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
83404 const struct k_atm_aal_stats *stats)
83405 {
83406 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
83407- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
83408- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
83409- atomic_read(&stats->rx_drop));
83410+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
83411+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
83412+ atomic_read_unchecked(&stats->rx_drop));
83413 }
83414
83415 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
83416diff --git a/net/atm/resources.c b/net/atm/resources.c
83417index 0447d5d..3cf4728 100644
83418--- a/net/atm/resources.c
83419+++ b/net/atm/resources.c
83420@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
83421 static void copy_aal_stats(struct k_atm_aal_stats *from,
83422 struct atm_aal_stats *to)
83423 {
83424-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
83425+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
83426 __AAL_STAT_ITEMS
83427 #undef __HANDLE_ITEM
83428 }
83429@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
83430 static void subtract_aal_stats(struct k_atm_aal_stats *from,
83431 struct atm_aal_stats *to)
83432 {
83433-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
83434+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
83435 __AAL_STAT_ITEMS
83436 #undef __HANDLE_ITEM
83437 }
83438diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
83439index d5744b7..506bae3 100644
83440--- a/net/ax25/sysctl_net_ax25.c
83441+++ b/net/ax25/sysctl_net_ax25.c
83442@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
83443 {
83444 char path[sizeof("net/ax25/") + IFNAMSIZ];
83445 int k;
83446- struct ctl_table *table;
83447+ ctl_table_no_const *table;
83448
83449 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
83450 if (!table)
83451diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
83452index 7d02ebd..4d4cc01 100644
83453--- a/net/batman-adv/bat_iv_ogm.c
83454+++ b/net/batman-adv/bat_iv_ogm.c
83455@@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
83456
83457 /* randomize initial seqno to avoid collision */
83458 get_random_bytes(&random_seqno, sizeof(random_seqno));
83459- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
83460+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
83461
83462 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
83463 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
83464@@ -615,9 +615,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
83465 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
83466
83467 /* change sequence number to network order */
83468- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
83469+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
83470 batadv_ogm_packet->seqno = htonl(seqno);
83471- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
83472+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
83473
83474 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
83475 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
83476@@ -1022,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
83477 return;
83478
83479 /* could be changed by schedule_own_packet() */
83480- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
83481+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
83482
83483 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
83484 has_directlink_flag = 1;
83485diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
83486index f1d37cd..4190879 100644
83487--- a/net/batman-adv/hard-interface.c
83488+++ b/net/batman-adv/hard-interface.c
83489@@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
83490 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
83491 dev_add_pack(&hard_iface->batman_adv_ptype);
83492
83493- atomic_set(&hard_iface->frag_seqno, 1);
83494+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
83495 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
83496 hard_iface->net_dev->name);
83497
83498@@ -493,7 +493,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
83499 /* This can't be called via a bat_priv callback because
83500 * we have no bat_priv yet.
83501 */
83502- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
83503+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
83504 hard_iface->bat_iv.ogm_buff = NULL;
83505
83506 return hard_iface;
83507diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
83508index 6b548fd..fc32c8d 100644
83509--- a/net/batman-adv/soft-interface.c
83510+++ b/net/batman-adv/soft-interface.c
83511@@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
83512 primary_if->net_dev->dev_addr, ETH_ALEN);
83513
83514 /* set broadcast sequence number */
83515- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
83516+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
83517 bcast_packet->seqno = htonl(seqno);
83518
83519 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
83520@@ -497,7 +497,7 @@ struct net_device *batadv_softif_create(const char *name)
83521 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
83522
83523 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
83524- atomic_set(&bat_priv->bcast_seqno, 1);
83525+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
83526 atomic_set(&bat_priv->tt.vn, 0);
83527 atomic_set(&bat_priv->tt.local_changes, 0);
83528 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
83529diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
83530index ae9ac9a..11e0fe7 100644
83531--- a/net/batman-adv/types.h
83532+++ b/net/batman-adv/types.h
83533@@ -48,7 +48,7 @@
83534 struct batadv_hard_iface_bat_iv {
83535 unsigned char *ogm_buff;
83536 int ogm_buff_len;
83537- atomic_t ogm_seqno;
83538+ atomic_unchecked_t ogm_seqno;
83539 };
83540
83541 struct batadv_hard_iface {
83542@@ -56,7 +56,7 @@ struct batadv_hard_iface {
83543 int16_t if_num;
83544 char if_status;
83545 struct net_device *net_dev;
83546- atomic_t frag_seqno;
83547+ atomic_unchecked_t frag_seqno;
83548 struct kobject *hardif_obj;
83549 atomic_t refcount;
83550 struct packet_type batman_adv_ptype;
83551@@ -284,7 +284,7 @@ struct batadv_priv {
83552 atomic_t orig_interval; /* uint */
83553 atomic_t hop_penalty; /* uint */
83554 atomic_t log_level; /* uint */
83555- atomic_t bcast_seqno;
83556+ atomic_unchecked_t bcast_seqno;
83557 atomic_t bcast_queue_left;
83558 atomic_t batman_queue_left;
83559 char num_ifaces;
83560diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
83561index 10aff49..ea8e021 100644
83562--- a/net/batman-adv/unicast.c
83563+++ b/net/batman-adv/unicast.c
83564@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
83565 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
83566 frag2->flags = large_tail;
83567
83568- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
83569+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
83570 frag1->seqno = htons(seqno - 1);
83571 frag2->seqno = htons(seqno);
83572
83573diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
83574index 07f0739..3c42e34 100644
83575--- a/net/bluetooth/hci_sock.c
83576+++ b/net/bluetooth/hci_sock.c
83577@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
83578 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
83579 }
83580
83581- len = min_t(unsigned int, len, sizeof(uf));
83582+ len = min((size_t)len, sizeof(uf));
83583 if (copy_from_user(&uf, optval, len)) {
83584 err = -EFAULT;
83585 break;
83586diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
83587index 22e6583..426e2f3 100644
83588--- a/net/bluetooth/l2cap_core.c
83589+++ b/net/bluetooth/l2cap_core.c
83590@@ -3400,8 +3400,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
83591 break;
83592
83593 case L2CAP_CONF_RFC:
83594- if (olen == sizeof(rfc))
83595- memcpy(&rfc, (void *)val, olen);
83596+ if (olen != sizeof(rfc))
83597+ break;
83598+
83599+ memcpy(&rfc, (void *)val, olen);
83600
83601 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
83602 rfc.mode != chan->mode)
83603diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
83604index 1bcfb84..dad9f98 100644
83605--- a/net/bluetooth/l2cap_sock.c
83606+++ b/net/bluetooth/l2cap_sock.c
83607@@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
83608 struct sock *sk = sock->sk;
83609 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
83610 struct l2cap_options opts;
83611- int len, err = 0;
83612+ int err = 0;
83613+ size_t len = optlen;
83614 u32 opt;
83615
83616 BT_DBG("sk %p", sk);
83617@@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
83618 opts.max_tx = chan->max_tx;
83619 opts.txwin_size = chan->tx_win;
83620
83621- len = min_t(unsigned int, sizeof(opts), optlen);
83622+ len = min(sizeof(opts), len);
83623 if (copy_from_user((char *) &opts, optval, len)) {
83624 err = -EFAULT;
83625 break;
83626@@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
83627 struct bt_security sec;
83628 struct bt_power pwr;
83629 struct l2cap_conn *conn;
83630- int len, err = 0;
83631+ int err = 0;
83632+ size_t len = optlen;
83633 u32 opt;
83634
83635 BT_DBG("sk %p", sk);
83636@@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
83637
83638 sec.level = BT_SECURITY_LOW;
83639
83640- len = min_t(unsigned int, sizeof(sec), optlen);
83641+ len = min(sizeof(sec), len);
83642 if (copy_from_user((char *) &sec, optval, len)) {
83643 err = -EFAULT;
83644 break;
83645@@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
83646
83647 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
83648
83649- len = min_t(unsigned int, sizeof(pwr), optlen);
83650+ len = min(sizeof(pwr), len);
83651 if (copy_from_user((char *) &pwr, optval, len)) {
83652 err = -EFAULT;
83653 break;
83654diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
83655index ce3f665..2c7d08f 100644
83656--- a/net/bluetooth/rfcomm/sock.c
83657+++ b/net/bluetooth/rfcomm/sock.c
83658@@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
83659 struct sock *sk = sock->sk;
83660 struct bt_security sec;
83661 int err = 0;
83662- size_t len;
83663+ size_t len = optlen;
83664 u32 opt;
83665
83666 BT_DBG("sk %p", sk);
83667@@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
83668
83669 sec.level = BT_SECURITY_LOW;
83670
83671- len = min_t(unsigned int, sizeof(sec), optlen);
83672+ len = min(sizeof(sec), len);
83673 if (copy_from_user((char *) &sec, optval, len)) {
83674 err = -EFAULT;
83675 break;
83676diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
83677index bd6fd0f..6492cba 100644
83678--- a/net/bluetooth/rfcomm/tty.c
83679+++ b/net/bluetooth/rfcomm/tty.c
83680@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
83681 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
83682
83683 spin_lock_irqsave(&dev->port.lock, flags);
83684- if (dev->port.count > 0) {
83685+ if (atomic_read(&dev->port.count) > 0) {
83686 spin_unlock_irqrestore(&dev->port.lock, flags);
83687 return;
83688 }
83689@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
83690 return -ENODEV;
83691
83692 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
83693- dev->channel, dev->port.count);
83694+ dev->channel, atomic_read(&dev->port.count));
83695
83696 spin_lock_irqsave(&dev->port.lock, flags);
83697- if (++dev->port.count > 1) {
83698+ if (atomic_inc_return(&dev->port.count) > 1) {
83699 spin_unlock_irqrestore(&dev->port.lock, flags);
83700 return 0;
83701 }
83702@@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
83703 return;
83704
83705 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
83706- dev->port.count);
83707+ atomic_read(&dev->port.count));
83708
83709 spin_lock_irqsave(&dev->port.lock, flags);
83710- if (!--dev->port.count) {
83711+ if (!atomic_dec_return(&dev->port.count)) {
83712 spin_unlock_irqrestore(&dev->port.lock, flags);
83713 if (dev->tty_dev->parent)
83714 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
83715diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
83716index 5fe2ff3..121d696 100644
83717--- a/net/bridge/netfilter/ebtables.c
83718+++ b/net/bridge/netfilter/ebtables.c
83719@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
83720 tmp.valid_hooks = t->table->valid_hooks;
83721 }
83722 mutex_unlock(&ebt_mutex);
83723- if (copy_to_user(user, &tmp, *len) != 0){
83724+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
83725 BUGPRINT("c2u Didn't work\n");
83726 ret = -EFAULT;
83727 break;
83728@@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
83729 goto out;
83730 tmp.valid_hooks = t->valid_hooks;
83731
83732- if (copy_to_user(user, &tmp, *len) != 0) {
83733+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
83734 ret = -EFAULT;
83735 break;
83736 }
83737@@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
83738 tmp.entries_size = t->table->entries_size;
83739 tmp.valid_hooks = t->table->valid_hooks;
83740
83741- if (copy_to_user(user, &tmp, *len) != 0) {
83742+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
83743 ret = -EFAULT;
83744 break;
83745 }
83746diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
83747index a376ec1..1fbd6be 100644
83748--- a/net/caif/cfctrl.c
83749+++ b/net/caif/cfctrl.c
83750@@ -10,6 +10,7 @@
83751 #include <linux/spinlock.h>
83752 #include <linux/slab.h>
83753 #include <linux/pkt_sched.h>
83754+#include <linux/sched.h>
83755 #include <net/caif/caif_layer.h>
83756 #include <net/caif/cfpkt.h>
83757 #include <net/caif/cfctrl.h>
83758@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
83759 memset(&dev_info, 0, sizeof(dev_info));
83760 dev_info.id = 0xff;
83761 cfsrvl_init(&this->serv, 0, &dev_info, false);
83762- atomic_set(&this->req_seq_no, 1);
83763- atomic_set(&this->rsp_seq_no, 1);
83764+ atomic_set_unchecked(&this->req_seq_no, 1);
83765+ atomic_set_unchecked(&this->rsp_seq_no, 1);
83766 this->serv.layer.receive = cfctrl_recv;
83767 sprintf(this->serv.layer.name, "ctrl");
83768 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
83769@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
83770 struct cfctrl_request_info *req)
83771 {
83772 spin_lock_bh(&ctrl->info_list_lock);
83773- atomic_inc(&ctrl->req_seq_no);
83774- req->sequence_no = atomic_read(&ctrl->req_seq_no);
83775+ atomic_inc_unchecked(&ctrl->req_seq_no);
83776+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
83777 list_add_tail(&req->list, &ctrl->list);
83778 spin_unlock_bh(&ctrl->info_list_lock);
83779 }
83780@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
83781 if (p != first)
83782 pr_warn("Requests are not received in order\n");
83783
83784- atomic_set(&ctrl->rsp_seq_no,
83785+ atomic_set_unchecked(&ctrl->rsp_seq_no,
83786 p->sequence_no);
83787 list_del(&p->list);
83788 goto out;
83789diff --git a/net/can/af_can.c b/net/can/af_can.c
83790index ddac1ee..3ee0a78 100644
83791--- a/net/can/af_can.c
83792+++ b/net/can/af_can.c
83793@@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
83794 };
83795
83796 /* notifier block for netdevice event */
83797-static struct notifier_block can_netdev_notifier __read_mostly = {
83798+static struct notifier_block can_netdev_notifier = {
83799 .notifier_call = can_notifier,
83800 };
83801
83802diff --git a/net/can/gw.c b/net/can/gw.c
83803index 574dda78e..3d2b3da 100644
83804--- a/net/can/gw.c
83805+++ b/net/can/gw.c
83806@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
83807 MODULE_ALIAS("can-gw");
83808
83809 static HLIST_HEAD(cgw_list);
83810-static struct notifier_block notifier;
83811
83812 static struct kmem_cache *cgw_cache __read_mostly;
83813
83814@@ -893,6 +892,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
83815 return err;
83816 }
83817
83818+static struct notifier_block notifier = {
83819+ .notifier_call = cgw_notifier
83820+};
83821+
83822 static __init int cgw_module_init(void)
83823 {
83824 printk(banner);
83825@@ -904,7 +907,6 @@ static __init int cgw_module_init(void)
83826 return -ENOMEM;
83827
83828 /* set notifier */
83829- notifier.notifier_call = cgw_notifier;
83830 register_netdevice_notifier(&notifier);
83831
83832 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
83833diff --git a/net/compat.c b/net/compat.c
83834index 79ae884..17c5c09 100644
83835--- a/net/compat.c
83836+++ b/net/compat.c
83837@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
83838 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
83839 __get_user(kmsg->msg_flags, &umsg->msg_flags))
83840 return -EFAULT;
83841- kmsg->msg_name = compat_ptr(tmp1);
83842- kmsg->msg_iov = compat_ptr(tmp2);
83843- kmsg->msg_control = compat_ptr(tmp3);
83844+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
83845+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
83846+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
83847 return 0;
83848 }
83849
83850@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
83851
83852 if (kern_msg->msg_namelen) {
83853 if (mode == VERIFY_READ) {
83854- int err = move_addr_to_kernel(kern_msg->msg_name,
83855+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
83856 kern_msg->msg_namelen,
83857 kern_address);
83858 if (err < 0)
83859@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
83860 kern_msg->msg_name = NULL;
83861
83862 tot_len = iov_from_user_compat_to_kern(kern_iov,
83863- (struct compat_iovec __user *)kern_msg->msg_iov,
83864+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
83865 kern_msg->msg_iovlen);
83866 if (tot_len >= 0)
83867 kern_msg->msg_iov = kern_iov;
83868@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
83869
83870 #define CMSG_COMPAT_FIRSTHDR(msg) \
83871 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
83872- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
83873+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
83874 (struct compat_cmsghdr __user *)NULL)
83875
83876 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
83877 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
83878 (ucmlen) <= (unsigned long) \
83879 ((mhdr)->msg_controllen - \
83880- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
83881+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
83882
83883 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
83884 struct compat_cmsghdr __user *cmsg, int cmsg_len)
83885 {
83886 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
83887- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
83888+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
83889 msg->msg_controllen)
83890 return NULL;
83891 return (struct compat_cmsghdr __user *)ptr;
83892@@ -219,7 +219,7 @@ Efault:
83893
83894 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
83895 {
83896- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
83897+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
83898 struct compat_cmsghdr cmhdr;
83899 struct compat_timeval ctv;
83900 struct compat_timespec cts[3];
83901@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
83902
83903 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
83904 {
83905- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
83906+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
83907 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
83908 int fdnum = scm->fp->count;
83909 struct file **fp = scm->fp->fp;
83910@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
83911 return -EFAULT;
83912 old_fs = get_fs();
83913 set_fs(KERNEL_DS);
83914- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
83915+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
83916 set_fs(old_fs);
83917
83918 return err;
83919@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
83920 len = sizeof(ktime);
83921 old_fs = get_fs();
83922 set_fs(KERNEL_DS);
83923- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
83924+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
83925 set_fs(old_fs);
83926
83927 if (!err) {
83928@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
83929 case MCAST_JOIN_GROUP:
83930 case MCAST_LEAVE_GROUP:
83931 {
83932- struct compat_group_req __user *gr32 = (void *)optval;
83933+ struct compat_group_req __user *gr32 = (void __user *)optval;
83934 struct group_req __user *kgr =
83935 compat_alloc_user_space(sizeof(struct group_req));
83936 u32 interface;
83937@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
83938 case MCAST_BLOCK_SOURCE:
83939 case MCAST_UNBLOCK_SOURCE:
83940 {
83941- struct compat_group_source_req __user *gsr32 = (void *)optval;
83942+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
83943 struct group_source_req __user *kgsr = compat_alloc_user_space(
83944 sizeof(struct group_source_req));
83945 u32 interface;
83946@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
83947 }
83948 case MCAST_MSFILTER:
83949 {
83950- struct compat_group_filter __user *gf32 = (void *)optval;
83951+ struct compat_group_filter __user *gf32 = (void __user *)optval;
83952 struct group_filter __user *kgf;
83953 u32 interface, fmode, numsrc;
83954
83955@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
83956 char __user *optval, int __user *optlen,
83957 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
83958 {
83959- struct compat_group_filter __user *gf32 = (void *)optval;
83960+ struct compat_group_filter __user *gf32 = (void __user *)optval;
83961 struct group_filter __user *kgf;
83962 int __user *koptlen;
83963 u32 interface, fmode, numsrc;
83964@@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
83965
83966 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
83967 return -EINVAL;
83968- if (copy_from_user(a, args, nas[call]))
83969+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
83970 return -EFAULT;
83971 a0 = a[0];
83972 a1 = a[1];
83973diff --git a/net/core/datagram.c b/net/core/datagram.c
83974index 368f9c3..f82d4a3 100644
83975--- a/net/core/datagram.c
83976+++ b/net/core/datagram.c
83977@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
83978 }
83979
83980 kfree_skb(skb);
83981- atomic_inc(&sk->sk_drops);
83982+ atomic_inc_unchecked(&sk->sk_drops);
83983 sk_mem_reclaim_partial(sk);
83984
83985 return err;
83986diff --git a/net/core/dev.c b/net/core/dev.c
83987index f64e439..8f959e6 100644
83988--- a/net/core/dev.c
83989+++ b/net/core/dev.c
83990@@ -1250,9 +1250,13 @@ void dev_load(struct net *net, const char *name)
83991 if (no_module && capable(CAP_NET_ADMIN))
83992 no_module = request_module("netdev-%s", name);
83993 if (no_module && capable(CAP_SYS_MODULE)) {
83994+#ifdef CONFIG_GRKERNSEC_MODHARDEN
83995+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
83996+#else
83997 if (!request_module("%s", name))
83998 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
83999 name);
84000+#endif
84001 }
84002 }
84003 EXPORT_SYMBOL(dev_load);
84004@@ -1715,7 +1719,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
84005 {
84006 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
84007 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
84008- atomic_long_inc(&dev->rx_dropped);
84009+ atomic_long_inc_unchecked(&dev->rx_dropped);
84010 kfree_skb(skb);
84011 return NET_RX_DROP;
84012 }
84013@@ -1725,7 +1729,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
84014 nf_reset(skb);
84015
84016 if (unlikely(!is_skb_forwardable(dev, skb))) {
84017- atomic_long_inc(&dev->rx_dropped);
84018+ atomic_long_inc_unchecked(&dev->rx_dropped);
84019 kfree_skb(skb);
84020 return NET_RX_DROP;
84021 }
84022@@ -2180,7 +2184,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
84023
84024 struct dev_gso_cb {
84025 void (*destructor)(struct sk_buff *skb);
84026-};
84027+} __no_const;
84028
84029 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
84030
84031@@ -3053,7 +3057,7 @@ enqueue:
84032
84033 local_irq_restore(flags);
84034
84035- atomic_long_inc(&skb->dev->rx_dropped);
84036+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
84037 kfree_skb(skb);
84038 return NET_RX_DROP;
84039 }
84040@@ -3125,7 +3129,7 @@ int netif_rx_ni(struct sk_buff *skb)
84041 }
84042 EXPORT_SYMBOL(netif_rx_ni);
84043
84044-static void net_tx_action(struct softirq_action *h)
84045+static void net_tx_action(void)
84046 {
84047 struct softnet_data *sd = &__get_cpu_var(softnet_data);
84048
84049@@ -3456,7 +3460,7 @@ ncls:
84050 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
84051 } else {
84052 drop:
84053- atomic_long_inc(&skb->dev->rx_dropped);
84054+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
84055 kfree_skb(skb);
84056 /* Jamal, now you will not able to escape explaining
84057 * me how you were going to use this. :-)
84058@@ -4039,7 +4043,7 @@ void netif_napi_del(struct napi_struct *napi)
84059 }
84060 EXPORT_SYMBOL(netif_napi_del);
84061
84062-static void net_rx_action(struct softirq_action *h)
84063+static void net_rx_action(void)
84064 {
84065 struct softnet_data *sd = &__get_cpu_var(softnet_data);
84066 unsigned long time_limit = jiffies + 2;
84067@@ -4523,8 +4527,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
84068 else
84069 seq_printf(seq, "%04x", ntohs(pt->type));
84070
84071+#ifdef CONFIG_GRKERNSEC_HIDESYM
84072+ seq_printf(seq, " %-8s %p\n",
84073+ pt->dev ? pt->dev->name : "", NULL);
84074+#else
84075 seq_printf(seq, " %-8s %pF\n",
84076 pt->dev ? pt->dev->name : "", pt->func);
84077+#endif
84078 }
84079
84080 return 0;
84081@@ -6096,7 +6105,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
84082 } else {
84083 netdev_stats_to_stats64(storage, &dev->stats);
84084 }
84085- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
84086+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
84087 return storage;
84088 }
84089 EXPORT_SYMBOL(dev_get_stats);
84090diff --git a/net/core/flow.c b/net/core/flow.c
84091index b0901ee..7d3c2ca 100644
84092--- a/net/core/flow.c
84093+++ b/net/core/flow.c
84094@@ -61,7 +61,7 @@ struct flow_cache {
84095 struct timer_list rnd_timer;
84096 };
84097
84098-atomic_t flow_cache_genid = ATOMIC_INIT(0);
84099+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
84100 EXPORT_SYMBOL(flow_cache_genid);
84101 static struct flow_cache flow_cache_global;
84102 static struct kmem_cache *flow_cachep __read_mostly;
84103@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
84104
84105 static int flow_entry_valid(struct flow_cache_entry *fle)
84106 {
84107- if (atomic_read(&flow_cache_genid) != fle->genid)
84108+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
84109 return 0;
84110 if (fle->object && !fle->object->ops->check(fle->object))
84111 return 0;
84112@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
84113 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
84114 fcp->hash_count++;
84115 }
84116- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
84117+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
84118 flo = fle->object;
84119 if (!flo)
84120 goto ret_object;
84121@@ -280,7 +280,7 @@ nocache:
84122 }
84123 flo = resolver(net, key, family, dir, flo, ctx);
84124 if (fle) {
84125- fle->genid = atomic_read(&flow_cache_genid);
84126+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
84127 if (!IS_ERR(flo))
84128 fle->object = flo;
84129 else
84130diff --git a/net/core/iovec.c b/net/core/iovec.c
84131index 7e7aeb0..2a998cb 100644
84132--- a/net/core/iovec.c
84133+++ b/net/core/iovec.c
84134@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
84135 if (m->msg_namelen) {
84136 if (mode == VERIFY_READ) {
84137 void __user *namep;
84138- namep = (void __user __force *) m->msg_name;
84139+ namep = (void __force_user *) m->msg_name;
84140 err = move_addr_to_kernel(namep, m->msg_namelen,
84141 address);
84142 if (err < 0)
84143@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
84144 }
84145
84146 size = m->msg_iovlen * sizeof(struct iovec);
84147- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
84148+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
84149 return -EFAULT;
84150
84151 m->msg_iov = iov;
84152diff --git a/net/core/neighbour.c b/net/core/neighbour.c
84153index c815f28..e6403f2 100644
84154--- a/net/core/neighbour.c
84155+++ b/net/core/neighbour.c
84156@@ -2776,7 +2776,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
84157 size_t *lenp, loff_t *ppos)
84158 {
84159 int size, ret;
84160- ctl_table tmp = *ctl;
84161+ ctl_table_no_const tmp = *ctl;
84162
84163 tmp.extra1 = &zero;
84164 tmp.extra2 = &unres_qlen_max;
84165diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
84166index 28c5f5a..7edf2e2 100644
84167--- a/net/core/net-sysfs.c
84168+++ b/net/core/net-sysfs.c
84169@@ -1455,7 +1455,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
84170 }
84171 EXPORT_SYMBOL(netdev_class_remove_file);
84172
84173-int netdev_kobject_init(void)
84174+int __init netdev_kobject_init(void)
84175 {
84176 kobj_ns_type_register(&net_ns_type_operations);
84177 return class_register(&net_class);
84178diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
84179index 8acce01..2e306bb 100644
84180--- a/net/core/net_namespace.c
84181+++ b/net/core/net_namespace.c
84182@@ -442,7 +442,7 @@ static int __register_pernet_operations(struct list_head *list,
84183 int error;
84184 LIST_HEAD(net_exit_list);
84185
84186- list_add_tail(&ops->list, list);
84187+ pax_list_add_tail((struct list_head *)&ops->list, list);
84188 if (ops->init || (ops->id && ops->size)) {
84189 for_each_net(net) {
84190 error = ops_init(ops, net);
84191@@ -455,7 +455,7 @@ static int __register_pernet_operations(struct list_head *list,
84192
84193 out_undo:
84194 /* If I have an error cleanup all namespaces I initialized */
84195- list_del(&ops->list);
84196+ pax_list_del((struct list_head *)&ops->list);
84197 ops_exit_list(ops, &net_exit_list);
84198 ops_free_list(ops, &net_exit_list);
84199 return error;
84200@@ -466,7 +466,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
84201 struct net *net;
84202 LIST_HEAD(net_exit_list);
84203
84204- list_del(&ops->list);
84205+ pax_list_del((struct list_head *)&ops->list);
84206 for_each_net(net)
84207 list_add_tail(&net->exit_list, &net_exit_list);
84208 ops_exit_list(ops, &net_exit_list);
84209@@ -600,7 +600,7 @@ int register_pernet_device(struct pernet_operations *ops)
84210 mutex_lock(&net_mutex);
84211 error = register_pernet_operations(&pernet_list, ops);
84212 if (!error && (first_device == &pernet_list))
84213- first_device = &ops->list;
84214+ first_device = (struct list_head *)&ops->list;
84215 mutex_unlock(&net_mutex);
84216 return error;
84217 }
84218diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
84219index 1868625..b1b1284 100644
84220--- a/net/core/rtnetlink.c
84221+++ b/net/core/rtnetlink.c
84222@@ -58,7 +58,7 @@ struct rtnl_link {
84223 rtnl_doit_func doit;
84224 rtnl_dumpit_func dumpit;
84225 rtnl_calcit_func calcit;
84226-};
84227+} __no_const;
84228
84229 static DEFINE_MUTEX(rtnl_mutex);
84230
84231@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
84232 if (rtnl_link_ops_get(ops->kind))
84233 return -EEXIST;
84234
84235- if (!ops->dellink)
84236- ops->dellink = unregister_netdevice_queue;
84237+ if (!ops->dellink) {
84238+ pax_open_kernel();
84239+ *(void **)&ops->dellink = unregister_netdevice_queue;
84240+ pax_close_kernel();
84241+ }
84242
84243- list_add_tail(&ops->list, &link_ops);
84244+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
84245 return 0;
84246 }
84247 EXPORT_SYMBOL_GPL(__rtnl_link_register);
84248@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
84249 for_each_net(net) {
84250 __rtnl_kill_links(net, ops);
84251 }
84252- list_del(&ops->list);
84253+ pax_list_del((struct list_head *)&ops->list);
84254 }
84255 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
84256
84257diff --git a/net/core/scm.c b/net/core/scm.c
84258index 905dcc6..14ee2d6 100644
84259--- a/net/core/scm.c
84260+++ b/net/core/scm.c
84261@@ -224,7 +224,7 @@ EXPORT_SYMBOL(__scm_send);
84262 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
84263 {
84264 struct cmsghdr __user *cm
84265- = (__force struct cmsghdr __user *)msg->msg_control;
84266+ = (struct cmsghdr __force_user *)msg->msg_control;
84267 struct cmsghdr cmhdr;
84268 int cmlen = CMSG_LEN(len);
84269 int err;
84270@@ -247,7 +247,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
84271 err = -EFAULT;
84272 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
84273 goto out;
84274- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
84275+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
84276 goto out;
84277 cmlen = CMSG_SPACE(len);
84278 if (msg->msg_controllen < cmlen)
84279@@ -263,7 +263,7 @@ EXPORT_SYMBOL(put_cmsg);
84280 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
84281 {
84282 struct cmsghdr __user *cm
84283- = (__force struct cmsghdr __user*)msg->msg_control;
84284+ = (struct cmsghdr __force_user *)msg->msg_control;
84285
84286 int fdmax = 0;
84287 int fdnum = scm->fp->count;
84288@@ -283,7 +283,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
84289 if (fdnum < fdmax)
84290 fdmax = fdnum;
84291
84292- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
84293+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
84294 i++, cmfptr++)
84295 {
84296 struct socket *sock;
84297diff --git a/net/core/sock.c b/net/core/sock.c
84298index bc131d4..029e378 100644
84299--- a/net/core/sock.c
84300+++ b/net/core/sock.c
84301@@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
84302 struct sk_buff_head *list = &sk->sk_receive_queue;
84303
84304 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
84305- atomic_inc(&sk->sk_drops);
84306+ atomic_inc_unchecked(&sk->sk_drops);
84307 trace_sock_rcvqueue_full(sk, skb);
84308 return -ENOMEM;
84309 }
84310@@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
84311 return err;
84312
84313 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
84314- atomic_inc(&sk->sk_drops);
84315+ atomic_inc_unchecked(&sk->sk_drops);
84316 return -ENOBUFS;
84317 }
84318
84319@@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
84320 skb_dst_force(skb);
84321
84322 spin_lock_irqsave(&list->lock, flags);
84323- skb->dropcount = atomic_read(&sk->sk_drops);
84324+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
84325 __skb_queue_tail(list, skb);
84326 spin_unlock_irqrestore(&list->lock, flags);
84327
84328@@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
84329 skb->dev = NULL;
84330
84331 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
84332- atomic_inc(&sk->sk_drops);
84333+ atomic_inc_unchecked(&sk->sk_drops);
84334 goto discard_and_relse;
84335 }
84336 if (nested)
84337@@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
84338 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
84339 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
84340 bh_unlock_sock(sk);
84341- atomic_inc(&sk->sk_drops);
84342+ atomic_inc_unchecked(&sk->sk_drops);
84343 goto discard_and_relse;
84344 }
84345
84346@@ -930,12 +930,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
84347 struct timeval tm;
84348 } v;
84349
84350- int lv = sizeof(int);
84351- int len;
84352+ unsigned int lv = sizeof(int);
84353+ unsigned int len;
84354
84355 if (get_user(len, optlen))
84356 return -EFAULT;
84357- if (len < 0)
84358+ if (len > INT_MAX)
84359 return -EINVAL;
84360
84361 memset(&v, 0, sizeof(v));
84362@@ -1083,11 +1083,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
84363
84364 case SO_PEERNAME:
84365 {
84366- char address[128];
84367+ char address[_K_SS_MAXSIZE];
84368
84369 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
84370 return -ENOTCONN;
84371- if (lv < len)
84372+ if (lv < len || sizeof address < len)
84373 return -EINVAL;
84374 if (copy_to_user(optval, address, len))
84375 return -EFAULT;
84376@@ -1146,7 +1146,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
84377
84378 if (len > lv)
84379 len = lv;
84380- if (copy_to_user(optval, &v, len))
84381+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
84382 return -EFAULT;
84383 lenout:
84384 if (put_user(len, optlen))
84385@@ -2276,7 +2276,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
84386 */
84387 smp_wmb();
84388 atomic_set(&sk->sk_refcnt, 1);
84389- atomic_set(&sk->sk_drops, 0);
84390+ atomic_set_unchecked(&sk->sk_drops, 0);
84391 }
84392 EXPORT_SYMBOL(sock_init_data);
84393
84394diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
84395index 750f44f..922399c 100644
84396--- a/net/core/sock_diag.c
84397+++ b/net/core/sock_diag.c
84398@@ -9,26 +9,33 @@
84399 #include <linux/inet_diag.h>
84400 #include <linux/sock_diag.h>
84401
84402-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
84403+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
84404 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
84405 static DEFINE_MUTEX(sock_diag_table_mutex);
84406
84407 int sock_diag_check_cookie(void *sk, __u32 *cookie)
84408 {
84409+#ifndef CONFIG_GRKERNSEC_HIDESYM
84410 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
84411 cookie[1] != INET_DIAG_NOCOOKIE) &&
84412 ((u32)(unsigned long)sk != cookie[0] ||
84413 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
84414 return -ESTALE;
84415 else
84416+#endif
84417 return 0;
84418 }
84419 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
84420
84421 void sock_diag_save_cookie(void *sk, __u32 *cookie)
84422 {
84423+#ifdef CONFIG_GRKERNSEC_HIDESYM
84424+ cookie[0] = 0;
84425+ cookie[1] = 0;
84426+#else
84427 cookie[0] = (u32)(unsigned long)sk;
84428 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
84429+#endif
84430 }
84431 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
84432
84433@@ -75,8 +82,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
84434 mutex_lock(&sock_diag_table_mutex);
84435 if (sock_diag_handlers[hndl->family])
84436 err = -EBUSY;
84437- else
84438+ else {
84439+ pax_open_kernel();
84440 sock_diag_handlers[hndl->family] = hndl;
84441+ pax_close_kernel();
84442+ }
84443 mutex_unlock(&sock_diag_table_mutex);
84444
84445 return err;
84446@@ -92,26 +102,13 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
84447
84448 mutex_lock(&sock_diag_table_mutex);
84449 BUG_ON(sock_diag_handlers[family] != hnld);
84450+ pax_open_kernel();
84451 sock_diag_handlers[family] = NULL;
84452+ pax_close_kernel();
84453 mutex_unlock(&sock_diag_table_mutex);
84454 }
84455 EXPORT_SYMBOL_GPL(sock_diag_unregister);
84456
84457-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
84458-{
84459- if (sock_diag_handlers[family] == NULL)
84460- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
84461- NETLINK_SOCK_DIAG, family);
84462-
84463- mutex_lock(&sock_diag_table_mutex);
84464- return sock_diag_handlers[family];
84465-}
84466-
84467-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
84468-{
84469- mutex_unlock(&sock_diag_table_mutex);
84470-}
84471-
84472 static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
84473 {
84474 int err;
84475@@ -124,12 +121,17 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
84476 if (req->sdiag_family >= AF_MAX)
84477 return -EINVAL;
84478
84479- hndl = sock_diag_lock_handler(req->sdiag_family);
84480+ if (sock_diag_handlers[req->sdiag_family] == NULL)
84481+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
84482+ NETLINK_SOCK_DIAG, req->sdiag_family);
84483+
84484+ mutex_lock(&sock_diag_table_mutex);
84485+ hndl = sock_diag_handlers[req->sdiag_family];
84486 if (hndl == NULL)
84487 err = -ENOENT;
84488 else
84489 err = hndl->dump(skb, nlh);
84490- sock_diag_unlock_handler(hndl);
84491+ mutex_unlock(&sock_diag_table_mutex);
84492
84493 return err;
84494 }
84495diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
84496index d1b0804..4aed0a5 100644
84497--- a/net/core/sysctl_net_core.c
84498+++ b/net/core/sysctl_net_core.c
84499@@ -26,7 +26,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
84500 {
84501 unsigned int orig_size, size;
84502 int ret, i;
84503- ctl_table tmp = {
84504+ ctl_table_no_const tmp = {
84505 .data = &size,
84506 .maxlen = sizeof(size),
84507 .mode = table->mode
84508@@ -205,13 +205,12 @@ static struct ctl_table netns_core_table[] = {
84509
84510 static __net_init int sysctl_core_net_init(struct net *net)
84511 {
84512- struct ctl_table *tbl;
84513+ ctl_table_no_const *tbl = NULL;
84514
84515 net->core.sysctl_somaxconn = SOMAXCONN;
84516
84517- tbl = netns_core_table;
84518 if (!net_eq(net, &init_net)) {
84519- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
84520+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
84521 if (tbl == NULL)
84522 goto err_dup;
84523
84524@@ -221,16 +220,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
84525 if (net->user_ns != &init_user_ns) {
84526 tbl[0].procname = NULL;
84527 }
84528- }
84529-
84530- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
84531+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
84532+ } else
84533+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
84534 if (net->core.sysctl_hdr == NULL)
84535 goto err_reg;
84536
84537 return 0;
84538
84539 err_reg:
84540- if (tbl != netns_core_table)
84541+ if (tbl)
84542 kfree(tbl);
84543 err_dup:
84544 return -ENOMEM;
84545@@ -246,7 +245,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
84546 kfree(tbl);
84547 }
84548
84549-static __net_initdata struct pernet_operations sysctl_core_ops = {
84550+static __net_initconst struct pernet_operations sysctl_core_ops = {
84551 .init = sysctl_core_net_init,
84552 .exit = sysctl_core_net_exit,
84553 };
84554diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
84555index 307c322..78a4c6f 100644
84556--- a/net/decnet/af_decnet.c
84557+++ b/net/decnet/af_decnet.c
84558@@ -468,6 +468,7 @@ static struct proto dn_proto = {
84559 .sysctl_rmem = sysctl_decnet_rmem,
84560 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
84561 .obj_size = sizeof(struct dn_sock),
84562+ .slab_flags = SLAB_USERCOPY,
84563 };
84564
84565 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
84566diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
84567index a55eecc..dd8428c 100644
84568--- a/net/decnet/sysctl_net_decnet.c
84569+++ b/net/decnet/sysctl_net_decnet.c
84570@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
84571
84572 if (len > *lenp) len = *lenp;
84573
84574- if (copy_to_user(buffer, addr, len))
84575+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
84576 return -EFAULT;
84577
84578 *lenp = len;
84579@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
84580
84581 if (len > *lenp) len = *lenp;
84582
84583- if (copy_to_user(buffer, devname, len))
84584+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
84585 return -EFAULT;
84586
84587 *lenp = len;
84588diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
84589index fcf104e..95552d4 100644
84590--- a/net/ipv4/af_inet.c
84591+++ b/net/ipv4/af_inet.c
84592@@ -1717,13 +1717,9 @@ static int __init inet_init(void)
84593
84594 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
84595
84596- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
84597- if (!sysctl_local_reserved_ports)
84598- goto out;
84599-
84600 rc = proto_register(&tcp_prot, 1);
84601 if (rc)
84602- goto out_free_reserved_ports;
84603+ goto out;
84604
84605 rc = proto_register(&udp_prot, 1);
84606 if (rc)
84607@@ -1832,8 +1828,6 @@ out_unregister_udp_proto:
84608 proto_unregister(&udp_prot);
84609 out_unregister_tcp_proto:
84610 proto_unregister(&tcp_prot);
84611-out_free_reserved_ports:
84612- kfree(sysctl_local_reserved_ports);
84613 goto out;
84614 }
84615
84616diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
84617index a69b4e4..dbccba5 100644
84618--- a/net/ipv4/ah4.c
84619+++ b/net/ipv4/ah4.c
84620@@ -421,7 +421,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
84621 return;
84622
84623 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
84624- atomic_inc(&flow_cache_genid);
84625+ atomic_inc_unchecked(&flow_cache_genid);
84626 rt_genid_bump(net);
84627
84628 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
84629diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
84630index a8e4f26..25e5f40 100644
84631--- a/net/ipv4/devinet.c
84632+++ b/net/ipv4/devinet.c
84633@@ -1763,7 +1763,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
84634 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
84635 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
84636
84637-static struct devinet_sysctl_table {
84638+static const struct devinet_sysctl_table {
84639 struct ctl_table_header *sysctl_header;
84640 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
84641 } devinet_sysctl = {
84642@@ -1881,7 +1881,7 @@ static __net_init int devinet_init_net(struct net *net)
84643 int err;
84644 struct ipv4_devconf *all, *dflt;
84645 #ifdef CONFIG_SYSCTL
84646- struct ctl_table *tbl = ctl_forward_entry;
84647+ ctl_table_no_const *tbl = NULL;
84648 struct ctl_table_header *forw_hdr;
84649 #endif
84650
84651@@ -1899,7 +1899,7 @@ static __net_init int devinet_init_net(struct net *net)
84652 goto err_alloc_dflt;
84653
84654 #ifdef CONFIG_SYSCTL
84655- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
84656+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
84657 if (tbl == NULL)
84658 goto err_alloc_ctl;
84659
84660@@ -1919,7 +1919,10 @@ static __net_init int devinet_init_net(struct net *net)
84661 goto err_reg_dflt;
84662
84663 err = -ENOMEM;
84664- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
84665+ if (!net_eq(net, &init_net))
84666+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
84667+ else
84668+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
84669 if (forw_hdr == NULL)
84670 goto err_reg_ctl;
84671 net->ipv4.forw_hdr = forw_hdr;
84672@@ -1935,8 +1938,7 @@ err_reg_ctl:
84673 err_reg_dflt:
84674 __devinet_sysctl_unregister(all);
84675 err_reg_all:
84676- if (tbl != ctl_forward_entry)
84677- kfree(tbl);
84678+ kfree(tbl);
84679 err_alloc_ctl:
84680 #endif
84681 if (dflt != &ipv4_devconf_dflt)
84682diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
84683index 3b4f0cd..8cb864c 100644
84684--- a/net/ipv4/esp4.c
84685+++ b/net/ipv4/esp4.c
84686@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
84687 return;
84688
84689 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
84690- atomic_inc(&flow_cache_genid);
84691+ atomic_inc_unchecked(&flow_cache_genid);
84692 rt_genid_bump(net);
84693
84694 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
84695diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
84696index 5cd75e2..f57ef39 100644
84697--- a/net/ipv4/fib_frontend.c
84698+++ b/net/ipv4/fib_frontend.c
84699@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
84700 #ifdef CONFIG_IP_ROUTE_MULTIPATH
84701 fib_sync_up(dev);
84702 #endif
84703- atomic_inc(&net->ipv4.dev_addr_genid);
84704+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
84705 rt_cache_flush(dev_net(dev));
84706 break;
84707 case NETDEV_DOWN:
84708 fib_del_ifaddr(ifa, NULL);
84709- atomic_inc(&net->ipv4.dev_addr_genid);
84710+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
84711 if (ifa->ifa_dev->ifa_list == NULL) {
84712 /* Last address was deleted from this interface.
84713 * Disable IP.
84714@@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
84715 #ifdef CONFIG_IP_ROUTE_MULTIPATH
84716 fib_sync_up(dev);
84717 #endif
84718- atomic_inc(&net->ipv4.dev_addr_genid);
84719+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
84720 rt_cache_flush(net);
84721 break;
84722 case NETDEV_DOWN:
84723diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
84724index 4797a80..2bd54e9 100644
84725--- a/net/ipv4/fib_semantics.c
84726+++ b/net/ipv4/fib_semantics.c
84727@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
84728 nh->nh_saddr = inet_select_addr(nh->nh_dev,
84729 nh->nh_gw,
84730 nh->nh_parent->fib_scope);
84731- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
84732+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
84733
84734 return nh->nh_saddr;
84735 }
84736diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
84737index d0670f0..744ac80 100644
84738--- a/net/ipv4/inet_connection_sock.c
84739+++ b/net/ipv4/inet_connection_sock.c
84740@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
84741 .range = { 32768, 61000 },
84742 };
84743
84744-unsigned long *sysctl_local_reserved_ports;
84745+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
84746 EXPORT_SYMBOL(sysctl_local_reserved_ports);
84747
84748 void inet_get_local_port_range(int *low, int *high)
84749diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
84750index fa3ae81..0dbe6b8 100644
84751--- a/net/ipv4/inet_hashtables.c
84752+++ b/net/ipv4/inet_hashtables.c
84753@@ -18,12 +18,15 @@
84754 #include <linux/sched.h>
84755 #include <linux/slab.h>
84756 #include <linux/wait.h>
84757+#include <linux/security.h>
84758
84759 #include <net/inet_connection_sock.h>
84760 #include <net/inet_hashtables.h>
84761 #include <net/secure_seq.h>
84762 #include <net/ip.h>
84763
84764+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
84765+
84766 /*
84767 * Allocate and initialize a new local port bind bucket.
84768 * The bindhash mutex for snum's hash chain must be held here.
84769@@ -540,6 +543,8 @@ ok:
84770 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
84771 spin_unlock(&head->lock);
84772
84773+ gr_update_task_in_ip_table(current, inet_sk(sk));
84774+
84775 if (tw) {
84776 inet_twsk_deschedule(tw, death_row);
84777 while (twrefcnt) {
84778diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
84779index 000e3d2..5472da3 100644
84780--- a/net/ipv4/inetpeer.c
84781+++ b/net/ipv4/inetpeer.c
84782@@ -503,8 +503,8 @@ relookup:
84783 if (p) {
84784 p->daddr = *daddr;
84785 atomic_set(&p->refcnt, 1);
84786- atomic_set(&p->rid, 0);
84787- atomic_set(&p->ip_id_count,
84788+ atomic_set_unchecked(&p->rid, 0);
84789+ atomic_set_unchecked(&p->ip_id_count,
84790 (daddr->family == AF_INET) ?
84791 secure_ip_id(daddr->addr.a4) :
84792 secure_ipv6_id(daddr->addr.a6));
84793diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
84794index eb9d63a..5bae5f4 100644
84795--- a/net/ipv4/ip_fragment.c
84796+++ b/net/ipv4/ip_fragment.c
84797@@ -322,7 +322,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
84798 return 0;
84799
84800 start = qp->rid;
84801- end = atomic_inc_return(&peer->rid);
84802+ end = atomic_inc_return_unchecked(&peer->rid);
84803 qp->rid = end;
84804
84805 rc = qp->q.fragments && (end - start) > max;
84806@@ -789,12 +789,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
84807
84808 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
84809 {
84810- struct ctl_table *table;
84811+ ctl_table_no_const *table = NULL;
84812 struct ctl_table_header *hdr;
84813
84814- table = ip4_frags_ns_ctl_table;
84815 if (!net_eq(net, &init_net)) {
84816- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
84817+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
84818 if (table == NULL)
84819 goto err_alloc;
84820
84821@@ -805,9 +804,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
84822 /* Don't export sysctls to unprivileged users */
84823 if (net->user_ns != &init_user_ns)
84824 table[0].procname = NULL;
84825+ hdr = register_net_sysctl(net, "net/ipv4", table);
84826 }
84827+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
84828
84829- hdr = register_net_sysctl(net, "net/ipv4", table);
84830 if (hdr == NULL)
84831 goto err_reg;
84832
84833@@ -815,8 +815,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
84834 return 0;
84835
84836 err_reg:
84837- if (!net_eq(net, &init_net))
84838- kfree(table);
84839+ kfree(table);
84840 err_alloc:
84841 return -ENOMEM;
84842 }
84843diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
84844index e81b1ca..6f3b5b9 100644
84845--- a/net/ipv4/ip_gre.c
84846+++ b/net/ipv4/ip_gre.c
84847@@ -124,7 +124,7 @@ static bool log_ecn_error = true;
84848 module_param(log_ecn_error, bool, 0644);
84849 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
84850
84851-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
84852+static struct rtnl_link_ops ipgre_link_ops;
84853 static int ipgre_tunnel_init(struct net_device *dev);
84854 static void ipgre_tunnel_setup(struct net_device *dev);
84855 static int ipgre_tunnel_bind_dev(struct net_device *dev);
84856@@ -1756,7 +1756,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
84857 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
84858 };
84859
84860-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
84861+static struct rtnl_link_ops ipgre_link_ops = {
84862 .kind = "gre",
84863 .maxtype = IFLA_GRE_MAX,
84864 .policy = ipgre_policy,
84865@@ -1769,7 +1769,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
84866 .fill_info = ipgre_fill_info,
84867 };
84868
84869-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
84870+static struct rtnl_link_ops ipgre_tap_ops = {
84871 .kind = "gretap",
84872 .maxtype = IFLA_GRE_MAX,
84873 .policy = ipgre_policy,
84874diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
84875index d9c4f11..02b82db 100644
84876--- a/net/ipv4/ip_sockglue.c
84877+++ b/net/ipv4/ip_sockglue.c
84878@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
84879 len = min_t(unsigned int, len, opt->optlen);
84880 if (put_user(len, optlen))
84881 return -EFAULT;
84882- if (copy_to_user(optval, opt->__data, len))
84883+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
84884+ copy_to_user(optval, opt->__data, len))
84885 return -EFAULT;
84886 return 0;
84887 }
84888@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
84889 if (sk->sk_type != SOCK_STREAM)
84890 return -ENOPROTOOPT;
84891
84892- msg.msg_control = optval;
84893+ msg.msg_control = (void __force_kernel *)optval;
84894 msg.msg_controllen = len;
84895 msg.msg_flags = flags;
84896
84897diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
84898index c3a4233..1412161 100644
84899--- a/net/ipv4/ip_vti.c
84900+++ b/net/ipv4/ip_vti.c
84901@@ -47,7 +47,7 @@
84902 #define HASH_SIZE 16
84903 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
84904
84905-static struct rtnl_link_ops vti_link_ops __read_mostly;
84906+static struct rtnl_link_ops vti_link_ops;
84907
84908 static int vti_net_id __read_mostly;
84909 struct vti_net {
84910@@ -886,7 +886,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
84911 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
84912 };
84913
84914-static struct rtnl_link_ops vti_link_ops __read_mostly = {
84915+static struct rtnl_link_ops vti_link_ops = {
84916 .kind = "vti",
84917 .maxtype = IFLA_VTI_MAX,
84918 .policy = vti_policy,
84919diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
84920index 9a46dae..5f793a0 100644
84921--- a/net/ipv4/ipcomp.c
84922+++ b/net/ipv4/ipcomp.c
84923@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
84924 return;
84925
84926 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
84927- atomic_inc(&flow_cache_genid);
84928+ atomic_inc_unchecked(&flow_cache_genid);
84929 rt_genid_bump(net);
84930
84931 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
84932diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
84933index a2e50ae..e152b7c 100644
84934--- a/net/ipv4/ipconfig.c
84935+++ b/net/ipv4/ipconfig.c
84936@@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
84937
84938 mm_segment_t oldfs = get_fs();
84939 set_fs(get_ds());
84940- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
84941+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
84942 set_fs(oldfs);
84943 return res;
84944 }
84945@@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
84946
84947 mm_segment_t oldfs = get_fs();
84948 set_fs(get_ds());
84949- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
84950+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
84951 set_fs(oldfs);
84952 return res;
84953 }
84954@@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
84955
84956 mm_segment_t oldfs = get_fs();
84957 set_fs(get_ds());
84958- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
84959+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
84960 set_fs(oldfs);
84961 return res;
84962 }
84963diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
84964index 191fc24..1b3b804 100644
84965--- a/net/ipv4/ipip.c
84966+++ b/net/ipv4/ipip.c
84967@@ -138,7 +138,7 @@ struct ipip_net {
84968 static int ipip_tunnel_init(struct net_device *dev);
84969 static void ipip_tunnel_setup(struct net_device *dev);
84970 static void ipip_dev_free(struct net_device *dev);
84971-static struct rtnl_link_ops ipip_link_ops __read_mostly;
84972+static struct rtnl_link_ops ipip_link_ops;
84973
84974 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
84975 struct rtnl_link_stats64 *tot)
84976@@ -972,7 +972,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
84977 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
84978 };
84979
84980-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
84981+static struct rtnl_link_ops ipip_link_ops = {
84982 .kind = "ipip",
84983 .maxtype = IFLA_IPTUN_MAX,
84984 .policy = ipip_policy,
84985diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
84986index 3ea4127..849297b 100644
84987--- a/net/ipv4/netfilter/arp_tables.c
84988+++ b/net/ipv4/netfilter/arp_tables.c
84989@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
84990 #endif
84991
84992 static int get_info(struct net *net, void __user *user,
84993- const int *len, int compat)
84994+ int len, int compat)
84995 {
84996 char name[XT_TABLE_MAXNAMELEN];
84997 struct xt_table *t;
84998 int ret;
84999
85000- if (*len != sizeof(struct arpt_getinfo)) {
85001- duprintf("length %u != %Zu\n", *len,
85002+ if (len != sizeof(struct arpt_getinfo)) {
85003+ duprintf("length %u != %Zu\n", len,
85004 sizeof(struct arpt_getinfo));
85005 return -EINVAL;
85006 }
85007@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
85008 info.size = private->size;
85009 strcpy(info.name, name);
85010
85011- if (copy_to_user(user, &info, *len) != 0)
85012+ if (copy_to_user(user, &info, len) != 0)
85013 ret = -EFAULT;
85014 else
85015 ret = 0;
85016@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
85017
85018 switch (cmd) {
85019 case ARPT_SO_GET_INFO:
85020- ret = get_info(sock_net(sk), user, len, 1);
85021+ ret = get_info(sock_net(sk), user, *len, 1);
85022 break;
85023 case ARPT_SO_GET_ENTRIES:
85024 ret = compat_get_entries(sock_net(sk), user, len);
85025@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
85026
85027 switch (cmd) {
85028 case ARPT_SO_GET_INFO:
85029- ret = get_info(sock_net(sk), user, len, 0);
85030+ ret = get_info(sock_net(sk), user, *len, 0);
85031 break;
85032
85033 case ARPT_SO_GET_ENTRIES:
85034diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
85035index 17c5e06..1b91206 100644
85036--- a/net/ipv4/netfilter/ip_tables.c
85037+++ b/net/ipv4/netfilter/ip_tables.c
85038@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
85039 #endif
85040
85041 static int get_info(struct net *net, void __user *user,
85042- const int *len, int compat)
85043+ int len, int compat)
85044 {
85045 char name[XT_TABLE_MAXNAMELEN];
85046 struct xt_table *t;
85047 int ret;
85048
85049- if (*len != sizeof(struct ipt_getinfo)) {
85050- duprintf("length %u != %zu\n", *len,
85051+ if (len != sizeof(struct ipt_getinfo)) {
85052+ duprintf("length %u != %zu\n", len,
85053 sizeof(struct ipt_getinfo));
85054 return -EINVAL;
85055 }
85056@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
85057 info.size = private->size;
85058 strcpy(info.name, name);
85059
85060- if (copy_to_user(user, &info, *len) != 0)
85061+ if (copy_to_user(user, &info, len) != 0)
85062 ret = -EFAULT;
85063 else
85064 ret = 0;
85065@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85066
85067 switch (cmd) {
85068 case IPT_SO_GET_INFO:
85069- ret = get_info(sock_net(sk), user, len, 1);
85070+ ret = get_info(sock_net(sk), user, *len, 1);
85071 break;
85072 case IPT_SO_GET_ENTRIES:
85073 ret = compat_get_entries(sock_net(sk), user, len);
85074@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85075
85076 switch (cmd) {
85077 case IPT_SO_GET_INFO:
85078- ret = get_info(sock_net(sk), user, len, 0);
85079+ ret = get_info(sock_net(sk), user, *len, 0);
85080 break;
85081
85082 case IPT_SO_GET_ENTRIES:
85083diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
85084index dc454cc..5bb917f 100644
85085--- a/net/ipv4/ping.c
85086+++ b/net/ipv4/ping.c
85087@@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
85088 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
85089 0, sock_i_ino(sp),
85090 atomic_read(&sp->sk_refcnt), sp,
85091- atomic_read(&sp->sk_drops), len);
85092+ atomic_read_unchecked(&sp->sk_drops), len);
85093 }
85094
85095 static int ping_seq_show(struct seq_file *seq, void *v)
85096diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
85097index 6f08991..55867ad 100644
85098--- a/net/ipv4/raw.c
85099+++ b/net/ipv4/raw.c
85100@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
85101 int raw_rcv(struct sock *sk, struct sk_buff *skb)
85102 {
85103 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
85104- atomic_inc(&sk->sk_drops);
85105+ atomic_inc_unchecked(&sk->sk_drops);
85106 kfree_skb(skb);
85107 return NET_RX_DROP;
85108 }
85109@@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
85110
85111 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
85112 {
85113+ struct icmp_filter filter;
85114+
85115 if (optlen > sizeof(struct icmp_filter))
85116 optlen = sizeof(struct icmp_filter);
85117- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
85118+ if (copy_from_user(&filter, optval, optlen))
85119 return -EFAULT;
85120+ raw_sk(sk)->filter = filter;
85121 return 0;
85122 }
85123
85124 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
85125 {
85126 int len, ret = -EFAULT;
85127+ struct icmp_filter filter;
85128
85129 if (get_user(len, optlen))
85130 goto out;
85131@@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
85132 if (len > sizeof(struct icmp_filter))
85133 len = sizeof(struct icmp_filter);
85134 ret = -EFAULT;
85135- if (put_user(len, optlen) ||
85136- copy_to_user(optval, &raw_sk(sk)->filter, len))
85137+ filter = raw_sk(sk)->filter;
85138+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
85139 goto out;
85140 ret = 0;
85141 out: return ret;
85142@@ -998,7 +1002,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
85143 0, 0L, 0,
85144 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
85145 0, sock_i_ino(sp),
85146- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
85147+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
85148 }
85149
85150 static int raw_seq_show(struct seq_file *seq, void *v)
85151diff --git a/net/ipv4/route.c b/net/ipv4/route.c
85152index a0fcc47..32e2c89 100644
85153--- a/net/ipv4/route.c
85154+++ b/net/ipv4/route.c
85155@@ -2552,34 +2552,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
85156 .maxlen = sizeof(int),
85157 .mode = 0200,
85158 .proc_handler = ipv4_sysctl_rtcache_flush,
85159+ .extra1 = &init_net,
85160 },
85161 { },
85162 };
85163
85164 static __net_init int sysctl_route_net_init(struct net *net)
85165 {
85166- struct ctl_table *tbl;
85167+ ctl_table_no_const *tbl = NULL;
85168
85169- tbl = ipv4_route_flush_table;
85170 if (!net_eq(net, &init_net)) {
85171- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
85172+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
85173 if (tbl == NULL)
85174 goto err_dup;
85175
85176 /* Don't export sysctls to unprivileged users */
85177 if (net->user_ns != &init_user_ns)
85178 tbl[0].procname = NULL;
85179- }
85180- tbl[0].extra1 = net;
85181+ tbl[0].extra1 = net;
85182+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
85183+ } else
85184+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
85185
85186- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
85187 if (net->ipv4.route_hdr == NULL)
85188 goto err_reg;
85189 return 0;
85190
85191 err_reg:
85192- if (tbl != ipv4_route_flush_table)
85193- kfree(tbl);
85194+ kfree(tbl);
85195 err_dup:
85196 return -ENOMEM;
85197 }
85198@@ -2602,7 +2602,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
85199
85200 static __net_init int rt_genid_init(struct net *net)
85201 {
85202- atomic_set(&net->rt_genid, 0);
85203+ atomic_set_unchecked(&net->rt_genid, 0);
85204 get_random_bytes(&net->ipv4.dev_addr_genid,
85205 sizeof(net->ipv4.dev_addr_genid));
85206 return 0;
85207diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
85208index d84400b..62e066e 100644
85209--- a/net/ipv4/sysctl_net_ipv4.c
85210+++ b/net/ipv4/sysctl_net_ipv4.c
85211@@ -54,7 +54,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
85212 {
85213 int ret;
85214 int range[2];
85215- ctl_table tmp = {
85216+ ctl_table_no_const tmp = {
85217 .data = &range,
85218 .maxlen = sizeof(range),
85219 .mode = table->mode,
85220@@ -107,7 +107,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
85221 int ret;
85222 gid_t urange[2];
85223 kgid_t low, high;
85224- ctl_table tmp = {
85225+ ctl_table_no_const tmp = {
85226 .data = &urange,
85227 .maxlen = sizeof(urange),
85228 .mode = table->mode,
85229@@ -138,7 +138,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
85230 void __user *buffer, size_t *lenp, loff_t *ppos)
85231 {
85232 char val[TCP_CA_NAME_MAX];
85233- ctl_table tbl = {
85234+ ctl_table_no_const tbl = {
85235 .data = val,
85236 .maxlen = TCP_CA_NAME_MAX,
85237 };
85238@@ -157,7 +157,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
85239 void __user *buffer, size_t *lenp,
85240 loff_t *ppos)
85241 {
85242- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
85243+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
85244 int ret;
85245
85246 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
85247@@ -174,7 +174,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
85248 void __user *buffer, size_t *lenp,
85249 loff_t *ppos)
85250 {
85251- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
85252+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
85253 int ret;
85254
85255 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
85256@@ -200,15 +200,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
85257 struct mem_cgroup *memcg;
85258 #endif
85259
85260- ctl_table tmp = {
85261+ ctl_table_no_const tmp = {
85262 .data = &vec,
85263 .maxlen = sizeof(vec),
85264 .mode = ctl->mode,
85265 };
85266
85267 if (!write) {
85268- ctl->data = &net->ipv4.sysctl_tcp_mem;
85269- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
85270+ ctl_table_no_const tcp_mem = *ctl;
85271+
85272+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
85273+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
85274 }
85275
85276 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
85277@@ -235,7 +237,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
85278 int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
85279 size_t *lenp, loff_t *ppos)
85280 {
85281- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
85282+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
85283 struct tcp_fastopen_context *ctxt;
85284 int ret;
85285 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
85286@@ -476,7 +478,7 @@ static struct ctl_table ipv4_table[] = {
85287 },
85288 {
85289 .procname = "ip_local_reserved_ports",
85290- .data = NULL, /* initialized in sysctl_ipv4_init */
85291+ .data = sysctl_local_reserved_ports,
85292 .maxlen = 65536,
85293 .mode = 0644,
85294 .proc_handler = proc_do_large_bitmap,
85295@@ -860,11 +862,10 @@ static struct ctl_table ipv4_net_table[] = {
85296
85297 static __net_init int ipv4_sysctl_init_net(struct net *net)
85298 {
85299- struct ctl_table *table;
85300+ ctl_table_no_const *table = NULL;
85301
85302- table = ipv4_net_table;
85303 if (!net_eq(net, &init_net)) {
85304- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
85305+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
85306 if (table == NULL)
85307 goto err_alloc;
85308
85309@@ -897,15 +898,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
85310
85311 tcp_init_mem(net);
85312
85313- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
85314+ if (!net_eq(net, &init_net))
85315+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
85316+ else
85317+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
85318 if (net->ipv4.ipv4_hdr == NULL)
85319 goto err_reg;
85320
85321 return 0;
85322
85323 err_reg:
85324- if (!net_eq(net, &init_net))
85325- kfree(table);
85326+ kfree(table);
85327 err_alloc:
85328 return -ENOMEM;
85329 }
85330@@ -927,16 +930,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
85331 static __init int sysctl_ipv4_init(void)
85332 {
85333 struct ctl_table_header *hdr;
85334- struct ctl_table *i;
85335-
85336- for (i = ipv4_table; i->procname; i++) {
85337- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
85338- i->data = sysctl_local_reserved_ports;
85339- break;
85340- }
85341- }
85342- if (!i->procname)
85343- return -EINVAL;
85344
85345 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
85346 if (hdr == NULL)
85347diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
85348index ad70a96..50cb55b 100644
85349--- a/net/ipv4/tcp_input.c
85350+++ b/net/ipv4/tcp_input.c
85351@@ -4733,7 +4733,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
85352 * simplifies code)
85353 */
85354 static void
85355-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
85356+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
85357 struct sk_buff *head, struct sk_buff *tail,
85358 u32 start, u32 end)
85359 {
85360@@ -5850,6 +5850,7 @@ discard:
85361 tcp_paws_reject(&tp->rx_opt, 0))
85362 goto discard_and_undo;
85363
85364+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
85365 if (th->syn) {
85366 /* We see SYN without ACK. It is attempt of
85367 * simultaneous connect with crossed SYNs.
85368@@ -5900,6 +5901,7 @@ discard:
85369 goto discard;
85370 #endif
85371 }
85372+#endif
85373 /* "fifth, if neither of the SYN or RST bits is set then
85374 * drop the segment and return."
85375 */
85376@@ -5944,7 +5946,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
85377 goto discard;
85378
85379 if (th->syn) {
85380- if (th->fin)
85381+ if (th->fin || th->urg || th->psh)
85382 goto discard;
85383 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
85384 return 1;
85385diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
85386index eadb693..e8f7251 100644
85387--- a/net/ipv4/tcp_ipv4.c
85388+++ b/net/ipv4/tcp_ipv4.c
85389@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
85390 EXPORT_SYMBOL(sysctl_tcp_low_latency);
85391
85392
85393+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85394+extern int grsec_enable_blackhole;
85395+#endif
85396+
85397 #ifdef CONFIG_TCP_MD5SIG
85398 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
85399 __be32 daddr, __be32 saddr, const struct tcphdr *th);
85400@@ -1895,6 +1899,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
85401 return 0;
85402
85403 reset:
85404+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85405+ if (!grsec_enable_blackhole)
85406+#endif
85407 tcp_v4_send_reset(rsk, skb);
85408 discard:
85409 kfree_skb(skb);
85410@@ -1994,12 +2001,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
85411 TCP_SKB_CB(skb)->sacked = 0;
85412
85413 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
85414- if (!sk)
85415+ if (!sk) {
85416+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85417+ ret = 1;
85418+#endif
85419 goto no_tcp_socket;
85420-
85421+ }
85422 process:
85423- if (sk->sk_state == TCP_TIME_WAIT)
85424+ if (sk->sk_state == TCP_TIME_WAIT) {
85425+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85426+ ret = 2;
85427+#endif
85428 goto do_time_wait;
85429+ }
85430
85431 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
85432 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
85433@@ -2050,6 +2064,10 @@ no_tcp_socket:
85434 bad_packet:
85435 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
85436 } else {
85437+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85438+ if (!grsec_enable_blackhole || (ret == 1 &&
85439+ (skb->dev->flags & IFF_LOOPBACK)))
85440+#endif
85441 tcp_v4_send_reset(NULL, skb);
85442 }
85443
85444diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
85445index f35f2df..ccb5ca6 100644
85446--- a/net/ipv4/tcp_minisocks.c
85447+++ b/net/ipv4/tcp_minisocks.c
85448@@ -27,6 +27,10 @@
85449 #include <net/inet_common.h>
85450 #include <net/xfrm.h>
85451
85452+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85453+extern int grsec_enable_blackhole;
85454+#endif
85455+
85456 int sysctl_tcp_syncookies __read_mostly = 1;
85457 EXPORT_SYMBOL(sysctl_tcp_syncookies);
85458
85459@@ -742,7 +746,10 @@ embryonic_reset:
85460 * avoid becoming vulnerable to outside attack aiming at
85461 * resetting legit local connections.
85462 */
85463- req->rsk_ops->send_reset(sk, skb);
85464+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85465+ if (!grsec_enable_blackhole)
85466+#endif
85467+ req->rsk_ops->send_reset(sk, skb);
85468 } else if (fastopen) { /* received a valid RST pkt */
85469 reqsk_fastopen_remove(sk, req, true);
85470 tcp_reset(sk);
85471diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
85472index 4526fe6..1a34e43 100644
85473--- a/net/ipv4/tcp_probe.c
85474+++ b/net/ipv4/tcp_probe.c
85475@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
85476 if (cnt + width >= len)
85477 break;
85478
85479- if (copy_to_user(buf + cnt, tbuf, width))
85480+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
85481 return -EFAULT;
85482 cnt += width;
85483 }
85484diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
85485index b78aac3..e18230b 100644
85486--- a/net/ipv4/tcp_timer.c
85487+++ b/net/ipv4/tcp_timer.c
85488@@ -22,6 +22,10 @@
85489 #include <linux/gfp.h>
85490 #include <net/tcp.h>
85491
85492+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85493+extern int grsec_lastack_retries;
85494+#endif
85495+
85496 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
85497 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
85498 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
85499@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
85500 }
85501 }
85502
85503+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85504+ if ((sk->sk_state == TCP_LAST_ACK) &&
85505+ (grsec_lastack_retries > 0) &&
85506+ (grsec_lastack_retries < retry_until))
85507+ retry_until = grsec_lastack_retries;
85508+#endif
85509+
85510 if (retransmits_timed_out(sk, retry_until,
85511 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
85512 /* Has it gone just too far? */
85513diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
85514index 1f4d405..3524677 100644
85515--- a/net/ipv4/udp.c
85516+++ b/net/ipv4/udp.c
85517@@ -87,6 +87,7 @@
85518 #include <linux/types.h>
85519 #include <linux/fcntl.h>
85520 #include <linux/module.h>
85521+#include <linux/security.h>
85522 #include <linux/socket.h>
85523 #include <linux/sockios.h>
85524 #include <linux/igmp.h>
85525@@ -111,6 +112,10 @@
85526 #include <trace/events/skb.h>
85527 #include "udp_impl.h"
85528
85529+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85530+extern int grsec_enable_blackhole;
85531+#endif
85532+
85533 struct udp_table udp_table __read_mostly;
85534 EXPORT_SYMBOL(udp_table);
85535
85536@@ -569,6 +574,9 @@ found:
85537 return s;
85538 }
85539
85540+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
85541+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
85542+
85543 /*
85544 * This routine is called by the ICMP module when it gets some
85545 * sort of error condition. If err < 0 then the socket should
85546@@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
85547 dport = usin->sin_port;
85548 if (dport == 0)
85549 return -EINVAL;
85550+
85551+ err = gr_search_udp_sendmsg(sk, usin);
85552+ if (err)
85553+ return err;
85554 } else {
85555 if (sk->sk_state != TCP_ESTABLISHED)
85556 return -EDESTADDRREQ;
85557+
85558+ err = gr_search_udp_sendmsg(sk, NULL);
85559+ if (err)
85560+ return err;
85561+
85562 daddr = inet->inet_daddr;
85563 dport = inet->inet_dport;
85564 /* Open fast path for connected socket.
85565@@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
85566 udp_lib_checksum_complete(skb)) {
85567 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
85568 IS_UDPLITE(sk));
85569- atomic_inc(&sk->sk_drops);
85570+ atomic_inc_unchecked(&sk->sk_drops);
85571 __skb_unlink(skb, rcvq);
85572 __skb_queue_tail(&list_kill, skb);
85573 }
85574@@ -1194,6 +1211,10 @@ try_again:
85575 if (!skb)
85576 goto out;
85577
85578+ err = gr_search_udp_recvmsg(sk, skb);
85579+ if (err)
85580+ goto out_free;
85581+
85582 ulen = skb->len - sizeof(struct udphdr);
85583 copied = len;
85584 if (copied > ulen)
85585@@ -1227,7 +1248,7 @@ try_again:
85586 if (unlikely(err)) {
85587 trace_kfree_skb(skb, udp_recvmsg);
85588 if (!peeked) {
85589- atomic_inc(&sk->sk_drops);
85590+ atomic_inc_unchecked(&sk->sk_drops);
85591 UDP_INC_STATS_USER(sock_net(sk),
85592 UDP_MIB_INERRORS, is_udplite);
85593 }
85594@@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
85595
85596 drop:
85597 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
85598- atomic_inc(&sk->sk_drops);
85599+ atomic_inc_unchecked(&sk->sk_drops);
85600 kfree_skb(skb);
85601 return -1;
85602 }
85603@@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
85604 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
85605
85606 if (!skb1) {
85607- atomic_inc(&sk->sk_drops);
85608+ atomic_inc_unchecked(&sk->sk_drops);
85609 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
85610 IS_UDPLITE(sk));
85611 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
85612@@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
85613 goto csum_error;
85614
85615 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
85616+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85617+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
85618+#endif
85619 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
85620
85621 /*
85622@@ -2120,7 +2144,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
85623 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
85624 0, sock_i_ino(sp),
85625 atomic_read(&sp->sk_refcnt), sp,
85626- atomic_read(&sp->sk_drops), len);
85627+ atomic_read_unchecked(&sp->sk_drops), len);
85628 }
85629
85630 int udp4_seq_show(struct seq_file *seq, void *v)
85631diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
85632index 1b5d8cb..ffb0833 100644
85633--- a/net/ipv6/addrconf.c
85634+++ b/net/ipv6/addrconf.c
85635@@ -2272,7 +2272,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
85636 p.iph.ihl = 5;
85637 p.iph.protocol = IPPROTO_IPV6;
85638 p.iph.ttl = 64;
85639- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
85640+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
85641
85642 if (ops->ndo_do_ioctl) {
85643 mm_segment_t oldfs = get_fs();
85644@@ -4388,7 +4388,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
85645 int *valp = ctl->data;
85646 int val = *valp;
85647 loff_t pos = *ppos;
85648- ctl_table lctl;
85649+ ctl_table_no_const lctl;
85650 int ret;
85651
85652 /*
85653@@ -4470,7 +4470,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
85654 int *valp = ctl->data;
85655 int val = *valp;
85656 loff_t pos = *ppos;
85657- ctl_table lctl;
85658+ ctl_table_no_const lctl;
85659 int ret;
85660
85661 /*
85662diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
85663index fff5bdd..15194fb 100644
85664--- a/net/ipv6/icmp.c
85665+++ b/net/ipv6/icmp.c
85666@@ -973,7 +973,7 @@ ctl_table ipv6_icmp_table_template[] = {
85667
85668 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
85669 {
85670- struct ctl_table *table;
85671+ ctl_table_no_const *table;
85672
85673 table = kmemdup(ipv6_icmp_table_template,
85674 sizeof(ipv6_icmp_table_template),
85675diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
85676index 131dd09..f7ed64f 100644
85677--- a/net/ipv6/ip6_gre.c
85678+++ b/net/ipv6/ip6_gre.c
85679@@ -73,7 +73,7 @@ struct ip6gre_net {
85680 struct net_device *fb_tunnel_dev;
85681 };
85682
85683-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
85684+static struct rtnl_link_ops ip6gre_link_ops;
85685 static int ip6gre_tunnel_init(struct net_device *dev);
85686 static void ip6gre_tunnel_setup(struct net_device *dev);
85687 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
85688@@ -1337,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
85689 }
85690
85691
85692-static struct inet6_protocol ip6gre_protocol __read_mostly = {
85693+static struct inet6_protocol ip6gre_protocol = {
85694 .handler = ip6gre_rcv,
85695 .err_handler = ip6gre_err,
85696 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
85697@@ -1671,7 +1671,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
85698 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
85699 };
85700
85701-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
85702+static struct rtnl_link_ops ip6gre_link_ops = {
85703 .kind = "ip6gre",
85704 .maxtype = IFLA_GRE_MAX,
85705 .policy = ip6gre_policy,
85706@@ -1684,7 +1684,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
85707 .fill_info = ip6gre_fill_info,
85708 };
85709
85710-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
85711+static struct rtnl_link_ops ip6gre_tap_ops = {
85712 .kind = "ip6gretap",
85713 .maxtype = IFLA_GRE_MAX,
85714 .policy = ip6gre_policy,
85715diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
85716index a14f28b..b4b8956 100644
85717--- a/net/ipv6/ip6_tunnel.c
85718+++ b/net/ipv6/ip6_tunnel.c
85719@@ -87,7 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
85720
85721 static int ip6_tnl_dev_init(struct net_device *dev);
85722 static void ip6_tnl_dev_setup(struct net_device *dev);
85723-static struct rtnl_link_ops ip6_link_ops __read_mostly;
85724+static struct rtnl_link_ops ip6_link_ops;
85725
85726 static int ip6_tnl_net_id __read_mostly;
85727 struct ip6_tnl_net {
85728@@ -1686,7 +1686,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
85729 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
85730 };
85731
85732-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
85733+static struct rtnl_link_ops ip6_link_ops = {
85734 .kind = "ip6tnl",
85735 .maxtype = IFLA_IPTUN_MAX,
85736 .policy = ip6_tnl_policy,
85737diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
85738index d1e2e8e..51c19ae 100644
85739--- a/net/ipv6/ipv6_sockglue.c
85740+++ b/net/ipv6/ipv6_sockglue.c
85741@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
85742 if (sk->sk_type != SOCK_STREAM)
85743 return -ENOPROTOOPT;
85744
85745- msg.msg_control = optval;
85746+ msg.msg_control = (void __force_kernel *)optval;
85747 msg.msg_controllen = len;
85748 msg.msg_flags = flags;
85749
85750diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
85751index 125a90d..2a11f36 100644
85752--- a/net/ipv6/netfilter/ip6_tables.c
85753+++ b/net/ipv6/netfilter/ip6_tables.c
85754@@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
85755 #endif
85756
85757 static int get_info(struct net *net, void __user *user,
85758- const int *len, int compat)
85759+ int len, int compat)
85760 {
85761 char name[XT_TABLE_MAXNAMELEN];
85762 struct xt_table *t;
85763 int ret;
85764
85765- if (*len != sizeof(struct ip6t_getinfo)) {
85766- duprintf("length %u != %zu\n", *len,
85767+ if (len != sizeof(struct ip6t_getinfo)) {
85768+ duprintf("length %u != %zu\n", len,
85769 sizeof(struct ip6t_getinfo));
85770 return -EINVAL;
85771 }
85772@@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
85773 info.size = private->size;
85774 strcpy(info.name, name);
85775
85776- if (copy_to_user(user, &info, *len) != 0)
85777+ if (copy_to_user(user, &info, len) != 0)
85778 ret = -EFAULT;
85779 else
85780 ret = 0;
85781@@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85782
85783 switch (cmd) {
85784 case IP6T_SO_GET_INFO:
85785- ret = get_info(sock_net(sk), user, len, 1);
85786+ ret = get_info(sock_net(sk), user, *len, 1);
85787 break;
85788 case IP6T_SO_GET_ENTRIES:
85789 ret = compat_get_entries(sock_net(sk), user, len);
85790@@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85791
85792 switch (cmd) {
85793 case IP6T_SO_GET_INFO:
85794- ret = get_info(sock_net(sk), user, len, 0);
85795+ ret = get_info(sock_net(sk), user, *len, 0);
85796 break;
85797
85798 case IP6T_SO_GET_ENTRIES:
85799diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
85800index 3dacecc..2939087 100644
85801--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
85802+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
85803@@ -87,12 +87,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
85804
85805 static int nf_ct_frag6_sysctl_register(struct net *net)
85806 {
85807- struct ctl_table *table;
85808+ ctl_table_no_const *table = NULL;
85809 struct ctl_table_header *hdr;
85810
85811- table = nf_ct_frag6_sysctl_table;
85812 if (!net_eq(net, &init_net)) {
85813- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
85814+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
85815 GFP_KERNEL);
85816 if (table == NULL)
85817 goto err_alloc;
85818@@ -100,9 +99,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
85819 table[0].data = &net->ipv6.frags.high_thresh;
85820 table[1].data = &net->ipv6.frags.low_thresh;
85821 table[2].data = &net->ipv6.frags.timeout;
85822- }
85823-
85824- hdr = register_net_sysctl(net, "net/netfilter", table);
85825+ hdr = register_net_sysctl(net, "net/netfilter", table);
85826+ } else
85827+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
85828 if (hdr == NULL)
85829 goto err_reg;
85830
85831@@ -110,8 +109,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
85832 return 0;
85833
85834 err_reg:
85835- if (!net_eq(net, &init_net))
85836- kfree(table);
85837+ kfree(table);
85838 err_alloc:
85839 return -ENOMEM;
85840 }
85841diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
85842index 70fa814..d70c28c 100644
85843--- a/net/ipv6/raw.c
85844+++ b/net/ipv6/raw.c
85845@@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
85846 {
85847 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
85848 skb_checksum_complete(skb)) {
85849- atomic_inc(&sk->sk_drops);
85850+ atomic_inc_unchecked(&sk->sk_drops);
85851 kfree_skb(skb);
85852 return NET_RX_DROP;
85853 }
85854@@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
85855 struct raw6_sock *rp = raw6_sk(sk);
85856
85857 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
85858- atomic_inc(&sk->sk_drops);
85859+ atomic_inc_unchecked(&sk->sk_drops);
85860 kfree_skb(skb);
85861 return NET_RX_DROP;
85862 }
85863@@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
85864
85865 if (inet->hdrincl) {
85866 if (skb_checksum_complete(skb)) {
85867- atomic_inc(&sk->sk_drops);
85868+ atomic_inc_unchecked(&sk->sk_drops);
85869 kfree_skb(skb);
85870 return NET_RX_DROP;
85871 }
85872@@ -604,7 +604,7 @@ out:
85873 return err;
85874 }
85875
85876-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
85877+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
85878 struct flowi6 *fl6, struct dst_entry **dstp,
85879 unsigned int flags)
85880 {
85881@@ -916,12 +916,15 @@ do_confirm:
85882 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
85883 char __user *optval, int optlen)
85884 {
85885+ struct icmp6_filter filter;
85886+
85887 switch (optname) {
85888 case ICMPV6_FILTER:
85889 if (optlen > sizeof(struct icmp6_filter))
85890 optlen = sizeof(struct icmp6_filter);
85891- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
85892+ if (copy_from_user(&filter, optval, optlen))
85893 return -EFAULT;
85894+ raw6_sk(sk)->filter = filter;
85895 return 0;
85896 default:
85897 return -ENOPROTOOPT;
85898@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
85899 char __user *optval, int __user *optlen)
85900 {
85901 int len;
85902+ struct icmp6_filter filter;
85903
85904 switch (optname) {
85905 case ICMPV6_FILTER:
85906@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
85907 len = sizeof(struct icmp6_filter);
85908 if (put_user(len, optlen))
85909 return -EFAULT;
85910- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
85911+ filter = raw6_sk(sk)->filter;
85912+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
85913 return -EFAULT;
85914 return 0;
85915 default:
85916@@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
85917 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
85918 0,
85919 sock_i_ino(sp),
85920- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
85921+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
85922 }
85923
85924 static int raw6_seq_show(struct seq_file *seq, void *v)
85925diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
85926index e5253ec..0410257 100644
85927--- a/net/ipv6/reassembly.c
85928+++ b/net/ipv6/reassembly.c
85929@@ -604,12 +604,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
85930
85931 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
85932 {
85933- struct ctl_table *table;
85934+ ctl_table_no_const *table = NULL;
85935 struct ctl_table_header *hdr;
85936
85937- table = ip6_frags_ns_ctl_table;
85938 if (!net_eq(net, &init_net)) {
85939- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
85940+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
85941 if (table == NULL)
85942 goto err_alloc;
85943
85944@@ -620,9 +619,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
85945 /* Don't export sysctls to unprivileged users */
85946 if (net->user_ns != &init_user_ns)
85947 table[0].procname = NULL;
85948- }
85949+ hdr = register_net_sysctl(net, "net/ipv6", table);
85950+ } else
85951+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
85952
85953- hdr = register_net_sysctl(net, "net/ipv6", table);
85954 if (hdr == NULL)
85955 goto err_reg;
85956
85957@@ -630,8 +630,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
85958 return 0;
85959
85960 err_reg:
85961- if (!net_eq(net, &init_net))
85962- kfree(table);
85963+ kfree(table);
85964 err_alloc:
85965 return -ENOMEM;
85966 }
85967diff --git a/net/ipv6/route.c b/net/ipv6/route.c
85968index 6f9f7b6..2306d63 100644
85969--- a/net/ipv6/route.c
85970+++ b/net/ipv6/route.c
85971@@ -2965,7 +2965,7 @@ ctl_table ipv6_route_table_template[] = {
85972
85973 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
85974 {
85975- struct ctl_table *table;
85976+ ctl_table_no_const *table;
85977
85978 table = kmemdup(ipv6_route_table_template,
85979 sizeof(ipv6_route_table_template),
85980diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
85981index cfba99b..20ca511 100644
85982--- a/net/ipv6/sit.c
85983+++ b/net/ipv6/sit.c
85984@@ -72,7 +72,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
85985 static int ipip6_tunnel_init(struct net_device *dev);
85986 static void ipip6_tunnel_setup(struct net_device *dev);
85987 static void ipip6_dev_free(struct net_device *dev);
85988-static struct rtnl_link_ops sit_link_ops __read_mostly;
85989+static struct rtnl_link_ops sit_link_ops;
85990
85991 static int sit_net_id __read_mostly;
85992 struct sit_net {
85993@@ -1463,7 +1463,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
85994 #endif
85995 };
85996
85997-static struct rtnl_link_ops sit_link_ops __read_mostly = {
85998+static struct rtnl_link_ops sit_link_ops = {
85999 .kind = "sit",
86000 .maxtype = IFLA_IPTUN_MAX,
86001 .policy = ipip6_policy,
86002diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
86003index e85c48b..b8268d3 100644
86004--- a/net/ipv6/sysctl_net_ipv6.c
86005+++ b/net/ipv6/sysctl_net_ipv6.c
86006@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
86007
86008 static int __net_init ipv6_sysctl_net_init(struct net *net)
86009 {
86010- struct ctl_table *ipv6_table;
86011+ ctl_table_no_const *ipv6_table;
86012 struct ctl_table *ipv6_route_table;
86013 struct ctl_table *ipv6_icmp_table;
86014 int err;
86015diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
86016index 4f435371..5de9da7 100644
86017--- a/net/ipv6/tcp_ipv6.c
86018+++ b/net/ipv6/tcp_ipv6.c
86019@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
86020 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
86021 }
86022
86023+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86024+extern int grsec_enable_blackhole;
86025+#endif
86026+
86027 static void tcp_v6_hash(struct sock *sk)
86028 {
86029 if (sk->sk_state != TCP_CLOSE) {
86030@@ -1433,6 +1437,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
86031 return 0;
86032
86033 reset:
86034+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86035+ if (!grsec_enable_blackhole)
86036+#endif
86037 tcp_v6_send_reset(sk, skb);
86038 discard:
86039 if (opt_skb)
86040@@ -1514,12 +1521,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
86041 TCP_SKB_CB(skb)->sacked = 0;
86042
86043 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
86044- if (!sk)
86045+ if (!sk) {
86046+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86047+ ret = 1;
86048+#endif
86049 goto no_tcp_socket;
86050+ }
86051
86052 process:
86053- if (sk->sk_state == TCP_TIME_WAIT)
86054+ if (sk->sk_state == TCP_TIME_WAIT) {
86055+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86056+ ret = 2;
86057+#endif
86058 goto do_time_wait;
86059+ }
86060
86061 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
86062 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
86063@@ -1568,6 +1583,10 @@ no_tcp_socket:
86064 bad_packet:
86065 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
86066 } else {
86067+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86068+ if (!grsec_enable_blackhole || (ret == 1 &&
86069+ (skb->dev->flags & IFF_LOOPBACK)))
86070+#endif
86071 tcp_v6_send_reset(NULL, skb);
86072 }
86073
86074diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
86075index fb08329..2d6919e 100644
86076--- a/net/ipv6/udp.c
86077+++ b/net/ipv6/udp.c
86078@@ -51,6 +51,10 @@
86079 #include <trace/events/skb.h>
86080 #include "udp_impl.h"
86081
86082+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86083+extern int grsec_enable_blackhole;
86084+#endif
86085+
86086 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
86087 {
86088 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
86089@@ -395,7 +399,7 @@ try_again:
86090 if (unlikely(err)) {
86091 trace_kfree_skb(skb, udpv6_recvmsg);
86092 if (!peeked) {
86093- atomic_inc(&sk->sk_drops);
86094+ atomic_inc_unchecked(&sk->sk_drops);
86095 if (is_udp4)
86096 UDP_INC_STATS_USER(sock_net(sk),
86097 UDP_MIB_INERRORS,
86098@@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86099 return rc;
86100 drop:
86101 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
86102- atomic_inc(&sk->sk_drops);
86103+ atomic_inc_unchecked(&sk->sk_drops);
86104 kfree_skb(skb);
86105 return -1;
86106 }
86107@@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
86108 if (likely(skb1 == NULL))
86109 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
86110 if (!skb1) {
86111- atomic_inc(&sk->sk_drops);
86112+ atomic_inc_unchecked(&sk->sk_drops);
86113 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
86114 IS_UDPLITE(sk));
86115 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
86116@@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
86117 goto discard;
86118
86119 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
86120+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86121+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
86122+#endif
86123 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
86124
86125 kfree_skb(skb);
86126@@ -1379,7 +1386,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
86127 0,
86128 sock_i_ino(sp),
86129 atomic_read(&sp->sk_refcnt), sp,
86130- atomic_read(&sp->sk_drops));
86131+ atomic_read_unchecked(&sp->sk_drops));
86132 }
86133
86134 int udp6_seq_show(struct seq_file *seq, void *v)
86135diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
86136index a68c88c..d55b0c5 100644
86137--- a/net/irda/ircomm/ircomm_tty.c
86138+++ b/net/irda/ircomm/ircomm_tty.c
86139@@ -312,12 +312,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
86140 add_wait_queue(&port->open_wait, &wait);
86141
86142 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
86143- __FILE__, __LINE__, tty->driver->name, port->count);
86144+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
86145
86146 spin_lock_irqsave(&port->lock, flags);
86147 if (!tty_hung_up_p(filp)) {
86148 extra_count = 1;
86149- port->count--;
86150+ atomic_dec(&port->count);
86151 }
86152 spin_unlock_irqrestore(&port->lock, flags);
86153 port->blocked_open++;
86154@@ -353,7 +353,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
86155 }
86156
86157 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
86158- __FILE__, __LINE__, tty->driver->name, port->count);
86159+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
86160
86161 schedule();
86162 }
86163@@ -364,13 +364,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
86164 if (extra_count) {
86165 /* ++ is not atomic, so this should be protected - Jean II */
86166 spin_lock_irqsave(&port->lock, flags);
86167- port->count++;
86168+ atomic_inc(&port->count);
86169 spin_unlock_irqrestore(&port->lock, flags);
86170 }
86171 port->blocked_open--;
86172
86173 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
86174- __FILE__, __LINE__, tty->driver->name, port->count);
86175+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
86176
86177 if (!retval)
86178 port->flags |= ASYNC_NORMAL_ACTIVE;
86179@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
86180
86181 /* ++ is not atomic, so this should be protected - Jean II */
86182 spin_lock_irqsave(&self->port.lock, flags);
86183- self->port.count++;
86184+ atomic_inc(&self->port.count);
86185 spin_unlock_irqrestore(&self->port.lock, flags);
86186 tty_port_tty_set(&self->port, tty);
86187
86188 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
86189- self->line, self->port.count);
86190+ self->line, atomic_read(&self->port.count));
86191
86192 /* Not really used by us, but lets do it anyway */
86193 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
86194@@ -986,7 +986,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
86195 tty_kref_put(port->tty);
86196 }
86197 port->tty = NULL;
86198- port->count = 0;
86199+ atomic_set(&port->count, 0);
86200 spin_unlock_irqrestore(&port->lock, flags);
86201
86202 wake_up_interruptible(&port->open_wait);
86203@@ -1343,7 +1343,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
86204 seq_putc(m, '\n');
86205
86206 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
86207- seq_printf(m, "Open count: %d\n", self->port.count);
86208+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
86209 seq_printf(m, "Max data size: %d\n", self->max_data_size);
86210 seq_printf(m, "Max header size: %d\n", self->max_header_size);
86211
86212diff --git a/net/irda/iriap.c b/net/irda/iriap.c
86213index e71e85b..29340a9 100644
86214--- a/net/irda/iriap.c
86215+++ b/net/irda/iriap.c
86216@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
86217 /* case CS_ISO_8859_9: */
86218 /* case CS_UNICODE: */
86219 default:
86220- IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
86221- __func__, ias_charset_types[charset]);
86222+ IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
86223+ __func__, charset,
86224+ charset < ARRAY_SIZE(ias_charset_types) ?
86225+ ias_charset_types[charset] :
86226+ "(unknown)");
86227
86228 /* Aborting, close connection! */
86229 iriap_disconnect_request(self);
86230diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
86231index cd6f7a9..e63fe89 100644
86232--- a/net/iucv/af_iucv.c
86233+++ b/net/iucv/af_iucv.c
86234@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
86235
86236 write_lock_bh(&iucv_sk_list.lock);
86237
86238- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
86239+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
86240 while (__iucv_get_sock_by_name(name)) {
86241 sprintf(name, "%08x",
86242- atomic_inc_return(&iucv_sk_list.autobind_name));
86243+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
86244 }
86245
86246 write_unlock_bh(&iucv_sk_list.lock);
86247diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
86248index df08250..02021fe 100644
86249--- a/net/iucv/iucv.c
86250+++ b/net/iucv/iucv.c
86251@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
86252 return NOTIFY_OK;
86253 }
86254
86255-static struct notifier_block __refdata iucv_cpu_notifier = {
86256+static struct notifier_block iucv_cpu_notifier = {
86257 .notifier_call = iucv_cpu_notify,
86258 };
86259
86260diff --git a/net/key/af_key.c b/net/key/af_key.c
86261index 5b426a6..970032b 100644
86262--- a/net/key/af_key.c
86263+++ b/net/key/af_key.c
86264@@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
86265 static u32 get_acqseq(void)
86266 {
86267 u32 res;
86268- static atomic_t acqseq;
86269+ static atomic_unchecked_t acqseq;
86270
86271 do {
86272- res = atomic_inc_return(&acqseq);
86273+ res = atomic_inc_return_unchecked(&acqseq);
86274 } while (!res);
86275 return res;
86276 }
86277diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
86278index 716605c..044e9e1 100644
86279--- a/net/l2tp/l2tp_ppp.c
86280+++ b/net/l2tp/l2tp_ppp.c
86281@@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
86282 l2tp_xmit_skb(session, skb, session->hdr_len);
86283
86284 sock_put(ps->tunnel_sock);
86285+ sock_put(sk);
86286
86287 return error;
86288
86289diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
86290index 0479c64..d031db6 100644
86291--- a/net/mac80211/cfg.c
86292+++ b/net/mac80211/cfg.c
86293@@ -790,7 +790,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
86294 ret = ieee80211_vif_use_channel(sdata, chandef,
86295 IEEE80211_CHANCTX_EXCLUSIVE);
86296 }
86297- } else if (local->open_count == local->monitors) {
86298+ } else if (local_read(&local->open_count) == local->monitors) {
86299 local->_oper_channel = chandef->chan;
86300 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
86301 ieee80211_hw_config(local, 0);
86302@@ -2716,7 +2716,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
86303 else
86304 local->probe_req_reg--;
86305
86306- if (!local->open_count)
86307+ if (!local_read(&local->open_count))
86308 break;
86309
86310 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
86311diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
86312index 2ed065c..948177f 100644
86313--- a/net/mac80211/ieee80211_i.h
86314+++ b/net/mac80211/ieee80211_i.h
86315@@ -28,6 +28,7 @@
86316 #include <net/ieee80211_radiotap.h>
86317 #include <net/cfg80211.h>
86318 #include <net/mac80211.h>
86319+#include <asm/local.h>
86320 #include "key.h"
86321 #include "sta_info.h"
86322 #include "debug.h"
86323@@ -909,7 +910,7 @@ struct ieee80211_local {
86324 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
86325 spinlock_t queue_stop_reason_lock;
86326
86327- int open_count;
86328+ local_t open_count;
86329 int monitors, cooked_mntrs;
86330 /* number of interfaces with corresponding FIF_ flags */
86331 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
86332diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
86333index 8be854e..ad72a69 100644
86334--- a/net/mac80211/iface.c
86335+++ b/net/mac80211/iface.c
86336@@ -546,7 +546,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
86337 break;
86338 }
86339
86340- if (local->open_count == 0) {
86341+ if (local_read(&local->open_count) == 0) {
86342 res = drv_start(local);
86343 if (res)
86344 goto err_del_bss;
86345@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
86346 break;
86347 }
86348
86349- if (local->monitors == 0 && local->open_count == 0) {
86350+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
86351 res = ieee80211_add_virtual_monitor(local);
86352 if (res)
86353 goto err_stop;
86354@@ -699,7 +699,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
86355 mutex_unlock(&local->mtx);
86356
86357 if (coming_up)
86358- local->open_count++;
86359+ local_inc(&local->open_count);
86360
86361 if (hw_reconf_flags)
86362 ieee80211_hw_config(local, hw_reconf_flags);
86363@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
86364 err_del_interface:
86365 drv_remove_interface(local, sdata);
86366 err_stop:
86367- if (!local->open_count)
86368+ if (!local_read(&local->open_count))
86369 drv_stop(local);
86370 err_del_bss:
86371 sdata->bss = NULL;
86372@@ -827,7 +827,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
86373 }
86374
86375 if (going_down)
86376- local->open_count--;
86377+ local_dec(&local->open_count);
86378
86379 switch (sdata->vif.type) {
86380 case NL80211_IFTYPE_AP_VLAN:
86381@@ -884,7 +884,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
86382
86383 ieee80211_recalc_ps(local, -1);
86384
86385- if (local->open_count == 0) {
86386+ if (local_read(&local->open_count) == 0) {
86387 if (local->ops->napi_poll)
86388 napi_disable(&local->napi);
86389 ieee80211_clear_tx_pending(local);
86390@@ -910,7 +910,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
86391 }
86392 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
86393
86394- if (local->monitors == local->open_count && local->monitors > 0)
86395+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
86396 ieee80211_add_virtual_monitor(local);
86397 }
86398
86399diff --git a/net/mac80211/main.c b/net/mac80211/main.c
86400index 1b087ff..bf600e9 100644
86401--- a/net/mac80211/main.c
86402+++ b/net/mac80211/main.c
86403@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
86404 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
86405 IEEE80211_CONF_CHANGE_POWER);
86406
86407- if (changed && local->open_count) {
86408+ if (changed && local_read(&local->open_count)) {
86409 ret = drv_config(local, changed);
86410 /*
86411 * Goal:
86412diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
86413index 79a48f3..5e185c9 100644
86414--- a/net/mac80211/pm.c
86415+++ b/net/mac80211/pm.c
86416@@ -35,7 +35,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
86417 struct sta_info *sta;
86418 struct ieee80211_chanctx *ctx;
86419
86420- if (!local->open_count)
86421+ if (!local_read(&local->open_count))
86422 goto suspend;
86423
86424 ieee80211_scan_cancel(local);
86425@@ -73,7 +73,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
86426 cancel_work_sync(&local->dynamic_ps_enable_work);
86427 del_timer_sync(&local->dynamic_ps_timer);
86428
86429- local->wowlan = wowlan && local->open_count;
86430+ local->wowlan = wowlan && local_read(&local->open_count);
86431 if (local->wowlan) {
86432 int err = drv_suspend(local, wowlan);
86433 if (err < 0) {
86434@@ -187,7 +187,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
86435 mutex_unlock(&local->chanctx_mtx);
86436
86437 /* stop hardware - this must stop RX */
86438- if (local->open_count)
86439+ if (local_read(&local->open_count))
86440 ieee80211_stop_device(local);
86441
86442 suspend:
86443diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
86444index dd88381..eef4dd6 100644
86445--- a/net/mac80211/rate.c
86446+++ b/net/mac80211/rate.c
86447@@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
86448
86449 ASSERT_RTNL();
86450
86451- if (local->open_count)
86452+ if (local_read(&local->open_count))
86453 return -EBUSY;
86454
86455 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
86456diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
86457index c97a065..ff61928 100644
86458--- a/net/mac80211/rc80211_pid_debugfs.c
86459+++ b/net/mac80211/rc80211_pid_debugfs.c
86460@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
86461
86462 spin_unlock_irqrestore(&events->lock, status);
86463
86464- if (copy_to_user(buf, pb, p))
86465+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
86466 return -EFAULT;
86467
86468 return p;
86469diff --git a/net/mac80211/util.c b/net/mac80211/util.c
86470index f11e8c5..08d0013 100644
86471--- a/net/mac80211/util.c
86472+++ b/net/mac80211/util.c
86473@@ -1380,7 +1380,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
86474 }
86475 #endif
86476 /* everything else happens only if HW was up & running */
86477- if (!local->open_count)
86478+ if (!local_read(&local->open_count))
86479 goto wake_up;
86480
86481 /*
86482diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
86483index 49e96df..63a51c3 100644
86484--- a/net/netfilter/Kconfig
86485+++ b/net/netfilter/Kconfig
86486@@ -936,6 +936,16 @@ config NETFILTER_XT_MATCH_ESP
86487
86488 To compile it as a module, choose M here. If unsure, say N.
86489
86490+config NETFILTER_XT_MATCH_GRADM
86491+ tristate '"gradm" match support'
86492+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
86493+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
86494+ ---help---
86495+ The gradm match allows to match on grsecurity RBAC being enabled.
86496+ It is useful when iptables rules are applied early on bootup to
86497+ prevent connections to the machine (except from a trusted host)
86498+ while the RBAC system is disabled.
86499+
86500 config NETFILTER_XT_MATCH_HASHLIMIT
86501 tristate '"hashlimit" match support'
86502 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
86503diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
86504index 3259697..54d5393 100644
86505--- a/net/netfilter/Makefile
86506+++ b/net/netfilter/Makefile
86507@@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
86508 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
86509 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
86510 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
86511+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
86512 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
86513 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
86514 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
86515diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
86516index 6d6d8f2..a676749 100644
86517--- a/net/netfilter/ipset/ip_set_core.c
86518+++ b/net/netfilter/ipset/ip_set_core.c
86519@@ -1800,7 +1800,7 @@ done:
86520 return ret;
86521 }
86522
86523-static struct nf_sockopt_ops so_set __read_mostly = {
86524+static struct nf_sockopt_ops so_set = {
86525 .pf = PF_INET,
86526 .get_optmin = SO_IP_SET,
86527 .get_optmax = SO_IP_SET + 1,
86528diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
86529index 30e764a..c3b6a9d 100644
86530--- a/net/netfilter/ipvs/ip_vs_conn.c
86531+++ b/net/netfilter/ipvs/ip_vs_conn.c
86532@@ -554,7 +554,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
86533 /* Increase the refcnt counter of the dest */
86534 atomic_inc(&dest->refcnt);
86535
86536- conn_flags = atomic_read(&dest->conn_flags);
86537+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
86538 if (cp->protocol != IPPROTO_UDP)
86539 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
86540 flags = cp->flags;
86541@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
86542 atomic_set(&cp->refcnt, 1);
86543
86544 atomic_set(&cp->n_control, 0);
86545- atomic_set(&cp->in_pkts, 0);
86546+ atomic_set_unchecked(&cp->in_pkts, 0);
86547
86548 atomic_inc(&ipvs->conn_count);
86549 if (flags & IP_VS_CONN_F_NO_CPORT)
86550@@ -1180,7 +1180,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
86551
86552 /* Don't drop the entry if its number of incoming packets is not
86553 located in [0, 8] */
86554- i = atomic_read(&cp->in_pkts);
86555+ i = atomic_read_unchecked(&cp->in_pkts);
86556 if (i > 8 || i < 0) return 0;
86557
86558 if (!todrop_rate[i]) return 0;
86559diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
86560index 47edf5a..235b07d 100644
86561--- a/net/netfilter/ipvs/ip_vs_core.c
86562+++ b/net/netfilter/ipvs/ip_vs_core.c
86563@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
86564 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
86565 /* do not touch skb anymore */
86566
86567- atomic_inc(&cp->in_pkts);
86568+ atomic_inc_unchecked(&cp->in_pkts);
86569 ip_vs_conn_put(cp);
86570 return ret;
86571 }
86572@@ -1691,7 +1691,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
86573 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
86574 pkts = sysctl_sync_threshold(ipvs);
86575 else
86576- pkts = atomic_add_return(1, &cp->in_pkts);
86577+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
86578
86579 if (ipvs->sync_state & IP_VS_STATE_MASTER)
86580 ip_vs_sync_conn(net, cp, pkts);
86581diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
86582index ec664cb..7f34a77 100644
86583--- a/net/netfilter/ipvs/ip_vs_ctl.c
86584+++ b/net/netfilter/ipvs/ip_vs_ctl.c
86585@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
86586 ip_vs_rs_hash(ipvs, dest);
86587 write_unlock_bh(&ipvs->rs_lock);
86588 }
86589- atomic_set(&dest->conn_flags, conn_flags);
86590+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
86591
86592 /* bind the service */
86593 if (!dest->svc) {
86594@@ -1688,7 +1688,7 @@ proc_do_sync_ports(ctl_table *table, int write,
86595 * align with netns init in ip_vs_control_net_init()
86596 */
86597
86598-static struct ctl_table vs_vars[] = {
86599+static ctl_table_no_const vs_vars[] __read_only = {
86600 {
86601 .procname = "amemthresh",
86602 .maxlen = sizeof(int),
86603@@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
86604 " %-7s %-6d %-10d %-10d\n",
86605 &dest->addr.in6,
86606 ntohs(dest->port),
86607- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
86608+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
86609 atomic_read(&dest->weight),
86610 atomic_read(&dest->activeconns),
86611 atomic_read(&dest->inactconns));
86612@@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
86613 "%-7s %-6d %-10d %-10d\n",
86614 ntohl(dest->addr.ip),
86615 ntohs(dest->port),
86616- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
86617+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
86618 atomic_read(&dest->weight),
86619 atomic_read(&dest->activeconns),
86620 atomic_read(&dest->inactconns));
86621@@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
86622
86623 entry.addr = dest->addr.ip;
86624 entry.port = dest->port;
86625- entry.conn_flags = atomic_read(&dest->conn_flags);
86626+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
86627 entry.weight = atomic_read(&dest->weight);
86628 entry.u_threshold = dest->u_threshold;
86629 entry.l_threshold = dest->l_threshold;
86630@@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
86631 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
86632 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
86633 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
86634- (atomic_read(&dest->conn_flags) &
86635+ (atomic_read_unchecked(&dest->conn_flags) &
86636 IP_VS_CONN_F_FWD_MASK)) ||
86637 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
86638 atomic_read(&dest->weight)) ||
86639@@ -3688,7 +3688,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
86640 {
86641 int idx;
86642 struct netns_ipvs *ipvs = net_ipvs(net);
86643- struct ctl_table *tbl;
86644+ ctl_table_no_const *tbl;
86645
86646 atomic_set(&ipvs->dropentry, 0);
86647 spin_lock_init(&ipvs->dropentry_lock);
86648diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
86649index fdd89b9..bd96aa9 100644
86650--- a/net/netfilter/ipvs/ip_vs_lblc.c
86651+++ b/net/netfilter/ipvs/ip_vs_lblc.c
86652@@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
86653 * IPVS LBLC sysctl table
86654 */
86655 #ifdef CONFIG_SYSCTL
86656-static ctl_table vs_vars_table[] = {
86657+static ctl_table_no_const vs_vars_table[] __read_only = {
86658 {
86659 .procname = "lblc_expiration",
86660 .data = NULL,
86661diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
86662index c03b6a3..8ce3681 100644
86663--- a/net/netfilter/ipvs/ip_vs_lblcr.c
86664+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
86665@@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
86666 * IPVS LBLCR sysctl table
86667 */
86668
86669-static ctl_table vs_vars_table[] = {
86670+static ctl_table_no_const vs_vars_table[] __read_only = {
86671 {
86672 .procname = "lblcr_expiration",
86673 .data = NULL,
86674diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
86675index 44fd10c..2a163b3 100644
86676--- a/net/netfilter/ipvs/ip_vs_sync.c
86677+++ b/net/netfilter/ipvs/ip_vs_sync.c
86678@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
86679 cp = cp->control;
86680 if (cp) {
86681 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
86682- pkts = atomic_add_return(1, &cp->in_pkts);
86683+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
86684 else
86685 pkts = sysctl_sync_threshold(ipvs);
86686 ip_vs_sync_conn(net, cp->control, pkts);
86687@@ -758,7 +758,7 @@ control:
86688 if (!cp)
86689 return;
86690 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
86691- pkts = atomic_add_return(1, &cp->in_pkts);
86692+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
86693 else
86694 pkts = sysctl_sync_threshold(ipvs);
86695 goto sloop;
86696@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
86697
86698 if (opt)
86699 memcpy(&cp->in_seq, opt, sizeof(*opt));
86700- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
86701+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
86702 cp->state = state;
86703 cp->old_state = cp->state;
86704 /*
86705diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
86706index ee6b7a9..f9a89f6 100644
86707--- a/net/netfilter/ipvs/ip_vs_xmit.c
86708+++ b/net/netfilter/ipvs/ip_vs_xmit.c
86709@@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
86710 else
86711 rc = NF_ACCEPT;
86712 /* do not touch skb anymore */
86713- atomic_inc(&cp->in_pkts);
86714+ atomic_inc_unchecked(&cp->in_pkts);
86715 goto out;
86716 }
86717
86718@@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
86719 else
86720 rc = NF_ACCEPT;
86721 /* do not touch skb anymore */
86722- atomic_inc(&cp->in_pkts);
86723+ atomic_inc_unchecked(&cp->in_pkts);
86724 goto out;
86725 }
86726
86727diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
86728index 7df424e..a527b02 100644
86729--- a/net/netfilter/nf_conntrack_acct.c
86730+++ b/net/netfilter/nf_conntrack_acct.c
86731@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
86732 #ifdef CONFIG_SYSCTL
86733 static int nf_conntrack_acct_init_sysctl(struct net *net)
86734 {
86735- struct ctl_table *table;
86736+ ctl_table_no_const *table;
86737
86738 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
86739 GFP_KERNEL);
86740diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
86741index e4a0c4f..c263f28 100644
86742--- a/net/netfilter/nf_conntrack_core.c
86743+++ b/net/netfilter/nf_conntrack_core.c
86744@@ -1529,6 +1529,10 @@ err_extend:
86745 #define DYING_NULLS_VAL ((1<<30)+1)
86746 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
86747
86748+#ifdef CONFIG_GRKERNSEC_HIDESYM
86749+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
86750+#endif
86751+
86752 static int nf_conntrack_init_net(struct net *net)
86753 {
86754 int ret;
86755@@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
86756 goto err_stat;
86757 }
86758
86759+#ifdef CONFIG_GRKERNSEC_HIDESYM
86760+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
86761+#else
86762 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
86763+#endif
86764 if (!net->ct.slabname) {
86765 ret = -ENOMEM;
86766 goto err_slabname;
86767diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
86768index faa978f..1afb18f 100644
86769--- a/net/netfilter/nf_conntrack_ecache.c
86770+++ b/net/netfilter/nf_conntrack_ecache.c
86771@@ -186,7 +186,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
86772 #ifdef CONFIG_SYSCTL
86773 static int nf_conntrack_event_init_sysctl(struct net *net)
86774 {
86775- struct ctl_table *table;
86776+ ctl_table_no_const *table;
86777
86778 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
86779 GFP_KERNEL);
86780diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
86781index 884f2b3..d53b33a 100644
86782--- a/net/netfilter/nf_conntrack_helper.c
86783+++ b/net/netfilter/nf_conntrack_helper.c
86784@@ -55,7 +55,7 @@ static struct ctl_table helper_sysctl_table[] = {
86785
86786 static int nf_conntrack_helper_init_sysctl(struct net *net)
86787 {
86788- struct ctl_table *table;
86789+ ctl_table_no_const *table;
86790
86791 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
86792 GFP_KERNEL);
86793diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
86794index 51e928d..72a413a 100644
86795--- a/net/netfilter/nf_conntrack_proto.c
86796+++ b/net/netfilter/nf_conntrack_proto.c
86797@@ -51,7 +51,7 @@ nf_ct_register_sysctl(struct net *net,
86798
86799 static void
86800 nf_ct_unregister_sysctl(struct ctl_table_header **header,
86801- struct ctl_table **table,
86802+ ctl_table_no_const **table,
86803 unsigned int users)
86804 {
86805 if (users > 0)
86806diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
86807index e7185c6..4ad6c9c 100644
86808--- a/net/netfilter/nf_conntrack_standalone.c
86809+++ b/net/netfilter/nf_conntrack_standalone.c
86810@@ -470,7 +470,7 @@ static ctl_table nf_ct_netfilter_table[] = {
86811
86812 static int nf_conntrack_standalone_init_sysctl(struct net *net)
86813 {
86814- struct ctl_table *table;
86815+ ctl_table_no_const *table;
86816
86817 if (net_eq(net, &init_net)) {
86818 nf_ct_netfilter_header =
86819diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
86820index 7ea8026..bc9512d 100644
86821--- a/net/netfilter/nf_conntrack_timestamp.c
86822+++ b/net/netfilter/nf_conntrack_timestamp.c
86823@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
86824 #ifdef CONFIG_SYSCTL
86825 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
86826 {
86827- struct ctl_table *table;
86828+ ctl_table_no_const *table;
86829
86830 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
86831 GFP_KERNEL);
86832diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
86833index 9e31269..bc4c1b7 100644
86834--- a/net/netfilter/nf_log.c
86835+++ b/net/netfilter/nf_log.c
86836@@ -215,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
86837
86838 #ifdef CONFIG_SYSCTL
86839 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
86840-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
86841+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
86842 static struct ctl_table_header *nf_log_dir_header;
86843
86844 static int nf_log_proc_dostring(ctl_table *table, int write,
86845@@ -246,14 +246,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
86846 rcu_assign_pointer(nf_loggers[tindex], logger);
86847 mutex_unlock(&nf_log_mutex);
86848 } else {
86849+ ctl_table_no_const nf_log_table = *table;
86850+
86851 mutex_lock(&nf_log_mutex);
86852 logger = rcu_dereference_protected(nf_loggers[tindex],
86853 lockdep_is_held(&nf_log_mutex));
86854 if (!logger)
86855- table->data = "NONE";
86856+ nf_log_table.data = "NONE";
86857 else
86858- table->data = logger->name;
86859- r = proc_dostring(table, write, buffer, lenp, ppos);
86860+ nf_log_table.data = logger->name;
86861+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
86862 mutex_unlock(&nf_log_mutex);
86863 }
86864
86865diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
86866index f042ae5..30ea486 100644
86867--- a/net/netfilter/nf_sockopt.c
86868+++ b/net/netfilter/nf_sockopt.c
86869@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
86870 }
86871 }
86872
86873- list_add(&reg->list, &nf_sockopts);
86874+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
86875 out:
86876 mutex_unlock(&nf_sockopt_mutex);
86877 return ret;
86878@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
86879 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
86880 {
86881 mutex_lock(&nf_sockopt_mutex);
86882- list_del(&reg->list);
86883+ pax_list_del((struct list_head *)&reg->list);
86884 mutex_unlock(&nf_sockopt_mutex);
86885 }
86886 EXPORT_SYMBOL(nf_unregister_sockopt);
86887diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
86888index 92fd8ec..3f6ea4b 100644
86889--- a/net/netfilter/nfnetlink_log.c
86890+++ b/net/netfilter/nfnetlink_log.c
86891@@ -72,7 +72,7 @@ struct nfulnl_instance {
86892 };
86893
86894 static DEFINE_SPINLOCK(instances_lock);
86895-static atomic_t global_seq;
86896+static atomic_unchecked_t global_seq;
86897
86898 #define INSTANCE_BUCKETS 16
86899 static struct hlist_head instance_table[INSTANCE_BUCKETS];
86900@@ -537,7 +537,7 @@ __build_packet_message(struct nfulnl_instance *inst,
86901 /* global sequence number */
86902 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
86903 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
86904- htonl(atomic_inc_return(&global_seq))))
86905+ htonl(atomic_inc_return_unchecked(&global_seq))))
86906 goto nla_put_failure;
86907
86908 if (data_len) {
86909diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
86910new file mode 100644
86911index 0000000..c566332
86912--- /dev/null
86913+++ b/net/netfilter/xt_gradm.c
86914@@ -0,0 +1,51 @@
86915+/*
86916+ * gradm match for netfilter
86917