fe2de317 1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
572b4308 2index b4a898f..781c7ad 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
4c928ab7 17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
572b4308 25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
572b4308 52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
572b4308 60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
572b4308 85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
572b4308 97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
572b4308 113@@ -145,7 +163,7 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
572b4308 122@@ -153,7 +171,7 @@ kxgettext
123 lkc_defs.h
124 lex.c
125 lex.*.c
126-linux
4c928ab7 127+lib1funcs.S
128 logo_*.c
129 logo_*_clut224.c
130 logo_*_mono.c
572b4308 131@@ -164,14 +182,15 @@ machtypes.h
132 map
133 map_hugetlb
134 maui_boot.h
135-media
136 mconf
137+mdp
138 miboot*
139 mk_elfconfig
140 mkboot
141 mkbugboot
142 mkcpustr
143 mkdep
144+mkpiggy
145 mkprep
146 mkregtable
147 mktables
572b4308 148@@ -188,6 +207,7 @@ oui.c*
149 page-types
150 parse.c
151 parse.h
152+parse-events*
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
572b4308 156@@ -197,6 +217,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160+pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
572b4308 164@@ -207,6 +228,7 @@ r300_reg_safe.h
165 r420_reg_safe.h
166 r600_reg_safe.h
167 recordmcount
168+regdb.c
169 relocs
170 rlim_names.h
171 rn50_reg_safe.h
572b4308 172@@ -216,7 +238,9 @@ series
c1e3898a 173 setup
174 setup.bin
175 setup.elf
c1e3898a 176+size_overflow_hash.h
177 sImage
178+slabinfo
179 sm_tbl*
180 split-include
181 syscalltab.h
572b4308 182@@ -227,6 +251,7 @@ tftpboot.img
183 timeconst.h
184 times.h*
185 trix_boot.h
186+user_constants.h
187 utsrelease.h*
188 vdso-syms.lds
189 vdso.lds
572b4308 190@@ -238,13 +263,17 @@ vdso32.lds
191 vdso32.so.dbg
192 vdso64.lds
193 vdso64.so.dbg
194+vdsox32.lds
195+vdsox32-syms.lds
196 version.h*
197 vmImage
198 vmlinux
199 vmlinux-*
200 vmlinux.aout
201 vmlinux.bin.all
202+vmlinux.bin.bz2
203 vmlinux.lds
204+vmlinux.relocs
205 vmlinuz
206 voffset.h
207 vsyscall.lds
572b4308 208@@ -252,9 +281,11 @@ vsyscall_32.lds
209 wanxlfw.inc
210 uImage
211 unifdef
212+utsrelease.h
213 wakeup.bin
214 wakeup.elf
215 wakeup.lds
216 zImage*
217 zconf.hash.c
218+zconf.lex.c
219 zoffset.h
220diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
c6e2a6c8 221index c1601e5..08557ce 100644
222--- a/Documentation/kernel-parameters.txt
223+++ b/Documentation/kernel-parameters.txt
c6e2a6c8 224@@ -2021,6 +2021,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
225 the specified number of seconds. This is to be used if
226 your oopses keep scrolling off the screen.
227
228+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
229+ virtualization environments that don't cope well with the
230+ expand down segment used by UDEREF on X86-32 or the frequent
231+ page table updates on X86-64.
232+
233+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
234+
235 pcbit= [HW,ISDN]
236
237 pcd. [PARIDE]
238diff --git a/Makefile b/Makefile
572b4308 239index 5d0edcb..f69ee4c 100644
240--- a/Makefile
241+++ b/Makefile
242@@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
243
244 HOSTCC = gcc
245 HOSTCXX = g++
246-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
247-HOSTCXXFLAGS = -O2
248+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
249+HOSTCLFAGS += $(call cc-option, -Wno-empty-body)
250+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
251
252 # Decide whether to build built-in, modular, or both.
253 # Normally, just do built-in.
254@@ -407,8 +408,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
255 # Rules shared between *config targets and build targets
256
257 # Basic helpers built in scripts/
258-PHONY += scripts_basic
259-scripts_basic:
260+PHONY += scripts_basic gcc-plugins
261+scripts_basic: gcc-plugins
262 $(Q)$(MAKE) $(build)=scripts/basic
263 $(Q)rm -f .tmp_quiet_recordmcount
264
572b4308 265@@ -564,6 +565,60 @@ else
266 KBUILD_CFLAGS += -O2
267 endif
268
269+ifndef DISABLE_PAX_PLUGINS
270+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
271+ifneq ($(PLUGINCC),)
fe2de317 272+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
5e856224 273+ifndef CONFIG_UML
4c928ab7 274+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
fe2de317 275+endif
5e856224 276+endif
fe2de317 277+ifdef CONFIG_PAX_MEMORY_STACKLEAK
278+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
279+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
280+endif
281+ifdef CONFIG_KALLOCSTAT_PLUGIN
4c928ab7 282+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
283+endif
284+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
285+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
286+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
287+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
288+endif
289+ifdef CONFIG_CHECKER_PLUGIN
290+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
291+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
292+endif
fe2de317 293+endif
294+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
295+ifdef CONFIG_PAX_SIZE_OVERFLOW
296+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
297+endif
298+ifdef CONFIG_PAX_LATENT_ENTROPY
299+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
fe2de317 300+endif
4c928ab7 301+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
302+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
303+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
4c928ab7 304+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
572b4308 305+export PLUGINCC CONSTIFY_PLUGIN
4c928ab7 306+ifeq ($(KBUILD_EXTMOD),)
307+gcc-plugins:
308+ $(Q)$(MAKE) $(build)=tools/gcc
309+else
310+gcc-plugins: ;
311+endif
312+else
313+gcc-plugins:
314+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
315+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
316+else
317+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
318+endif
319+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
320+endif
321+endif
322+
323 include $(srctree)/arch/$(SRCARCH)/Makefile
324
325 ifneq ($(CONFIG_FRAME_WARN),0)
572b4308 326@@ -708,7 +763,7 @@ export mod_strip_cmd
327
328
329 ifeq ($(KBUILD_EXTMOD),)
330-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
331+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
332
333 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
334 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
572b4308 335@@ -932,6 +987,8 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
336
337 # The actual objects are generated when descending,
338 # make sure no implicit rule kicks in
339+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
340+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
341 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
342
343 # Handle descending into subdirectories listed in $(vmlinux-dirs)
572b4308 344@@ -941,7 +998,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
345 # Error messages still appears in the original language
346
347 PHONY += $(vmlinux-dirs)
348-$(vmlinux-dirs): prepare scripts
349+$(vmlinux-dirs): gcc-plugins prepare scripts
350 $(Q)$(MAKE) $(build)=$@
351
352 # Store (new) KERNELRELASE string in include/config/kernel.release
572b4308 353@@ -985,6 +1042,7 @@ prepare0: archprepare FORCE
4c928ab7 354 $(Q)$(MAKE) $(build)=.
355
356 # All the preparing..
4c928ab7 357+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
358 prepare: prepare0
359
360 # Generate some files
572b4308 361@@ -1092,6 +1150,8 @@ all: modules
362 # using awk while concatenating to the final file.
363
364 PHONY += modules
365+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
366+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
367 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
368 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
369 @$(kecho) ' Building modules, stage 2.';
572b4308 370@@ -1107,7 +1167,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
371
372 # Target to prepare building external modules
373 PHONY += modules_prepare
374-modules_prepare: prepare scripts
375+modules_prepare: gcc-plugins prepare scripts
376
377 # Target to install modules
378 PHONY += modules_install
572b4308 379@@ -1166,7 +1226,7 @@ CLEAN_FILES += vmlinux System.map \
380 MRPROPER_DIRS += include/config usr/include include/generated \
381 arch/*/include/generated
382 MRPROPER_FILES += .config .config.old .version .old_version \
383- include/linux/version.h \
384+ include/linux/version.h tools/gcc/size_overflow_hash.h\
385 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
386
387 # clean - Delete most, but leave enough to build external modules
572b4308 388@@ -1204,6 +1264,7 @@ distclean: mrproper
389 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
390 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
391 -o -name '.*.rej' \
392+ -o -name '.*.rej' -o -name '*.so' \
393 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
394 -type f -print | xargs rm -f
395
572b4308 396@@ -1364,6 +1425,8 @@ PHONY += $(module-dirs) modules
397 $(module-dirs): crmodverdir $(objtree)/Module.symvers
398 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
399
400+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
401+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
402 modules: $(module-dirs)
403 @$(kecho) ' Building modules, stage 2.';
404 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
572b4308 405@@ -1490,17 +1553,21 @@ else
406 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
407 endif
408
409-%.s: %.c prepare scripts FORCE
410+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
411+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
412+%.s: %.c gcc-plugins prepare scripts FORCE
413 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
414 %.i: %.c prepare scripts FORCE
415 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
416-%.o: %.c prepare scripts FORCE
417+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
418+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
419+%.o: %.c gcc-plugins prepare scripts FORCE
420 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
421 %.lst: %.c prepare scripts FORCE
422 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
423-%.s: %.S prepare scripts FORCE
424+%.s: %.S gcc-plugins prepare scripts FORCE
425 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
426-%.o: %.S prepare scripts FORCE
427+%.o: %.S gcc-plugins prepare scripts FORCE
428 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
429 %.symtypes: %.c prepare scripts FORCE
430 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
572b4308 431@@ -1510,11 +1577,15 @@ endif
432 $(cmd_crmodverdir)
433 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
434 $(build)=$(build-dir)
435-%/: prepare scripts FORCE
436+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
437+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
438+%/: gcc-plugins prepare scripts FORCE
439 $(cmd_crmodverdir)
440 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
441 $(build)=$(build-dir)
442-%.ko: prepare scripts FORCE
443+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
444+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
445+%.ko: gcc-plugins prepare scripts FORCE
446 $(cmd_crmodverdir)
447 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
448 $(build)=$(build-dir) $(@:.ko=.o)
4c928ab7 449diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
c6e2a6c8 450index 3bb7ffe..347a54c 100644
451--- a/arch/alpha/include/asm/atomic.h
452+++ b/arch/alpha/include/asm/atomic.h
453@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
454 #define atomic_dec(v) atomic_sub(1,(v))
455 #define atomic64_dec(v) atomic64_sub(1,(v))
456
457+#define atomic64_read_unchecked(v) atomic64_read(v)
458+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
459+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
460+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
461+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
462+#define atomic64_inc_unchecked(v) atomic64_inc(v)
463+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
464+#define atomic64_dec_unchecked(v) atomic64_dec(v)
465+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
466+
467 #define smp_mb__before_atomic_dec() smp_mb()
468 #define smp_mb__after_atomic_dec() smp_mb()
469 #define smp_mb__before_atomic_inc() smp_mb()
470diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
471index ad368a9..fbe0f25 100644
472--- a/arch/alpha/include/asm/cache.h
473+++ b/arch/alpha/include/asm/cache.h
474@@ -4,19 +4,19 @@
475 #ifndef __ARCH_ALPHA_CACHE_H
476 #define __ARCH_ALPHA_CACHE_H
477
478+#include <linux/const.h>
479
480 /* Bytes per L1 (data) cache line. */
481 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
482-# define L1_CACHE_BYTES 64
483 # define L1_CACHE_SHIFT 6
484 #else
485 /* Both EV4 and EV5 are write-through, read-allocate,
486 direct-mapped, physical.
487 */
488-# define L1_CACHE_BYTES 32
489 # define L1_CACHE_SHIFT 5
490 #endif
491
492+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
493 #define SMP_CACHE_BYTES L1_CACHE_BYTES
494
495 #endif
fe2de317 496diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
c6e2a6c8 497index 968d999..d36b2df 100644
498--- a/arch/alpha/include/asm/elf.h
499+++ b/arch/alpha/include/asm/elf.h
c6e2a6c8 500@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
501
502 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
503
504+#ifdef CONFIG_PAX_ASLR
505+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
506+
507+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
508+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
509+#endif
510+
511 /* $0 is set by ld.so to a pointer to a function which might be
512 registered using atexit. This provides a mean for the dynamic
513 linker to call DT_FINI functions for shared libraries that have
514diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
515index bc2a0da..8ad11ee 100644
516--- a/arch/alpha/include/asm/pgalloc.h
517+++ b/arch/alpha/include/asm/pgalloc.h
518@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
519 pgd_set(pgd, pmd);
520 }
521
522+static inline void
523+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
524+{
525+ pgd_populate(mm, pgd, pmd);
526+}
527+
528 extern pgd_t *pgd_alloc(struct mm_struct *mm);
529
530 static inline void
fe2de317 531diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
c6e2a6c8 532index 81a4342..348b927 100644
533--- a/arch/alpha/include/asm/pgtable.h
534+++ b/arch/alpha/include/asm/pgtable.h
c6e2a6c8 535@@ -102,6 +102,17 @@ struct vm_area_struct;
536 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
537 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
538 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
539+
540+#ifdef CONFIG_PAX_PAGEEXEC
541+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
542+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
543+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
544+#else
545+# define PAGE_SHARED_NOEXEC PAGE_SHARED
546+# define PAGE_COPY_NOEXEC PAGE_COPY
547+# define PAGE_READONLY_NOEXEC PAGE_READONLY
548+#endif
549+
550 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
551
552 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
553diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
554index 2fd00b7..cfd5069 100644
555--- a/arch/alpha/kernel/module.c
556+++ b/arch/alpha/kernel/module.c
557@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
558
559 /* The small sections were sorted to the end of the segment.
560 The following should definitely cover them. */
561- gp = (u64)me->module_core + me->core_size - 0x8000;
562+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
563 got = sechdrs[me->arch.gotsecindex].sh_addr;
564
565 for (i = 0; i < n; i++) {
fe2de317 566diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
c6e2a6c8 567index 49ee319..9ee7d14 100644
568--- a/arch/alpha/kernel/osf_sys.c
569+++ b/arch/alpha/kernel/osf_sys.c
c6e2a6c8 570@@ -1146,7 +1146,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
571 /* At this point: (!vma || addr < vma->vm_end). */
572 if (limit - len < addr)
573 return -ENOMEM;
574- if (!vma || addr + len <= vma->vm_start)
575+ if (check_heap_stack_gap(vma, addr, len))
576 return addr;
577 addr = vma->vm_end;
578 vma = vma->vm_next;
c6e2a6c8 579@@ -1182,6 +1182,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
580 merely specific addresses, but regions of memory -- perhaps
581 this feature should be incorporated into all ports? */
582
583+#ifdef CONFIG_PAX_RANDMMAP
584+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
585+#endif
586+
587 if (addr) {
588 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
589 if (addr != (unsigned long) -ENOMEM)
c6e2a6c8 590@@ -1189,8 +1193,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
591 }
592
593 /* Next, try allocating at TASK_UNMAPPED_BASE. */
594- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
595- len, limit);
596+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
597+
598 if (addr != (unsigned long) -ENOMEM)
599 return addr;
600
fe2de317 601diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
c6e2a6c8 602index 5eecab1..609abc0 100644
603--- a/arch/alpha/mm/fault.c
604+++ b/arch/alpha/mm/fault.c
c6e2a6c8 605@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
606 __reload_thread(pcb);
607 }
608
609+#ifdef CONFIG_PAX_PAGEEXEC
610+/*
611+ * PaX: decide what to do with offenders (regs->pc = fault address)
612+ *
613+ * returns 1 when task should be killed
614+ * 2 when patched PLT trampoline was detected
615+ * 3 when unpatched PLT trampoline was detected
616+ */
617+static int pax_handle_fetch_fault(struct pt_regs *regs)
618+{
619+
620+#ifdef CONFIG_PAX_EMUPLT
621+ int err;
622+
623+ do { /* PaX: patched PLT emulation #1 */
624+ unsigned int ldah, ldq, jmp;
625+
626+ err = get_user(ldah, (unsigned int *)regs->pc);
627+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
628+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
629+
630+ if (err)
631+ break;
632+
633+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
634+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
635+ jmp == 0x6BFB0000U)
636+ {
637+ unsigned long r27, addr;
638+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
639+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
640+
641+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
642+ err = get_user(r27, (unsigned long *)addr);
643+ if (err)
644+ break;
645+
646+ regs->r27 = r27;
647+ regs->pc = r27;
648+ return 2;
649+ }
650+ } while (0);
651+
652+ do { /* PaX: patched PLT emulation #2 */
653+ unsigned int ldah, lda, br;
654+
655+ err = get_user(ldah, (unsigned int *)regs->pc);
656+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
657+ err |= get_user(br, (unsigned int *)(regs->pc+8));
658+
659+ if (err)
660+ break;
661+
662+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
663+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
664+ (br & 0xFFE00000U) == 0xC3E00000U)
665+ {
666+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
667+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
668+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
669+
670+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
671+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
672+ return 2;
673+ }
674+ } while (0);
675+
676+ do { /* PaX: unpatched PLT emulation */
677+ unsigned int br;
678+
679+ err = get_user(br, (unsigned int *)regs->pc);
680+
681+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
682+ unsigned int br2, ldq, nop, jmp;
683+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
684+
685+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
686+ err = get_user(br2, (unsigned int *)addr);
687+ err |= get_user(ldq, (unsigned int *)(addr+4));
688+ err |= get_user(nop, (unsigned int *)(addr+8));
689+ err |= get_user(jmp, (unsigned int *)(addr+12));
690+ err |= get_user(resolver, (unsigned long *)(addr+16));
691+
692+ if (err)
693+ break;
694+
695+ if (br2 == 0xC3600000U &&
696+ ldq == 0xA77B000CU &&
697+ nop == 0x47FF041FU &&
698+ jmp == 0x6B7B0000U)
699+ {
700+ regs->r28 = regs->pc+4;
701+ regs->r27 = addr+16;
702+ regs->pc = resolver;
703+ return 3;
704+ }
705+ }
706+ } while (0);
707+#endif
708+
709+ return 1;
710+}
711+
6e9df6a3 712+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
713+{
714+ unsigned long i;
715+
716+ printk(KERN_ERR "PAX: bytes at PC: ");
717+ for (i = 0; i < 5; i++) {
718+ unsigned int c;
719+ if (get_user(c, (unsigned int *)pc+i))
720+ printk(KERN_CONT "???????? ");
721+ else
722+ printk(KERN_CONT "%08x ", c);
723+ }
724+ printk("\n");
725+}
726+#endif
727
728 /*
729 * This routine handles page faults. It determines the address,
c6e2a6c8 730@@ -130,8 +248,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
731 good_area:
732 si_code = SEGV_ACCERR;
733 if (cause < 0) {
734- if (!(vma->vm_flags & VM_EXEC))
735+ if (!(vma->vm_flags & VM_EXEC)) {
736+
737+#ifdef CONFIG_PAX_PAGEEXEC
738+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
739+ goto bad_area;
740+
741+ up_read(&mm->mmap_sem);
742+ switch (pax_handle_fetch_fault(regs)) {
743+
744+#ifdef CONFIG_PAX_EMUPLT
745+ case 2:
746+ case 3:
747+ return;
748+#endif
749+
750+ }
751+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
752+ do_group_exit(SIGKILL);
753+#else
754 goto bad_area;
755+#endif
756+
757+ }
758 } else if (!cause) {
759 /* Allow reads even for write-only mappings */
760 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
fe2de317 761diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
c6e2a6c8 762index 68374ba..cff7196 100644
763--- a/arch/arm/include/asm/atomic.h
764+++ b/arch/arm/include/asm/atomic.h
765@@ -17,17 +17,35 @@
766 #include <asm/barrier.h>
767 #include <asm/cmpxchg.h>
768
769+#ifdef CONFIG_GENERIC_ATOMIC64
770+#include <asm-generic/atomic64.h>
771+#endif
772+
773 #define ATOMIC_INIT(i) { (i) }
774
775 #ifdef __KERNEL__
776
777+#define _ASM_EXTABLE(from, to) \
778+" .pushsection __ex_table,\"a\"\n"\
779+" .align 3\n" \
780+" .long " #from ", " #to"\n" \
781+" .popsection"
782+
783 /*
784 * On ARM, ordinary assignment (str instruction) doesn't clear the local
785 * strex/ldrex monitor on some implementations. The reason we can use it for
786 * atomic_set() is the clrex or dummy strex done on every exception return.
787 */
788 #define atomic_read(v) (*(volatile int *)&(v)->counter)
789+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
790+{
791+ return v->counter;
792+}
793 #define atomic_set(v,i) (((v)->counter) = (i))
794+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
795+{
796+ v->counter = i;
797+}
798
799 #if __LINUX_ARM_ARCH__ >= 6
800
c6e2a6c8 801@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
802 int result;
803
804 __asm__ __volatile__("@ atomic_add\n"
805+"1: ldrex %1, [%3]\n"
806+" adds %0, %1, %4\n"
807+
808+#ifdef CONFIG_PAX_REFCOUNT
809+" bvc 3f\n"
810+"2: bkpt 0xf103\n"
811+"3:\n"
812+#endif
813+
814+" strex %1, %0, [%3]\n"
815+" teq %1, #0\n"
816+" bne 1b"
817+
818+#ifdef CONFIG_PAX_REFCOUNT
819+"\n4:\n"
820+ _ASM_EXTABLE(2b, 4b)
821+#endif
822+
823+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
824+ : "r" (&v->counter), "Ir" (i)
825+ : "cc");
826+}
827+
828+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
829+{
830+ unsigned long tmp;
831+ int result;
832+
833+ __asm__ __volatile__("@ atomic_add_unchecked\n"
834 "1: ldrex %0, [%3]\n"
835 " add %0, %0, %4\n"
836 " strex %1, %0, [%3]\n"
c6e2a6c8 837@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
838 smp_mb();
839
840 __asm__ __volatile__("@ atomic_add_return\n"
841+"1: ldrex %1, [%3]\n"
842+" adds %0, %1, %4\n"
843+
844+#ifdef CONFIG_PAX_REFCOUNT
845+" bvc 3f\n"
846+" mov %0, %1\n"
c6e2a6c8 847+"2: bkpt 0xf103\n"
848+"3:\n"
849+#endif
850+
851+" strex %1, %0, [%3]\n"
852+" teq %1, #0\n"
853+" bne 1b"
854+
855+#ifdef CONFIG_PAX_REFCOUNT
856+"\n4:\n"
857+ _ASM_EXTABLE(2b, 4b)
858+#endif
859+
860+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
861+ : "r" (&v->counter), "Ir" (i)
862+ : "cc");
863+
864+ smp_mb();
865+
866+ return result;
867+}
868+
869+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
870+{
871+ unsigned long tmp;
872+ int result;
873+
874+ smp_mb();
875+
876+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
877 "1: ldrex %0, [%3]\n"
878 " add %0, %0, %4\n"
879 " strex %1, %0, [%3]\n"
c6e2a6c8 880@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
881 int result;
882
883 __asm__ __volatile__("@ atomic_sub\n"
884+"1: ldrex %1, [%3]\n"
885+" subs %0, %1, %4\n"
886+
887+#ifdef CONFIG_PAX_REFCOUNT
888+" bvc 3f\n"
889+"2: bkpt 0xf103\n"
890+"3:\n"
891+#endif
892+
893+" strex %1, %0, [%3]\n"
894+" teq %1, #0\n"
895+" bne 1b"
896+
897+#ifdef CONFIG_PAX_REFCOUNT
898+"\n4:\n"
899+ _ASM_EXTABLE(2b, 4b)
900+#endif
901+
902+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
903+ : "r" (&v->counter), "Ir" (i)
904+ : "cc");
905+}
906+
907+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
908+{
909+ unsigned long tmp;
910+ int result;
911+
912+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
913 "1: ldrex %0, [%3]\n"
914 " sub %0, %0, %4\n"
915 " strex %1, %0, [%3]\n"
c6e2a6c8 916@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
917 smp_mb();
918
919 __asm__ __volatile__("@ atomic_sub_return\n"
920-"1: ldrex %0, [%3]\n"
921-" sub %0, %0, %4\n"
922+"1: ldrex %1, [%3]\n"
923+" sub %0, %1, %4\n"
924+
925+#ifdef CONFIG_PAX_REFCOUNT
926+" bvc 3f\n"
927+" mov %0, %1\n"
928+"2: bkpt 0xf103\n"
929+"3:\n"
930+#endif
931+
932 " strex %1, %0, [%3]\n"
933 " teq %1, #0\n"
934 " bne 1b"
935+
936+#ifdef CONFIG_PAX_REFCOUNT
937+"\n4:\n"
938+ _ASM_EXTABLE(2b, 4b)
939+#endif
940+
941 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
942 : "r" (&v->counter), "Ir" (i)
943 : "cc");
c6e2a6c8 944@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
945 return oldval;
946 }
947
948+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
949+{
950+ unsigned long oldval, res;
951+
952+ smp_mb();
953+
954+ do {
955+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
956+ "ldrex %1, [%3]\n"
957+ "mov %0, #0\n"
958+ "teq %1, %4\n"
959+ "strexeq %0, %5, [%3]\n"
960+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
961+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
962+ : "cc");
963+ } while (res);
964+
965+ smp_mb();
966+
967+ return oldval;
968+}
969+
970 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
971 {
972 unsigned long tmp, tmp2;
c6e2a6c8 973@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
974
975 return val;
976 }
977+
978+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
979+{
980+ return atomic_add_return(i, v);
981+}
982+
5e856224 983 #define atomic_add(i, v) (void) atomic_add_return(i, v)
984+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
985+{
986+ (void) atomic_add_return(i, v);
987+}
988
989 static inline int atomic_sub_return(int i, atomic_t *v)
990 {
c6e2a6c8 991@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
992 return val;
993 }
5e856224 994 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
995+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
996+{
997+ (void) atomic_sub_return(i, v);
998+}
999
1000 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1001 {
c6e2a6c8 1002@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1003 return ret;
1004 }
5e856224 1005
1006+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1007+{
1008+ return atomic_cmpxchg(v, old, new);
1009+}
1010+
1011 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1012 {
1013 unsigned long flags;
1014@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1015 #endif /* __LINUX_ARM_ARCH__ */
1016
1017 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1018+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1019+{
1020+ return xchg(&v->counter, new);
1021+}
1022
1023 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1024 {
c6e2a6c8 1025@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1026 }
1027
1028 #define atomic_inc(v) atomic_add(1, v)
1029+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1030+{
1031+ atomic_add_unchecked(1, v);
1032+}
1033 #define atomic_dec(v) atomic_sub(1, v)
1034+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1035+{
1036+ atomic_sub_unchecked(1, v);
1037+}
1038
1039 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1040+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1041+{
1042+ return atomic_add_return_unchecked(1, v) == 0;
1043+}
1044 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1045 #define atomic_inc_return(v) (atomic_add_return(1, v))
1046+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1047+{
1048+ return atomic_add_return_unchecked(1, v);
1049+}
1050 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1051 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1052
c6e2a6c8 1053@@ -241,6 +428,14 @@ typedef struct {
1054 u64 __aligned(8) counter;
1055 } atomic64_t;
1056
1057+#ifdef CONFIG_PAX_REFCOUNT
1058+typedef struct {
1059+ u64 __aligned(8) counter;
1060+} atomic64_unchecked_t;
1061+#else
1062+typedef atomic64_t atomic64_unchecked_t;
1063+#endif
1064+
1065 #define ATOMIC64_INIT(i) { (i) }
1066
1067 static inline u64 atomic64_read(atomic64_t *v)
c6e2a6c8 1068@@ -256,6 +451,19 @@ static inline u64 atomic64_read(atomic64_t *v)
1069 return result;
1070 }
1071
1072+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1073+{
1074+ u64 result;
1075+
1076+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1077+" ldrexd %0, %H0, [%1]"
1078+ : "=&r" (result)
1079+ : "r" (&v->counter), "Qo" (v->counter)
1080+ );
1081+
1082+ return result;
1083+}
1084+
1085 static inline void atomic64_set(atomic64_t *v, u64 i)
1086 {
1087 u64 tmp;
c6e2a6c8 1088@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1089 : "cc");
1090 }
1091
1092+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1093+{
1094+ u64 tmp;
1095+
1096+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1097+"1: ldrexd %0, %H0, [%2]\n"
1098+" strexd %0, %3, %H3, [%2]\n"
1099+" teq %0, #0\n"
1100+" bne 1b"
1101+ : "=&r" (tmp), "=Qo" (v->counter)
1102+ : "r" (&v->counter), "r" (i)
1103+ : "cc");
1104+}
1105+
1106 static inline void atomic64_add(u64 i, atomic64_t *v)
1107 {
1108 u64 result;
c6e2a6c8 1109@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1110 __asm__ __volatile__("@ atomic64_add\n"
1111 "1: ldrexd %0, %H0, [%3]\n"
1112 " adds %0, %0, %4\n"
1113+" adcs %H0, %H0, %H4\n"
1114+
1115+#ifdef CONFIG_PAX_REFCOUNT
1116+" bvc 3f\n"
1117+"2: bkpt 0xf103\n"
1118+"3:\n"
1119+#endif
1120+
1121+" strexd %1, %0, %H0, [%3]\n"
1122+" teq %1, #0\n"
1123+" bne 1b"
1124+
1125+#ifdef CONFIG_PAX_REFCOUNT
1126+"\n4:\n"
1127+ _ASM_EXTABLE(2b, 4b)
1128+#endif
1129+
1130+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1131+ : "r" (&v->counter), "r" (i)
1132+ : "cc");
1133+}
1134+
1135+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1136+{
1137+ u64 result;
1138+ unsigned long tmp;
1139+
1140+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1141+"1: ldrexd %0, %H0, [%3]\n"
1142+" adds %0, %0, %4\n"
1143 " adc %H0, %H0, %H4\n"
1144 " strexd %1, %0, %H0, [%3]\n"
1145 " teq %1, #0\n"
c6e2a6c8 1146@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1147
1148 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1149 {
1150- u64 result;
1151- unsigned long tmp;
1152+ u64 result, tmp;
1153
1154 smp_mb();
1155
1156 __asm__ __volatile__("@ atomic64_add_return\n"
1157+"1: ldrexd %1, %H1, [%3]\n"
1158+" adds %0, %1, %4\n"
1159+" adcs %H0, %H1, %H4\n"
1160+
1161+#ifdef CONFIG_PAX_REFCOUNT
1162+" bvc 3f\n"
1163+" mov %0, %1\n"
1164+" mov %H0, %H1\n"
1165+"2: bkpt 0xf103\n"
1166+"3:\n"
1167+#endif
1168+
1169+" strexd %1, %0, %H0, [%3]\n"
1170+" teq %1, #0\n"
1171+" bne 1b"
1172+
1173+#ifdef CONFIG_PAX_REFCOUNT
1174+"\n4:\n"
1175+ _ASM_EXTABLE(2b, 4b)
1176+#endif
1177+
1178+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1179+ : "r" (&v->counter), "r" (i)
1180+ : "cc");
1181+
1182+ smp_mb();
1183+
1184+ return result;
1185+}
1186+
1187+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1188+{
1189+ u64 result;
1190+ unsigned long tmp;
1191+
1192+ smp_mb();
1193+
1194+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1195 "1: ldrexd %0, %H0, [%3]\n"
1196 " adds %0, %0, %4\n"
1197 " adc %H0, %H0, %H4\n"
c6e2a6c8 1198@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1199 __asm__ __volatile__("@ atomic64_sub\n"
1200 "1: ldrexd %0, %H0, [%3]\n"
1201 " subs %0, %0, %4\n"
1202+" sbcs %H0, %H0, %H4\n"
1203+
1204+#ifdef CONFIG_PAX_REFCOUNT
1205+" bvc 3f\n"
1206+"2: bkpt 0xf103\n"
1207+"3:\n"
1208+#endif
1209+
1210+" strexd %1, %0, %H0, [%3]\n"
1211+" teq %1, #0\n"
1212+" bne 1b"
1213+
1214+#ifdef CONFIG_PAX_REFCOUNT
1215+"\n4:\n"
1216+ _ASM_EXTABLE(2b, 4b)
1217+#endif
1218+
1219+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1220+ : "r" (&v->counter), "r" (i)
1221+ : "cc");
1222+}
1223+
1224+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1225+{
1226+ u64 result;
1227+ unsigned long tmp;
1228+
1229+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1230+"1: ldrexd %0, %H0, [%3]\n"
1231+" subs %0, %0, %4\n"
1232 " sbc %H0, %H0, %H4\n"
1233 " strexd %1, %0, %H0, [%3]\n"
1234 " teq %1, #0\n"
c6e2a6c8 1235@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1236
1237 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1238 {
1239- u64 result;
1240- unsigned long tmp;
1241+ u64 result, tmp;
1242
1243 smp_mb();
1244
1245 __asm__ __volatile__("@ atomic64_sub_return\n"
1246-"1: ldrexd %0, %H0, [%3]\n"
1247-" subs %0, %0, %4\n"
1248-" sbc %H0, %H0, %H4\n"
1249+"1: ldrexd %1, %H1, [%3]\n"
1250+" subs %0, %1, %4\n"
1251+" sbc %H0, %H1, %H4\n"
1252+
1253+#ifdef CONFIG_PAX_REFCOUNT
1254+" bvc 3f\n"
1255+" mov %0, %1\n"
1256+" mov %H0, %H1\n"
1257+"2: bkpt 0xf103\n"
1258+"3:\n"
1259+#endif
1260+
1261 " strexd %1, %0, %H0, [%3]\n"
1262 " teq %1, #0\n"
1263 " bne 1b"
1264+
1265+#ifdef CONFIG_PAX_REFCOUNT
1266+"\n4:\n"
1267+ _ASM_EXTABLE(2b, 4b)
1268+#endif
1269+
1270 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1271 : "r" (&v->counter), "r" (i)
1272 : "cc");
c6e2a6c8 1273@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1274 return oldval;
1275 }
1276
1277+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1278+{
1279+ u64 oldval;
1280+ unsigned long res;
1281+
1282+ smp_mb();
1283+
1284+ do {
1285+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1286+ "ldrexd %1, %H1, [%3]\n"
1287+ "mov %0, #0\n"
1288+ "teq %1, %4\n"
1289+ "teqeq %H1, %H4\n"
1290+ "strexdeq %0, %5, %H5, [%3]"
1291+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1292+ : "r" (&ptr->counter), "r" (old), "r" (new)
1293+ : "cc");
1294+ } while (res);
1295+
1296+ smp_mb();
1297+
1298+ return oldval;
1299+}
1300+
1301 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1302 {
1303 u64 result;
c6e2a6c8 1304@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1305
1306 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1307 {
1308- u64 result;
1309- unsigned long tmp;
1310+ u64 result, tmp;
1311
1312 smp_mb();
1313
1314 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1315-"1: ldrexd %0, %H0, [%3]\n"
1316-" subs %0, %0, #1\n"
1317-" sbc %H0, %H0, #0\n"
1318+"1: ldrexd %1, %H1, [%3]\n"
1319+" subs %0, %1, #1\n"
1320+" sbc %H0, %H1, #0\n"
1321+
1322+#ifdef CONFIG_PAX_REFCOUNT
1323+" bvc 3f\n"
1324+" mov %0, %1\n"
1325+" mov %H0, %H1\n"
1326+"2: bkpt 0xf103\n"
1327+"3:\n"
1328+#endif
1329+
1330 " teq %H0, #0\n"
1331-" bmi 2f\n"
1332+" bmi 4f\n"
1333 " strexd %1, %0, %H0, [%3]\n"
1334 " teq %1, #0\n"
1335 " bne 1b\n"
1336-"2:"
1337+"4:\n"
1338+
1339+#ifdef CONFIG_PAX_REFCOUNT
1340+ _ASM_EXTABLE(2b, 4b)
1341+#endif
1342+
1343 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1344 : "r" (&v->counter)
1345 : "cc");
c6e2a6c8 1346@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1347 " teq %0, %5\n"
1348 " teqeq %H0, %H5\n"
1349 " moveq %1, #0\n"
1350-" beq 2f\n"
1351+" beq 4f\n"
1352 " adds %0, %0, %6\n"
1353 " adc %H0, %H0, %H6\n"
1354+
1355+#ifdef CONFIG_PAX_REFCOUNT
1356+" bvc 3f\n"
1357+"2: bkpt 0xf103\n"
1358+"3:\n"
1359+#endif
1360+
1361 " strexd %2, %0, %H0, [%4]\n"
1362 " teq %2, #0\n"
1363 " bne 1b\n"
1364-"2:"
1365+"4:\n"
1366+
1367+#ifdef CONFIG_PAX_REFCOUNT
1368+ _ASM_EXTABLE(2b, 4b)
1369+#endif
1370+
1371 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1372 : "r" (&v->counter), "r" (u), "r" (a)
1373 : "cc");
c6e2a6c8 1374@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1375
1376 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1377 #define atomic64_inc(v) atomic64_add(1LL, (v))
1378+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1379 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1380+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1381 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1382 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1383 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1384+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1385 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1386 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1387 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1388diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1389index 75fe66b..2255c86 100644
1390--- a/arch/arm/include/asm/cache.h
1391+++ b/arch/arm/include/asm/cache.h
1392@@ -4,8 +4,10 @@
1393 #ifndef __ASMARM_CACHE_H
1394 #define __ASMARM_CACHE_H
1395
1396+#include <linux/const.h>
1397+
1398 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1399-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1400+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1401
1402 /*
1403 * Memory returned by kmalloc() may be used for DMA, so we must make
1404diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
c6e2a6c8 1405index 1252a26..9dc17b5 100644
1406--- a/arch/arm/include/asm/cacheflush.h
1407+++ b/arch/arm/include/asm/cacheflush.h
1408@@ -108,7 +108,7 @@ struct cpu_cache_fns {
1409 void (*dma_unmap_area)(const void *, size_t, int);
1410
1411 void (*dma_flush_range)(const void *, const void *);
1412-};
1413+} __no_const;
1414
1415 /*
1416 * Select the calling method
1417diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1418index d41d7cb..9bea5e0 100644
1419--- a/arch/arm/include/asm/cmpxchg.h
1420+++ b/arch/arm/include/asm/cmpxchg.h
1421@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1422
1423 #define xchg(ptr,x) \
1424 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1425+#define xchg_unchecked(ptr,x) \
1426+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1427
1428 #include <asm-generic/cmpxchg-local.h>
1429
fe2de317 1430diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
c6e2a6c8 1431index 38050b1..9d90e8b 100644
1432--- a/arch/arm/include/asm/elf.h
1433+++ b/arch/arm/include/asm/elf.h
1434@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1435 the loader. We need to make sure that it is out of the way of the program
1436 that it will "exec", and that there is sufficient room for the brk. */
1437
1438-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1439+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1440+
1441+#ifdef CONFIG_PAX_ASLR
1442+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1443+
1444+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1445+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1446+#endif
1447
1448 /* When the program starts, a1 contains a pointer to a function to be
1449 registered with atexit, as per the SVR4 ABI. A value of 0 means we
c6e2a6c8 1450@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1451 extern void elf_set_personality(const struct elf32_hdr *);
1452 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1453
1454-struct mm_struct;
1455-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1456-#define arch_randomize_brk arch_randomize_brk
1457-
c6e2a6c8 1458 #endif
1459diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1460index e51b1e8..32a3113 100644
1461--- a/arch/arm/include/asm/kmap_types.h
1462+++ b/arch/arm/include/asm/kmap_types.h
57199397 1463@@ -21,6 +21,7 @@ enum km_type {
df50ba0c 1464 KM_L1_CACHE,
58c5fc13 1465 KM_L2_CACHE,
57199397 1466 KM_KDB,
1467+ KM_CLEARPAGE,
1468 KM_TYPE_NR
1469 };
1470
1471diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1472index 53426c6..c7baff3 100644
1473--- a/arch/arm/include/asm/outercache.h
1474+++ b/arch/arm/include/asm/outercache.h
1475@@ -35,7 +35,7 @@ struct outer_cache_fns {
1476 #endif
1477 void (*set_debug)(unsigned long);
1478 void (*resume)(void);
1479-};
1480+} __no_const;
1481
1482 #ifdef CONFIG_OUTER_CACHE
1483
1484diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
c6e2a6c8 1485index 5838361..da6e813 100644
1486--- a/arch/arm/include/asm/page.h
1487+++ b/arch/arm/include/asm/page.h
1488@@ -123,7 +123,7 @@ struct cpu_user_fns {
1489 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1490 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1491 unsigned long vaddr, struct vm_area_struct *vma);
1492-};
1493+} __no_const;
1494
1495 #ifdef MULTI_USER
1496 extern struct cpu_user_fns cpu_user;
1497diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1498index 943504f..bf8d667 100644
1499--- a/arch/arm/include/asm/pgalloc.h
1500+++ b/arch/arm/include/asm/pgalloc.h
1501@@ -43,6 +43,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1502 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1503 }
1504
1505+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1506+{
1507+ pud_populate(mm, pud, pmd);
1508+}
1509+
1510 #else /* !CONFIG_ARM_LPAE */
1511
1512 /*
1513@@ -51,6 +56,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1514 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1515 #define pmd_free(mm, pmd) do { } while (0)
1516 #define pud_populate(mm,pmd,pte) BUG()
1517+#define pud_populate_kernel(mm,pmd,pte) BUG()
1518
1519 #endif /* CONFIG_ARM_LPAE */
1520
5e856224 1521diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
c6e2a6c8 1522index 0f04d84..2be5648 100644
1523--- a/arch/arm/include/asm/thread_info.h
1524+++ b/arch/arm/include/asm/thread_info.h
c6e2a6c8 1525@@ -148,6 +148,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1526 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
1527 #define TIF_SYSCALL_TRACE 8
1528 #define TIF_SYSCALL_AUDIT 9
1529+
1530+/* within 8 bits of TIF_SYSCALL_TRACE
1531+ to meet flexible second operand requirements
1532+*/
1533+#define TIF_GRSEC_SETXID 10
1534+
1535 #define TIF_POLLING_NRFLAG 16
1536 #define TIF_USING_IWMMXT 17
1537 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
c6e2a6c8 1538@@ -163,9 +169,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
1539 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
1540 #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
1541 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
1542+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
1543
1544 /* Checks for any syscall work in entry-common.S */
1545-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
1546+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
1547+ _TIF_GRSEC_SETXID)
1548
1549 /*
1550 * Change these and you break ASM code in entry-common.S
fe2de317 1551diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
c6e2a6c8 1552index 71f6536..602f279 100644
1553--- a/arch/arm/include/asm/uaccess.h
1554+++ b/arch/arm/include/asm/uaccess.h
1555@@ -22,6 +22,8 @@
1556 #define VERIFY_READ 0
1557 #define VERIFY_WRITE 1
66a7e928 1558
1559+extern void check_object_size(const void *ptr, unsigned long n, bool to);
1560+
1561 /*
1562 * The exception table consists of pairs of addresses: the first is the
1563 * address of an instruction that is allowed to fault, and the second is
1564@@ -387,8 +389,23 @@ do { \
66a7e928 1565
66a7e928 1566
1567 #ifdef CONFIG_MMU
1568-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
1569-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
1570+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
1571+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
1572+
1573+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
1574+{
1575+ if (!__builtin_constant_p(n))
1576+ check_object_size(to, n, false);
1577+ return ___copy_from_user(to, from, n);
1578+}
1579+
1580+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
1581+{
1582+ if (!__builtin_constant_p(n))
1583+ check_object_size(from, n, true);
1584+ return ___copy_to_user(to, from, n);
1585+}
1586+
1587 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
1588 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
1589 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
fe2de317 1590@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
1591
1592 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1593 {
1594+ if ((long)n < 0)
1595+ return n;
1596+
1597 if (access_ok(VERIFY_READ, from, n))
1598 n = __copy_from_user(to, from, n);
1599 else /* security hole - plug it */
fe2de317 1600@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
1601
1602 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1603 {
1604+ if ((long)n < 0)
1605+ return n;
1606+
1607 if (access_ok(VERIFY_WRITE, to, n))
1608 n = __copy_to_user(to, from, n);
1609 return n;
fe2de317 1610diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
c6e2a6c8 1611index b57c75e..ed2d6b2 100644
1612--- a/arch/arm/kernel/armksyms.c
1613+++ b/arch/arm/kernel/armksyms.c
c6e2a6c8 1614@@ -94,8 +94,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
1615 #ifdef CONFIG_MMU
1616 EXPORT_SYMBOL(copy_page);
1617
1618-EXPORT_SYMBOL(__copy_from_user);
1619-EXPORT_SYMBOL(__copy_to_user);
1620+EXPORT_SYMBOL(___copy_from_user);
1621+EXPORT_SYMBOL(___copy_to_user);
1622 EXPORT_SYMBOL(__clear_user);
1623
1624 EXPORT_SYMBOL(__get_user_1);
fe2de317 1625diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
c6e2a6c8 1626index 2b7b017..c380fa2 100644
1627--- a/arch/arm/kernel/process.c
1628+++ b/arch/arm/kernel/process.c
1629@@ -28,7 +28,6 @@
1630 #include <linux/tick.h>
1631 #include <linux/utsname.h>
1632 #include <linux/uaccess.h>
1633-#include <linux/random.h>
1634 #include <linux/hw_breakpoint.h>
6e9df6a3 1635 #include <linux/cpuidle.h>
bc901d79 1636
c6e2a6c8 1637@@ -275,9 +274,10 @@ void machine_power_off(void)
1638 machine_shutdown();
1639 if (pm_power_off)
1640 pm_power_off();
1641+ BUG();
1642 }
1643
1644-void machine_restart(char *cmd)
1645+__noreturn void machine_restart(char *cmd)
1646 {
1647 machine_shutdown();
1648
c6e2a6c8 1649@@ -519,12 +519,6 @@ unsigned long get_wchan(struct task_struct *p)
1650 return 0;
1651 }
1652
1653-unsigned long arch_randomize_brk(struct mm_struct *mm)
1654-{
1655- unsigned long range_end = mm->brk + 0x02000000;
1656- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
1657-}
1658-
16454cff 1659 #ifdef CONFIG_MMU
1660 /*
1661 * The vectors page is always readable from user space for the
5e856224 1662diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
c6e2a6c8 1663index 9650c14..ae30cdd 100644
1664--- a/arch/arm/kernel/ptrace.c
1665+++ b/arch/arm/kernel/ptrace.c
c6e2a6c8 1666@@ -906,10 +906,19 @@ long arch_ptrace(struct task_struct *child, long request,
1667 return ret;
1668 }
1669
1670+#ifdef CONFIG_GRKERNSEC_SETXID
1671+extern void gr_delayed_cred_worker(void);
1672+#endif
1673+
1674 asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
1675 {
1676 unsigned long ip;
1677
1678+#ifdef CONFIG_GRKERNSEC_SETXID
1679+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
1680+ gr_delayed_cred_worker();
1681+#endif
1682+
1683 if (why)
1684 audit_syscall_exit(regs);
1685 else
4c928ab7 1686diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
c6e2a6c8 1687index ebfac78..cbea9c0 100644
1688--- a/arch/arm/kernel/setup.c
1689+++ b/arch/arm/kernel/setup.c
c6e2a6c8 1690@@ -111,13 +111,13 @@ struct processor processor __read_mostly;
1691 struct cpu_tlb_fns cpu_tlb __read_mostly;
1692 #endif
1693 #ifdef MULTI_USER
1694-struct cpu_user_fns cpu_user __read_mostly;
1695+struct cpu_user_fns cpu_user __read_only;
1696 #endif
1697 #ifdef MULTI_CACHE
1698-struct cpu_cache_fns cpu_cache __read_mostly;
1699+struct cpu_cache_fns cpu_cache __read_only;
1700 #endif
1701 #ifdef CONFIG_OUTER_CACHE
1702-struct outer_cache_fns outer_cache __read_mostly;
1703+struct outer_cache_fns outer_cache __read_only;
1704 EXPORT_SYMBOL(outer_cache);
1705 #endif
1706
fe2de317 1707diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
c6e2a6c8 1708index 63d402f..db1d714 100644
1709--- a/arch/arm/kernel/traps.c
1710+++ b/arch/arm/kernel/traps.c
c6e2a6c8 1711@@ -264,6 +264,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
66a7e928 1712
4c928ab7 1713 static DEFINE_RAW_SPINLOCK(die_lock);
66a7e928 1714
1715+extern void gr_handle_kernel_exploit(void);
1716+
1717 /*
1718 * This function is protected against re-entrancy.
1719 */
c6e2a6c8 1720@@ -296,6 +298,9 @@ void die(const char *str, struct pt_regs *regs, int err)
1721 panic("Fatal exception in interrupt");
1722 if (panic_on_oops)
1723 panic("Fatal exception");
1724+
1725+ gr_handle_kernel_exploit();
1726+
1727 if (ret != NOTIFY_STOP)
1728 do_exit(SIGSEGV);
1729 }
1730diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
1731index 66a477a..bee61d3 100644
1732--- a/arch/arm/lib/copy_from_user.S
1733+++ b/arch/arm/lib/copy_from_user.S
1734@@ -16,7 +16,7 @@
1735 /*
1736 * Prototype:
1737 *
1738- * size_t __copy_from_user(void *to, const void *from, size_t n)
1739+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
1740 *
1741 * Purpose:
1742 *
1743@@ -84,11 +84,11 @@
1744
1745 .text
1746
1747-ENTRY(__copy_from_user)
1748+ENTRY(___copy_from_user)
1749
1750 #include "copy_template.S"
1751
1752-ENDPROC(__copy_from_user)
1753+ENDPROC(___copy_from_user)
1754
1755 .pushsection .fixup,"ax"
1756 .align 0
1757diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
1758index 6ee2f67..d1cce76 100644
1759--- a/arch/arm/lib/copy_page.S
1760+++ b/arch/arm/lib/copy_page.S
1761@@ -10,6 +10,7 @@
1762 * ASM optimised string functions
1763 */
1764 #include <linux/linkage.h>
1765+#include <linux/const.h>
1766 #include <asm/assembler.h>
1767 #include <asm/asm-offsets.h>
1768 #include <asm/cache.h>
1769diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
1770index d066df6..df28194 100644
1771--- a/arch/arm/lib/copy_to_user.S
1772+++ b/arch/arm/lib/copy_to_user.S
1773@@ -16,7 +16,7 @@
1774 /*
1775 * Prototype:
1776 *
1777- * size_t __copy_to_user(void *to, const void *from, size_t n)
1778+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
1779 *
1780 * Purpose:
1781 *
1782@@ -88,11 +88,11 @@
1783 .text
1784
1785 ENTRY(__copy_to_user_std)
1786-WEAK(__copy_to_user)
1787+WEAK(___copy_to_user)
1788
1789 #include "copy_template.S"
1790
1791-ENDPROC(__copy_to_user)
1792+ENDPROC(___copy_to_user)
1793 ENDPROC(__copy_to_user_std)
1794
1795 .pushsection .fixup,"ax"
1796diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
1797index 5c908b1..e712687 100644
1798--- a/arch/arm/lib/uaccess.S
1799+++ b/arch/arm/lib/uaccess.S
1800@@ -20,7 +20,7 @@
1801
1802 #define PAGE_SHIFT 12
1803
1804-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
1805+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
1806 * Purpose : copy a block to user memory from kernel memory
1807 * Params : to - user memory
1808 * : from - kernel memory
1809@@ -40,7 +40,7 @@ USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1810 sub r2, r2, ip
1811 b .Lc2u_dest_aligned
1812
1813-ENTRY(__copy_to_user)
1814+ENTRY(___copy_to_user)
1815 stmfd sp!, {r2, r4 - r7, lr}
1816 cmp r2, #4
1817 blt .Lc2u_not_enough
1818@@ -278,14 +278,14 @@ USER( TUSER( strgeb) r3, [r0], #1) @ May fault
1819 ldrgtb r3, [r1], #0
1820 USER( TUSER( strgtb) r3, [r0], #1) @ May fault
1821 b .Lc2u_finished
1822-ENDPROC(__copy_to_user)
1823+ENDPROC(___copy_to_user)
1824
1825 .pushsection .fixup,"ax"
1826 .align 0
1827 9001: ldmfd sp!, {r0, r4 - r7, pc}
1828 .popsection
1829
1830-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
1831+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
1832 * Purpose : copy a block from user memory to kernel memory
1833 * Params : to - kernel memory
1834 * : from - user memory
1835@@ -304,7 +304,7 @@ USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1836 sub r2, r2, ip
1837 b .Lcfu_dest_aligned
1838
1839-ENTRY(__copy_from_user)
1840+ENTRY(___copy_from_user)
1841 stmfd sp!, {r0, r2, r4 - r7, lr}
1842 cmp r2, #4
1843 blt .Lcfu_not_enough
1844@@ -544,7 +544,7 @@ USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault
1845 USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault
1846 strgtb r3, [r0], #1
1847 b .Lcfu_finished
1848-ENDPROC(__copy_from_user)
1849+ENDPROC(___copy_from_user)
1850
1851 .pushsection .fixup,"ax"
1852 .align 0
1853diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
1854index 025f742..8432b08 100644
1855--- a/arch/arm/lib/uaccess_with_memcpy.c
1856+++ b/arch/arm/lib/uaccess_with_memcpy.c
1857@@ -104,7 +104,7 @@ out:
1858 }
1859
1860 unsigned long
1861-__copy_to_user(void __user *to, const void *from, unsigned long n)
1862+___copy_to_user(void __user *to, const void *from, unsigned long n)
1863 {
1864 /*
1865 * This test is stubbed out of the main function above to keep
1866diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
1867index 518091c..eae9a76 100644
1868--- a/arch/arm/mach-omap2/board-n8x0.c
1869+++ b/arch/arm/mach-omap2/board-n8x0.c
1870@@ -596,7 +596,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
1871 }
1872 #endif
1873
1874-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
1875+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
1876 .late_init = n8x0_menelaus_late_init,
1877 };
1878
1879diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1880index 5bb4835..4760f68 100644
1881--- a/arch/arm/mm/fault.c
1882+++ b/arch/arm/mm/fault.c
1883@@ -174,6 +174,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1884 }
1885 #endif
1886
1887+#ifdef CONFIG_PAX_PAGEEXEC
1888+ if (fsr & FSR_LNX_PF) {
1889+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1890+ do_group_exit(SIGKILL);
1891+ }
1892+#endif
1893+
1894 tsk->thread.address = addr;
1895 tsk->thread.error_code = fsr;
1896 tsk->thread.trap_no = 14;
1897@@ -397,6 +404,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1898 }
1899 #endif /* CONFIG_MMU */
1900
1901+#ifdef CONFIG_PAX_PAGEEXEC
1902+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1903+{
1904+ long i;
1905+
1906+ printk(KERN_ERR "PAX: bytes at PC: ");
1907+ for (i = 0; i < 20; i++) {
1908+ unsigned char c;
1909+ if (get_user(c, (__force unsigned char __user *)pc+i))
1910+ printk(KERN_CONT "?? ");
1911+ else
1912+ printk(KERN_CONT "%02x ", c);
1913+ }
1914+ printk("\n");
1915+
1916+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1917+ for (i = -1; i < 20; i++) {
1918+ unsigned long c;
1919+ if (get_user(c, (__force unsigned long __user *)sp+i))
1920+ printk(KERN_CONT "???????? ");
1921+ else
1922+ printk(KERN_CONT "%08lx ", c);
1923+ }
1924+ printk("\n");
1925+}
1926+#endif
1927+
1928 /*
1929 * First Level Translation Fault Handler
1930 *
1931@@ -577,6 +611,20 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
1932 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
1933 struct siginfo info;
1934
1935+#ifdef CONFIG_PAX_REFCOUNT
1936+ if (fsr_fs(ifsr) == 2) {
1937+ unsigned int bkpt;
1938+
1939+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
1940+ current->thread.error_code = ifsr;
1941+ current->thread.trap_no = 0;
1942+ pax_report_refcount_overflow(regs);
1943+ fixup_exception(regs);
1944+ return;
1945+ }
1946+ }
1947+#endif
1948+
1949 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
1950 return;
1951
1952diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1953index ce8cb19..3ec539d 100644
1954--- a/arch/arm/mm/mmap.c
1955+++ b/arch/arm/mm/mmap.c
1956@@ -93,6 +93,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1957 if (len > TASK_SIZE)
1958 return -ENOMEM;
1959
1960+#ifdef CONFIG_PAX_RANDMMAP
1961+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1962+#endif
1963+
1964 if (addr) {
1965 if (do_align)
1966 addr = COLOUR_ALIGN(addr, pgoff);
1967@@ -100,15 +104,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1968 addr = PAGE_ALIGN(addr);
1969
1970 vma = find_vma(mm, addr);
1971- if (TASK_SIZE - len >= addr &&
1972- (!vma || addr + len <= vma->vm_start))
1973+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1974 return addr;
1975 }
1976 if (len > mm->cached_hole_size) {
1977- start_addr = addr = mm->free_area_cache;
1978+ start_addr = addr = mm->free_area_cache;
1979 } else {
1980- start_addr = addr = mm->mmap_base;
1981- mm->cached_hole_size = 0;
1982+ start_addr = addr = mm->mmap_base;
1983+ mm->cached_hole_size = 0;
1984 }
1985
1986 full_search:
1987@@ -124,14 +127,14 @@ full_search:
1988 * Start a new search - just in case we missed
1989 * some holes.
1990 */
1991- if (start_addr != TASK_UNMAPPED_BASE) {
1992- start_addr = addr = TASK_UNMAPPED_BASE;
1993+ if (start_addr != mm->mmap_base) {
1994+ start_addr = addr = mm->mmap_base;
1995 mm->cached_hole_size = 0;
1996 goto full_search;
1997 }
1998 return -ENOMEM;
1999 }
2000- if (!vma || addr + len <= vma->vm_start) {
2001+ if (check_heap_stack_gap(vma, addr, len)) {
2002 /*
2003 * Remember the place where we stopped the search:
2004 */
2005@@ -266,10 +269,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
2006
2007 if (mmap_is_legacy()) {
2008 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
2009+
2010+#ifdef CONFIG_PAX_RANDMMAP
2011+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2012+ mm->mmap_base += mm->delta_mmap;
2013+#endif
2014+
2015 mm->get_unmapped_area = arch_get_unmapped_area;
2016 mm->unmap_area = arch_unmap_area;
2017 } else {
2018 mm->mmap_base = mmap_base(random_factor);
2019+
2020+#ifdef CONFIG_PAX_RANDMMAP
2021+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2022+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2023+#endif
2024+
2025 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2026 mm->unmap_area = arch_unmap_area_topdown;
2027 }
2028diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
2029index fd556f7..af2e7d2 100644
2030--- a/arch/arm/plat-orion/include/plat/addr-map.h
2031+++ b/arch/arm/plat-orion/include/plat/addr-map.h
2032@@ -26,7 +26,7 @@ struct orion_addr_map_cfg {
2033 value in bridge_virt_base */
2034 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
2035 const int win);
2036-};
2037+} __no_const;
2038
2039 /*
2040 * Information needed to setup one address mapping.
2041diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
2042index 71a6827..e7fbc23 100644
2043--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
2044+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
2045@@ -43,7 +43,7 @@ struct samsung_dma_ops {
2046 int (*started)(unsigned ch);
2047 int (*flush)(unsigned ch);
2048 int (*stop)(unsigned ch);
2049-};
2050+} __no_const;
2051
2052 extern void *samsung_dmadev_get_ops(void);
2053 extern void *s3c_dma_get_ops(void);
2054diff --git a/arch/arm/plat-samsung/include/plat/ehci.h b/arch/arm/plat-samsung/include/plat/ehci.h
2055index 5f28cae..3d23723 100644
2056--- a/arch/arm/plat-samsung/include/plat/ehci.h
2057+++ b/arch/arm/plat-samsung/include/plat/ehci.h
2058@@ -14,7 +14,7 @@
2059 struct s5p_ehci_platdata {
2060 int (*phy_init)(struct platform_device *pdev, int type);
2061 int (*phy_exit)(struct platform_device *pdev, int type);
2062-};
2063+} __no_const;
2064
2065 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
2066
2067diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
2068index c3a58a1..78fbf54 100644
2069--- a/arch/avr32/include/asm/cache.h
2070+++ b/arch/avr32/include/asm/cache.h
2071@@ -1,8 +1,10 @@
2072 #ifndef __ASM_AVR32_CACHE_H
2073 #define __ASM_AVR32_CACHE_H
2074
2075+#include <linux/const.h>
2076+
2077 #define L1_CACHE_SHIFT 5
2078-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2079+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2080
2081 /*
2082 * Memory returned by kmalloc() may be used for DMA, so we must make
2083diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
2084index 3b3159b..425ea94 100644
2085--- a/arch/avr32/include/asm/elf.h
2086+++ b/arch/avr32/include/asm/elf.h
2087@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
2088 the loader. We need to make sure that it is out of the way of the program
2089 that it will "exec", and that there is sufficient room for the brk. */
2090
2091-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
2092+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2093
2094+#ifdef CONFIG_PAX_ASLR
2095+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
2096+
2097+#define PAX_DELTA_MMAP_LEN 15
2098+#define PAX_DELTA_STACK_LEN 15
2099+#endif
2100
2101 /* This yields a mask that user programs can use to figure out what
2102 instruction set this CPU supports. This could be done in user space,
2103diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
2104index b7f5c68..556135c 100644
2105--- a/arch/avr32/include/asm/kmap_types.h
2106+++ b/arch/avr32/include/asm/kmap_types.h
2107@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
2108 D(11) KM_IRQ1,
2109 D(12) KM_SOFTIRQ0,
2110 D(13) KM_SOFTIRQ1,
2111-D(14) KM_TYPE_NR
2112+D(14) KM_CLEARPAGE,
2113+D(15) KM_TYPE_NR
2114 };
2115
2116 #undef D
2117diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
2118index f7040a1..db9f300 100644
2119--- a/arch/avr32/mm/fault.c
2120+++ b/arch/avr32/mm/fault.c
2121@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
2122
2123 int exception_trace = 1;
2124
2125+#ifdef CONFIG_PAX_PAGEEXEC
2126+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2127+{
2128+ unsigned long i;
2129+
2130+ printk(KERN_ERR "PAX: bytes at PC: ");
2131+ for (i = 0; i < 20; i++) {
2132+ unsigned char c;
2133+ if (get_user(c, (unsigned char *)pc+i))
2134+ printk(KERN_CONT "???????? ");
2135+ else
2136+ printk(KERN_CONT "%02x ", c);
2137+ }
2138+ printk("\n");
2139+}
2140+#endif
2141+
2142 /*
2143 * This routine handles page faults. It determines the address and the
2144 * problem, and then passes it off to one of the appropriate routines.
2145@@ -156,6 +173,16 @@ bad_area:
2146 up_read(&mm->mmap_sem);
2147
2148 if (user_mode(regs)) {
2149+
2150+#ifdef CONFIG_PAX_PAGEEXEC
2151+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2152+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
2153+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
2154+ do_group_exit(SIGKILL);
2155+ }
2156+ }
2157+#endif
2158+
2159 if (exception_trace && printk_ratelimit())
2160 printk("%s%s[%d]: segfault at %08lx pc %08lx "
2161 "sp %08lx ecr %lu\n",
2162diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
2163index 568885a..f8008df 100644
2164--- a/arch/blackfin/include/asm/cache.h
2165+++ b/arch/blackfin/include/asm/cache.h
2166@@ -7,6 +7,7 @@
2167 #ifndef __ARCH_BLACKFIN_CACHE_H
2168 #define __ARCH_BLACKFIN_CACHE_H
2169
2170+#include <linux/const.h>
2171 #include <linux/linkage.h> /* for asmlinkage */
2172
2173 /*
2174@@ -14,7 +15,7 @@
2175 * Blackfin loads 32 bytes for cache
2176 */
2177 #define L1_CACHE_SHIFT 5
2178-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2179+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2180 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2181
2182 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2183diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
2184index aea2718..3639a60 100644
2185--- a/arch/cris/include/arch-v10/arch/cache.h
2186+++ b/arch/cris/include/arch-v10/arch/cache.h
2187@@ -1,8 +1,9 @@
2188 #ifndef _ASM_ARCH_CACHE_H
2189 #define _ASM_ARCH_CACHE_H
2190
2191+#include <linux/const.h>
2192 /* Etrax 100LX have 32-byte cache-lines. */
2193-#define L1_CACHE_BYTES 32
2194 #define L1_CACHE_SHIFT 5
2195+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2196
2197 #endif /* _ASM_ARCH_CACHE_H */
2198diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
2199index 1de779f..336fad3 100644
2200--- a/arch/cris/include/arch-v32/arch/cache.h
2201+++ b/arch/cris/include/arch-v32/arch/cache.h
2202@@ -1,11 +1,12 @@
2203 #ifndef _ASM_CRIS_ARCH_CACHE_H
2204 #define _ASM_CRIS_ARCH_CACHE_H
2205
2206+#include <linux/const.h>
2207 #include <arch/hwregs/dma.h>
2208
2209 /* A cache-line is 32 bytes. */
2210-#define L1_CACHE_BYTES 32
2211 #define L1_CACHE_SHIFT 5
2212+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2213
2214 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
2215
2216diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
2217index b86329d..6709906 100644
2218--- a/arch/frv/include/asm/atomic.h
2219+++ b/arch/frv/include/asm/atomic.h
2220@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
2221 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
2222 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
2223
2224+#define atomic64_read_unchecked(v) atomic64_read(v)
2225+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2226+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2227+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2228+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2229+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2230+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2231+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2232+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2233+
2234 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
2235 {
2236 int c, old;
2237diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
2238index 2797163..c2a401d 100644
2239--- a/arch/frv/include/asm/cache.h
2240+++ b/arch/frv/include/asm/cache.h
2241@@ -12,10 +12,11 @@
2242 #ifndef __ASM_CACHE_H
2243 #define __ASM_CACHE_H
2244
2245+#include <linux/const.h>
2246
2247 /* bytes per L1 cache line */
2248 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
2249-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2250+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2251
2252 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2253 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
2254diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
2255index f8e16b2..c73ff79 100644
2256--- a/arch/frv/include/asm/kmap_types.h
2257+++ b/arch/frv/include/asm/kmap_types.h
2258@@ -23,6 +23,7 @@ enum km_type {
2259 KM_IRQ1,
2260 KM_SOFTIRQ0,
2261 KM_SOFTIRQ1,
2262+ KM_CLEARPAGE,
2263 KM_TYPE_NR
2264 };
2265
2266diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
2267index 385fd30..6c3d97e 100644
2268--- a/arch/frv/mm/elf-fdpic.c
2269+++ b/arch/frv/mm/elf-fdpic.c
2270@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2271 if (addr) {
2272 addr = PAGE_ALIGN(addr);
2273 vma = find_vma(current->mm, addr);
2274- if (TASK_SIZE - len >= addr &&
2275- (!vma || addr + len <= vma->vm_start))
2276+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2277 goto success;
2278 }
2279
2280@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2281 for (; vma; vma = vma->vm_next) {
2282 if (addr > limit)
2283 break;
2284- if (addr + len <= vma->vm_start)
2285+ if (check_heap_stack_gap(vma, addr, len))
2286 goto success;
2287 addr = vma->vm_end;
2288 }
2289@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
2290 for (; vma; vma = vma->vm_next) {
2291 if (addr > limit)
2292 break;
2293- if (addr + len <= vma->vm_start)
2294+ if (check_heap_stack_gap(vma, addr, len))
2295 goto success;
2296 addr = vma->vm_end;
2297 }
2298diff --git a/arch/h8300/include/asm/cache.h b/arch/h8300/include/asm/cache.h
2299index c635028..6d9445a 100644
2300--- a/arch/h8300/include/asm/cache.h
2301+++ b/arch/h8300/include/asm/cache.h
2302@@ -1,8 +1,10 @@
2303 #ifndef __ARCH_H8300_CACHE_H
2304 #define __ARCH_H8300_CACHE_H
2305
2306+#include <linux/const.h>
2307+
2308 /* bytes per L1 cache line */
2309-#define L1_CACHE_BYTES 4
2310+#define L1_CACHE_BYTES _AC(4,UL)
2311
2312 /* m68k-elf-gcc 2.95.2 doesn't like these */
2313
2314diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
2315index 0f01de2..d37d309 100644
2316--- a/arch/hexagon/include/asm/cache.h
2317+++ b/arch/hexagon/include/asm/cache.h
2318@@ -21,9 +21,11 @@
2319 #ifndef __ASM_CACHE_H
2320 #define __ASM_CACHE_H
2321
2322+#include <linux/const.h>
2323+
2324 /* Bytes per L1 cache line */
2325-#define L1_CACHE_SHIFT (5)
2326-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2327+#define L1_CACHE_SHIFT 5
2328+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2329
2330 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
2331 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
2332diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
2333index 7d91166..88ab87e 100644
2334--- a/arch/ia64/include/asm/atomic.h
2335+++ b/arch/ia64/include/asm/atomic.h
2336@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
2337 #define atomic64_inc(v) atomic64_add(1, (v))
2338 #define atomic64_dec(v) atomic64_sub(1, (v))
2339
2340+#define atomic64_read_unchecked(v) atomic64_read(v)
2341+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2342+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2343+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2344+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2345+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2346+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2347+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2348+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2349+
2350 /* Atomic operations are already serializing */
2351 #define smp_mb__before_atomic_dec() barrier()
2352 #define smp_mb__after_atomic_dec() barrier()
2353diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
2354index 988254a..e1ee885 100644
2355--- a/arch/ia64/include/asm/cache.h
2356+++ b/arch/ia64/include/asm/cache.h
2357@@ -1,6 +1,7 @@
2358 #ifndef _ASM_IA64_CACHE_H
2359 #define _ASM_IA64_CACHE_H
2360
2361+#include <linux/const.h>
2362
2363 /*
2364 * Copyright (C) 1998-2000 Hewlett-Packard Co
2365@@ -9,7 +10,7 @@
2366
2367 /* Bytes per L1 (data) cache line. */
2368 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
2369-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2370+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2371
2372 #ifdef CONFIG_SMP
2373 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2374diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
2375index b5298eb..67c6e62 100644
2376--- a/arch/ia64/include/asm/elf.h
2377+++ b/arch/ia64/include/asm/elf.h
2378@@ -42,6 +42,13 @@
2379 */
2380 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
2381
2382+#ifdef CONFIG_PAX_ASLR
2383+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
2384+
2385+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2386+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
2387+#endif
2388+
2389 #define PT_IA_64_UNWIND 0x70000001
2390
2391 /* IA-64 relocations: */
2392diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
2393index 96a8d92..617a1cf 100644
2394--- a/arch/ia64/include/asm/pgalloc.h
2395+++ b/arch/ia64/include/asm/pgalloc.h
2396@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2397 pgd_val(*pgd_entry) = __pa(pud);
2398 }
2399
2400+static inline void
2401+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
2402+{
2403+ pgd_populate(mm, pgd_entry, pud);
2404+}
2405+
2406 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
2407 {
2408 return quicklist_alloc(0, GFP_KERNEL, NULL);
2409@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2410 pud_val(*pud_entry) = __pa(pmd);
2411 }
2412
2413+static inline void
2414+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
2415+{
2416+ pud_populate(mm, pud_entry, pmd);
2417+}
2418+
2419 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
2420 {
2421 return quicklist_alloc(0, GFP_KERNEL, NULL);
2422diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
2423index 815810c..d60bd4c 100644
2424--- a/arch/ia64/include/asm/pgtable.h
2425+++ b/arch/ia64/include/asm/pgtable.h
2426@@ -12,7 +12,7 @@
2427 * David Mosberger-Tang <davidm@hpl.hp.com>
2428 */
2429
2430-
2431+#include <linux/const.h>
2432 #include <asm/mman.h>
2433 #include <asm/page.h>
2434 #include <asm/processor.h>
2435@@ -142,6 +142,17 @@
2436 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2437 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2438 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
2439+
2440+#ifdef CONFIG_PAX_PAGEEXEC
2441+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
2442+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2443+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
2444+#else
2445+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2446+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2447+# define PAGE_COPY_NOEXEC PAGE_COPY
2448+#endif
2449+
2450 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
2451 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
2452 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
2453diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
2454index 54ff557..70c88b7 100644
2455--- a/arch/ia64/include/asm/spinlock.h
2456+++ b/arch/ia64/include/asm/spinlock.h
2457@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
2458 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
2459
2460 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
2461- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
2462+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
2463 }
2464
2465 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
2466diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
2467index 449c8c0..432a3d2 100644
2468--- a/arch/ia64/include/asm/uaccess.h
2469+++ b/arch/ia64/include/asm/uaccess.h
2470@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2471 const void *__cu_from = (from); \
2472 long __cu_len = (n); \
2473 \
2474- if (__access_ok(__cu_to, __cu_len, get_fs())) \
2475+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
2476 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
2477 __cu_len; \
2478 })
2479@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
2480 long __cu_len = (n); \
2481 \
2482 __chk_user_ptr(__cu_from); \
2483- if (__access_ok(__cu_from, __cu_len, get_fs())) \
2484+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
2485 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
2486 __cu_len; \
2487 })
2488diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
2489index 24603be..948052d 100644
2490--- a/arch/ia64/kernel/module.c
2491+++ b/arch/ia64/kernel/module.c
2492@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
2493 void
2494 module_free (struct module *mod, void *module_region)
2495 {
2496- if (mod && mod->arch.init_unw_table &&
2497- module_region == mod->module_init) {
2498+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
2499 unw_remove_unwind_table(mod->arch.init_unw_table);
2500 mod->arch.init_unw_table = NULL;
2501 }
2502@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
2503 }
2504
2505 static inline int
2506+in_init_rx (const struct module *mod, uint64_t addr)
2507+{
2508+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
2509+}
2510+
2511+static inline int
2512+in_init_rw (const struct module *mod, uint64_t addr)
2513+{
2514+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
2515+}
2516+
2517+static inline int
2518 in_init (const struct module *mod, uint64_t addr)
2519 {
2520- return addr - (uint64_t) mod->module_init < mod->init_size;
2521+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
2522+}
2523+
2524+static inline int
2525+in_core_rx (const struct module *mod, uint64_t addr)
2526+{
2527+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
2528+}
2529+
2530+static inline int
2531+in_core_rw (const struct module *mod, uint64_t addr)
2532+{
2533+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
2534 }
2535
2536 static inline int
2537 in_core (const struct module *mod, uint64_t addr)
2538 {
2539- return addr - (uint64_t) mod->module_core < mod->core_size;
2540+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
2541 }
2542
2543 static inline int
2544@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
2545 break;
2546
2547 case RV_BDREL:
2548- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
2549+ if (in_init_rx(mod, val))
2550+ val -= (uint64_t) mod->module_init_rx;
2551+ else if (in_init_rw(mod, val))
2552+ val -= (uint64_t) mod->module_init_rw;
2553+ else if (in_core_rx(mod, val))
2554+ val -= (uint64_t) mod->module_core_rx;
2555+ else if (in_core_rw(mod, val))
2556+ val -= (uint64_t) mod->module_core_rw;
2557 break;
2558
2559 case RV_LTV:
2560@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
2561 * addresses have been selected...
2562 */
2563 uint64_t gp;
2564- if (mod->core_size > MAX_LTOFF)
2565+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
2566 /*
2567 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
2568 * at the end of the module.
2569 */
2570- gp = mod->core_size - MAX_LTOFF / 2;
2571+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
2572 else
2573- gp = mod->core_size / 2;
2574- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
2575+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
2576+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
2577 mod->arch.gp = gp;
2578 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
2579 }
2580diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
2581index 609d500..7dde2a8 100644
2582--- a/arch/ia64/kernel/sys_ia64.c
2583+++ b/arch/ia64/kernel/sys_ia64.c
2584@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2585 if (REGION_NUMBER(addr) == RGN_HPAGE)
2586 addr = 0;
2587 #endif
2588+
2589+#ifdef CONFIG_PAX_RANDMMAP
2590+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2591+ addr = mm->free_area_cache;
2592+ else
2593+#endif
2594+
2595 if (!addr)
2596 addr = mm->free_area_cache;
2597
2598@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
2599 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
2600 /* At this point: (!vma || addr < vma->vm_end). */
2601 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
2602- if (start_addr != TASK_UNMAPPED_BASE) {
2603+ if (start_addr != mm->mmap_base) {
2604 /* Start a new search --- just in case we missed some holes. */
2605- addr = TASK_UNMAPPED_BASE;
2606+ addr = mm->mmap_base;
2607 goto full_search;
2608 }
2609 return -ENOMEM;
2610 }
2611- if (!vma || addr + len <= vma->vm_start) {
2612+ if (check_heap_stack_gap(vma, addr, len)) {
2613 /* Remember the address where we stopped this search: */
2614 mm->free_area_cache = addr + len;
2615 return addr;
2616diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
2617index 0ccb28f..8992469 100644
2618--- a/arch/ia64/kernel/vmlinux.lds.S
2619+++ b/arch/ia64/kernel/vmlinux.lds.S
2620@@ -198,7 +198,7 @@ SECTIONS {
2621 /* Per-cpu data: */
2622 . = ALIGN(PERCPU_PAGE_SIZE);
2623 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
2624- __phys_per_cpu_start = __per_cpu_load;
2625+ __phys_per_cpu_start = per_cpu_load;
2626 /*
2627 * ensure percpu data fits
2628 * into percpu page size
2629diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
2630index 02d29c2..ea893df 100644
2631--- a/arch/ia64/mm/fault.c
2632+++ b/arch/ia64/mm/fault.c
2633@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
2634 return pte_present(pte);
2635 }
2636
2637+#ifdef CONFIG_PAX_PAGEEXEC
2638+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2639+{
2640+ unsigned long i;
2641+
2642+ printk(KERN_ERR "PAX: bytes at PC: ");
2643+ for (i = 0; i < 8; i++) {
2644+ unsigned int c;
2645+ if (get_user(c, (unsigned int *)pc+i))
2646+ printk(KERN_CONT "???????? ");
2647+ else
2648+ printk(KERN_CONT "%08x ", c);
2649+ }
2650+ printk("\n");
2651+}
2652+#endif
2653+
2654 void __kprobes
2655 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
2656 {
2657@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
2658 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
2659 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
2660
2661- if ((vma->vm_flags & mask) != mask)
2662+ if ((vma->vm_flags & mask) != mask) {
2663+
2664+#ifdef CONFIG_PAX_PAGEEXEC
2665+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
2666+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
2667+ goto bad_area;
2668+
2669+ up_read(&mm->mmap_sem);
2670+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
2671+ do_group_exit(SIGKILL);
2672+ }
2673+#endif
2674+
2675 goto bad_area;
2676
2677+ }
2678+
2679 /*
2680 * If for any reason at all we couldn't handle the fault, make
2681 * sure we exit gracefully rather than endlessly redo the
2682diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
2683index 5ca674b..e0e1b70 100644
2684--- a/arch/ia64/mm/hugetlbpage.c
2685+++ b/arch/ia64/mm/hugetlbpage.c
2686@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
2687 /* At this point: (!vmm || addr < vmm->vm_end). */
2688 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
2689 return -ENOMEM;
2690- if (!vmm || (addr + len) <= vmm->vm_start)
2691+ if (check_heap_stack_gap(vmm, addr, len))
2692 return addr;
2693 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
2694 }
2695diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
2696index 0eab454..bd794f2 100644
2697--- a/arch/ia64/mm/init.c
2698+++ b/arch/ia64/mm/init.c
2699@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
2700 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
2701 vma->vm_end = vma->vm_start + PAGE_SIZE;
2702 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
2703+
2704+#ifdef CONFIG_PAX_PAGEEXEC
2705+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
2706+ vma->vm_flags &= ~VM_EXEC;
2707+
2708+#ifdef CONFIG_PAX_MPROTECT
2709+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
2710+ vma->vm_flags &= ~VM_MAYEXEC;
2711+#endif
2712+
2713+ }
2714+#endif
2715+
2716 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2717 down_write(&current->mm->mmap_sem);
2718 if (insert_vm_struct(current->mm, vma)) {
2719diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
2720index 40b3ee9..8c2c112 100644
2721--- a/arch/m32r/include/asm/cache.h
2722+++ b/arch/m32r/include/asm/cache.h
2723@@ -1,8 +1,10 @@
2724 #ifndef _ASM_M32R_CACHE_H
2725 #define _ASM_M32R_CACHE_H
2726
2727+#include <linux/const.h>
2728+
2729 /* L1 cache line size */
2730 #define L1_CACHE_SHIFT 4
2731-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2732+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2733
2734 #endif /* _ASM_M32R_CACHE_H */
2735diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
2736index 82abd15..d95ae5d 100644
2737--- a/arch/m32r/lib/usercopy.c
2738+++ b/arch/m32r/lib/usercopy.c
2739@@ -14,6 +14,9 @@
2740 unsigned long
2741 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2742 {
2743+ if ((long)n < 0)
2744+ return n;
2745+
2746 prefetch(from);
2747 if (access_ok(VERIFY_WRITE, to, n))
2748 __copy_user(to,from,n);
2749@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
2750 unsigned long
2751 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
2752 {
2753+ if ((long)n < 0)
2754+ return n;
2755+
2756 prefetchw(to);
2757 if (access_ok(VERIFY_READ, from, n))
2758 __copy_user_zeroing(to,from,n);
4c928ab7
MT
2759diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
2760index 0395c51..5f26031 100644
2761--- a/arch/m68k/include/asm/cache.h
2762+++ b/arch/m68k/include/asm/cache.h
2763@@ -4,9 +4,11 @@
2764 #ifndef __ARCH_M68K_CACHE_H
2765 #define __ARCH_M68K_CACHE_H
2766
2767+#include <linux/const.h>
2768+
2769 /* bytes per L1 cache line */
2770 #define L1_CACHE_SHIFT 4
2771-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
2772+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2773
2774 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
2775
2776diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
2777index 4efe96a..60e8699 100644
2778--- a/arch/microblaze/include/asm/cache.h
2779+++ b/arch/microblaze/include/asm/cache.h
2780@@ -13,11 +13,12 @@
2781 #ifndef _ASM_MICROBLAZE_CACHE_H
2782 #define _ASM_MICROBLAZE_CACHE_H
2783
2784+#include <linux/const.h>
2785 #include <asm/registers.h>
2786
2787 #define L1_CACHE_SHIFT 5
2788 /* word-granular cache in microblaze */
2789-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2790+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2791
2792 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2793
2794diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
2795index 3f4c5cb..3439c6e 100644
2796--- a/arch/mips/include/asm/atomic.h
2797+++ b/arch/mips/include/asm/atomic.h
2798@@ -21,6 +21,10 @@
2799 #include <asm/cmpxchg.h>
2800 #include <asm/war.h>
2801
2802+#ifdef CONFIG_GENERIC_ATOMIC64
2803+#include <asm-generic/atomic64.h>
2804+#endif
2805+
2806 #define ATOMIC_INIT(i) { (i) }
2807
2808 /*
2809@@ -765,6 +769,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
2810 */
2811 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
2812
2813+#define atomic64_read_unchecked(v) atomic64_read(v)
2814+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
2815+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
2816+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
2817+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
2818+#define atomic64_inc_unchecked(v) atomic64_inc(v)
2819+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
2820+#define atomic64_dec_unchecked(v) atomic64_dec(v)
2821+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
2822+
2823 #endif /* CONFIG_64BIT */
2824
2825 /*
2826diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
2827index b4db69f..8f3b093 100644
2828--- a/arch/mips/include/asm/cache.h
2829+++ b/arch/mips/include/asm/cache.h
2830@@ -9,10 +9,11 @@
2831 #ifndef _ASM_CACHE_H
2832 #define _ASM_CACHE_H
2833
2834+#include <linux/const.h>
2835 #include <kmalloc.h>
2836
2837 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
2838-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
2839+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
2840
2841 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
2842 #define SMP_CACHE_BYTES L1_CACHE_BYTES
2843diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
2844index 455c0ac..ad65fbe 100644
2845--- a/arch/mips/include/asm/elf.h
2846+++ b/arch/mips/include/asm/elf.h
2847@@ -372,13 +372,16 @@ extern const char *__elf_platform;
2848 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
2849 #endif
2850
2851+#ifdef CONFIG_PAX_ASLR
2852+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2853+
2854+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2855+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2856+#endif
2857+
2858 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2859 struct linux_binprm;
2860 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2861 int uses_interp);
2862
2863-struct mm_struct;
2864-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2865-#define arch_randomize_brk arch_randomize_brk
2866-
2867 #endif /* _ASM_ELF_H */
2868diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
2869index c1f6afa..38cc6e9 100644
2870--- a/arch/mips/include/asm/exec.h
2871+++ b/arch/mips/include/asm/exec.h
2872@@ -12,6 +12,6 @@
2873 #ifndef _ASM_EXEC_H
2874 #define _ASM_EXEC_H
2875
2876-extern unsigned long arch_align_stack(unsigned long sp);
2877+#define arch_align_stack(x) ((x) & ~0xfUL)
2878
2879 #endif /* _ASM_EXEC_H */
2880diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
2881index da9bd7d..91aa7ab 100644
2882--- a/arch/mips/include/asm/page.h
2883+++ b/arch/mips/include/asm/page.h
2884@@ -98,7 +98,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
2885 #ifdef CONFIG_CPU_MIPS32
2886 typedef struct { unsigned long pte_low, pte_high; } pte_t;
2887 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
2888- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
2889+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
2890 #else
2891 typedef struct { unsigned long long pte; } pte_t;
2892 #define pte_val(x) ((x).pte)
2893diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
2894index 881d18b..cea38bc 100644
2895--- a/arch/mips/include/asm/pgalloc.h
2896+++ b/arch/mips/include/asm/pgalloc.h
2897@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2898 {
2899 set_pud(pud, __pud((unsigned long)pmd));
2900 }
2901+
2902+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
2903+{
2904+ pud_populate(mm, pud, pmd);
2905+}
2906 #endif
2907
2908 /*
2909diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
2910index 0d85d8e..ec71487 100644
2911--- a/arch/mips/include/asm/thread_info.h
2912+++ b/arch/mips/include/asm/thread_info.h
2913@@ -123,6 +123,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
2914 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
2915 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
2916 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
2917+/* li takes a 32bit immediate */
2918+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
2919 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
2920
2921 #ifdef CONFIG_MIPS32_O32
2922@@ -146,15 +148,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
2923 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
2924 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
2925 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
2926+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
2927+
2928+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2929
2930 /* work to do in syscall_trace_leave() */
2931-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
2932+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
2933
2934 /* work to do on interrupt/exception return */
2935 #define _TIF_WORK_MASK (0x0000ffef & \
2936 ~(_TIF_SECCOMP | _TIF_SYSCALL_AUDIT))
2937 /* work to do on any return to u-space */
2938-#define _TIF_ALLWORK_MASK (0x8000ffff & ~_TIF_SECCOMP)
2939+#define _TIF_ALLWORK_MASK ((0x8000ffff & ~_TIF_SECCOMP) | _TIF_GRSEC_SETXID)
2940
2941 #endif /* __KERNEL__ */
2942
2943diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
2944index 9fdd8bc..4bd7f1a 100644
2945--- a/arch/mips/kernel/binfmt_elfn32.c
2946+++ b/arch/mips/kernel/binfmt_elfn32.c
2947@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2948 #undef ELF_ET_DYN_BASE
2949 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2950
2951+#ifdef CONFIG_PAX_ASLR
2952+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2953+
2954+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2955+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2956+#endif
2957+
2958 #include <asm/processor.h>
2959 #include <linux/module.h>
2960 #include <linux/elfcore.h>
2961diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2962index ff44823..97f8906 100644
2963--- a/arch/mips/kernel/binfmt_elfo32.c
2964+++ b/arch/mips/kernel/binfmt_elfo32.c
2965@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2966 #undef ELF_ET_DYN_BASE
2967 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2968
2969+#ifdef CONFIG_PAX_ASLR
2970+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
2971+
2972+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2973+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2974+#endif
2975+
2976 #include <asm/processor.h>
2977
2978 /*
2979diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2980index e9a5fd7..378809a 100644
2981--- a/arch/mips/kernel/process.c
2982+++ b/arch/mips/kernel/process.c
2983@@ -480,15 +480,3 @@ unsigned long get_wchan(struct task_struct *task)
2984 out:
2985 return pc;
2986 }
2987-
2988-/*
2989- * Don't forget that the stack pointer must be aligned on a 8 bytes
2990- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2991- */
2992-unsigned long arch_align_stack(unsigned long sp)
2993-{
2994- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2995- sp -= get_random_int() & ~PAGE_MASK;
2996-
2997- return sp & ALMASK;
2998-}
2999diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
3000index 7c24c29..e2f1981 100644
3001--- a/arch/mips/kernel/ptrace.c
3002+++ b/arch/mips/kernel/ptrace.c
3003@@ -528,6 +528,10 @@ static inline int audit_arch(void)
3004 return arch;
3005 }
3006
3007+#ifdef CONFIG_GRKERNSEC_SETXID
3008+extern void gr_delayed_cred_worker(void);
3009+#endif
3010+
3011 /*
3012 * Notification of system call entry/exit
3013 * - triggered by current->work.syscall_trace
3014@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
3015 /* do the secure computing check first */
3016 secure_computing(regs->regs[2]);
3017
3018+#ifdef CONFIG_GRKERNSEC_SETXID
3019+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
3020+ gr_delayed_cred_worker();
3021+#endif
3022+
3023 if (!(current->ptrace & PT_PTRACED))
3024 goto out;
3025
3026diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
3027index a632bc1..0b77c7c 100644
3028--- a/arch/mips/kernel/scall32-o32.S
3029+++ b/arch/mips/kernel/scall32-o32.S
3030@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3031
3032 stack_done:
3033 lw t0, TI_FLAGS($28) # syscall tracing enabled?
3034- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3035+ li t1, _TIF_SYSCALL_WORK
3036 and t0, t1
3037 bnez t0, syscall_trace_entry # -> yes
3038
3039diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
3040index 3b5a5e9..e1ee86d 100644
3041--- a/arch/mips/kernel/scall64-64.S
3042+++ b/arch/mips/kernel/scall64-64.S
3043@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
3044
3045 sd a3, PT_R26(sp) # save a3 for syscall restarting
3046
3047- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3048+ li t1, _TIF_SYSCALL_WORK
3049 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3050 and t0, t1, t0
3051 bnez t0, syscall_trace_entry
3052diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
3053index 6be6f70..1859577 100644
3054--- a/arch/mips/kernel/scall64-n32.S
3055+++ b/arch/mips/kernel/scall64-n32.S
3056@@ -53,7 +53,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
3057
3058 sd a3, PT_R26(sp) # save a3 for syscall restarting
3059
3060- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3061+ li t1, _TIF_SYSCALL_WORK
3062 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3063 and t0, t1, t0
3064 bnez t0, n32_syscall_trace_entry
3065diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
3066index 5422855..74e63a3 100644
3067--- a/arch/mips/kernel/scall64-o32.S
3068+++ b/arch/mips/kernel/scall64-o32.S
3069@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
3070 PTR 4b, bad_stack
3071 .previous
3072
3073- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
3074+ li t1, _TIF_SYSCALL_WORK
3075 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
3076 and t0, t1, t0
3077 bnez t0, trace_a_syscall
3078diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
3079index c14f6df..537e729 100644
3080--- a/arch/mips/mm/fault.c
3081+++ b/arch/mips/mm/fault.c
3082@@ -27,6 +27,23 @@
3083 #include <asm/highmem.h> /* For VMALLOC_END */
3084 #include <linux/kdebug.h>
3085
3086+#ifdef CONFIG_PAX_PAGEEXEC
6e9df6a3 3087+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
15a11c5b
MT
3088+{
3089+ unsigned long i;
3090+
3091+ printk(KERN_ERR "PAX: bytes at PC: ");
3092+ for (i = 0; i < 5; i++) {
3093+ unsigned int c;
3094+ if (get_user(c, (unsigned int *)pc+i))
3095+ printk(KERN_CONT "???????? ");
3096+ else
3097+ printk(KERN_CONT "%08x ", c);
3098+ }
3099+ printk("\n");
3100+}
3101+#endif
3102+
3103 /*
3104 * This routine handles page faults. It determines the address,
3105 * and the problem, and then passes it off to one of the appropriate
3106diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
3107index 302d779..7d35bf8 100644
3108--- a/arch/mips/mm/mmap.c
3109+++ b/arch/mips/mm/mmap.c
3110@@ -95,6 +95,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3111 do_color_align = 1;
3112
3113 /* requesting a specific address */
3114+
3115+#ifdef CONFIG_PAX_RANDMMAP
3116+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
3117+#endif
3118+
3119 if (addr) {
3120 if (do_color_align)
3121 addr = COLOUR_ALIGN(addr, pgoff);
3122@@ -102,8 +107,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3123 addr = PAGE_ALIGN(addr);
3124
3125 vma = find_vma(mm, addr);
3126- if (TASK_SIZE - len >= addr &&
3127- (!vma || addr + len <= vma->vm_start))
3128+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len))
3129 return addr;
3130 }
3131
3132@@ -118,7 +122,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3133 /* At this point: (!vma || addr < vma->vm_end). */
3134 if (TASK_SIZE - len < addr)
3135 return -ENOMEM;
3136- if (!vma || addr + len <= vma->vm_start)
3137+ if (check_heap_stack_gap(vmm, addr, len))
3138 return addr;
3139 addr = vma->vm_end;
3140 if (do_color_align)
3141@@ -145,7 +149,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3142 /* make sure it can fit in the remaining address space */
3143 if (likely(addr > len)) {
3144 vma = find_vma(mm, addr - len);
3145- if (!vma || addr <= vma->vm_start) {
3146+ if (check_heap_stack_gap(vmm, addr - len, len))
3147 /* cache the address as a hint for next time */
3148 return mm->free_area_cache = addr - len;
3149 }
3150@@ -165,7 +169,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
3151 * return with success:
3152 */
3153 vma = find_vma(mm, addr);
3154- if (likely(!vma || addr + len <= vma->vm_start)) {
3155+ if (check_heap_stack_gap(vmm, addr, len)) {
3156 /* cache the address as a hint for next time */
3157 return mm->free_area_cache = addr;
3158 }
3159@@ -242,30 +246,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3160 mm->unmap_area = arch_unmap_area_topdown;
3161 }
bc901d79 3162 }
3163-
3164-static inline unsigned long brk_rnd(void)
3165-{
3166- unsigned long rnd = get_random_int();
3167-
3168- rnd = rnd << PAGE_SHIFT;
3169- /* 8MB for 32bit, 256MB for 64bit */
3170- if (TASK_IS_32BIT_ADDR)
3171- rnd = rnd & 0x7ffffful;
3172- else
3173- rnd = rnd & 0xffffffful;
3174-
3175- return rnd;
3176-}
3177-
3178-unsigned long arch_randomize_brk(struct mm_struct *mm)
3179-{
3180- unsigned long base = mm->brk;
3181- unsigned long ret;
3182-
3183- ret = PAGE_ALIGN(base + brk_rnd());
3184-
3185- if (ret < mm->brk)
3186- return mm->brk;
3187-
3188- return ret;
3189-}
3190diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3191index 967d144..db12197 100644
3192--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
3193+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
3194@@ -11,12 +11,14 @@
3195 #ifndef _ASM_PROC_CACHE_H
3196 #define _ASM_PROC_CACHE_H
3197
3198+#include <linux/const.h>
3199+
3200 /* L1 cache */
3201
3202 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3203 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
3204-#define L1_CACHE_BYTES 16 /* bytes per entry */
3205 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
3206+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3207 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
3208
3209 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3210diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3211index bcb5df2..84fabd2 100644
3212--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3213+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
3214@@ -16,13 +16,15 @@
3215 #ifndef _ASM_PROC_CACHE_H
3216 #define _ASM_PROC_CACHE_H
3217
3218+#include <linux/const.h>
3219+
3220 /*
3221 * L1 cache
3222 */
3223 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
3224 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
3225-#define L1_CACHE_BYTES 32 /* bytes per entry */
3226 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
3227+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
3228 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
3229
3230 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
3231diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
3232index 4ce7a01..449202a 100644
3233--- a/arch/openrisc/include/asm/cache.h
3234+++ b/arch/openrisc/include/asm/cache.h
3235@@ -19,11 +19,13 @@
3236 #ifndef __ASM_OPENRISC_CACHE_H
3237 #define __ASM_OPENRISC_CACHE_H
3238
3239+#include <linux/const.h>
3240+
3241 /* FIXME: How can we replace these with values from the CPU...
3242 * they shouldn't be hard-coded!
3243 */
3244
3245-#define L1_CACHE_BYTES 16
3246 #define L1_CACHE_SHIFT 4
3247+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3248
3249 #endif /* __ASM_OPENRISC_CACHE_H */
3250diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
c6e2a6c8 3251index 6c6defc..d30653d 100644
4c928ab7
MT
3252--- a/arch/parisc/include/asm/atomic.h
3253+++ b/arch/parisc/include/asm/atomic.h
c6e2a6c8 3254@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
4c928ab7
MT
3255
3256 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3257
3258+#define atomic64_read_unchecked(v) atomic64_read(v)
3259+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3260+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3261+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3262+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3263+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3264+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3265+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3266+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3267+
3268 #endif /* !CONFIG_64BIT */
3269
3270
3271diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
3272index 47f11c7..3420df2 100644
3273--- a/arch/parisc/include/asm/cache.h
3274+++ b/arch/parisc/include/asm/cache.h
3275@@ -5,6 +5,7 @@
3276 #ifndef __ARCH_PARISC_CACHE_H
3277 #define __ARCH_PARISC_CACHE_H
3278
3279+#include <linux/const.h>
3280
3281 /*
3282 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
3283@@ -15,13 +16,13 @@
3284 * just ruin performance.
3285 */
3286 #ifdef CONFIG_PA20
3287-#define L1_CACHE_BYTES 64
3288 #define L1_CACHE_SHIFT 6
3289 #else
3290-#define L1_CACHE_BYTES 32
3291 #define L1_CACHE_SHIFT 5
3292 #endif
3293
3294+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3295+
3296 #ifndef __ASSEMBLY__
3297
3298 #define SMP_CACHE_BYTES L1_CACHE_BYTES
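A change this patch repeats for several architectures: L1_CACHE_BYTES is now derived from L1_CACHE_SHIFT through _AC(1,UL) instead of being a second hard-coded number, so the two values can no longer drift apart, and the C-side constant becomes an unsigned long while assembly still sees a bare literal. The snippet below is an approximate reconstruction (from memory) of how the <linux/const.h> helper achieves that; treat the exact macro bodies as an assumption.

/* approximate reconstruction of the <linux/const.h> machinery used above */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: plain literal, no suffix */
#else
#define __AC(X, Y)	(X##Y)		/* C: paste the type suffix on */
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)	/* 32, typed unsigned long in C */

#ifndef __ASSEMBLY__
_Static_assert(L1_CACHE_BYTES == 32, "byte count stays in sync with the shift");
#endif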
fe2de317
MT
3299diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
3300index 19f6cb1..6c78cf2 100644
3301--- a/arch/parisc/include/asm/elf.h
3302+++ b/arch/parisc/include/asm/elf.h
3303@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
58c5fc13
MT
3304
3305 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
3306
3307+#ifdef CONFIG_PAX_ASLR
3308+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3309+
3310+#define PAX_DELTA_MMAP_LEN 16
3311+#define PAX_DELTA_STACK_LEN 16
3312+#endif
3313+
3314 /* This yields a mask that user programs can use to figure out what
3315 instruction set this CPU supports. This could be done in user space,
3316 but it's not easy, and we've already done it here. */
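PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits PaX folds into the mmap and stack bases when CONFIG_PAX_ASLR is enabled, applied at page granularity. A quick standalone check of what 16 bits buys on a 4 KiB-page machine (an illustration only, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;	/* 4 KiB pages */
	unsigned long delta_len  = 16;	/* PAX_DELTA_MMAP_LEN above */

	/* the random delta is delta_len bits worth of pages */
	unsigned long long spread = (1ULL << delta_len) << page_shift;

	printf("mmap base varies over %llu MiB\n", spread >> 20);	/* 256 MiB */
	return 0;
}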
5e856224
MT
3317diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
3318index fc987a1..6e068ef 100644
3319--- a/arch/parisc/include/asm/pgalloc.h
3320+++ b/arch/parisc/include/asm/pgalloc.h
3321@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3322 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
3323 }
3324
3325+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
3326+{
3327+ pgd_populate(mm, pgd, pmd);
3328+}
3329+
3330 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
3331 {
3332 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
3333@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
3334 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
3335 #define pmd_free(mm, x) do { } while (0)
3336 #define pgd_populate(mm, pmd, pte) BUG()
3337+#define pgd_populate_kernel(mm, pmd, pte) BUG()
3338
3339 #endif
3340
fe2de317 3341diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
c6e2a6c8 3342index ee99f23..802b0a1 100644
fe2de317
MT
3343--- a/arch/parisc/include/asm/pgtable.h
3344+++ b/arch/parisc/include/asm/pgtable.h
c6e2a6c8 3345@@ -212,6 +212,17 @@ struct vm_area_struct;
58c5fc13
MT
3346 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
3347 #define PAGE_COPY PAGE_EXECREAD
3348 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
3349+
3350+#ifdef CONFIG_PAX_PAGEEXEC
3351+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
3352+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3353+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
3354+#else
3355+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3356+# define PAGE_COPY_NOEXEC PAGE_COPY
3357+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3358+#endif
3359+
3360 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
15a11c5b
MT
3361 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
3362 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
c1e3898a
MT
3363diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
3364index 9ac0660..6ed15c4 100644
3365--- a/arch/parisc/include/asm/uaccess.h
3366+++ b/arch/parisc/include/asm/uaccess.h
3367@@ -252,10 +252,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
3368 const void __user *from,
3369 unsigned long n)
3370 {
3371- int sz = __compiletime_object_size(to);
3372+ size_t sz = __compiletime_object_size(to);
3373 int ret = -EFAULT;
3374
3375- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
3376+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
3377 ret = __copy_from_user(to, from, n);
3378 else
3379 copy_from_user_overflow();
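The int-to-size_t change above matters because __compiletime_object_size() (a wrapper around the compiler's __builtin_object_size(), as far as I recall) reports "size unknown" as (size_t)-1; keeping the sentinel and the comparison in one unsigned type avoids leaning on sign-conversion rules. A userspace sketch of the same guard pattern, with the compile-time size passed in explicitly rather than via the builtin:

#include <stddef.h>
#include <string.h>

/* refuse the copy if the destination is provably too small;
 * (size_t)-1 plays the role of "size unknown, let it through" */
static int guarded_copy(void *to, size_t to_size, const void *from, size_t n)
{
	if (to_size != (size_t)-1 && to_size < n)
		return -1;
	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char dst[8];
	const char src[16] = "fifteen chars..";

	int refused = guarded_copy(dst, sizeof(dst), src, sizeof(src));
	int allowed = guarded_copy(dst, (size_t)-1, src, sizeof(dst));

	return (refused == -1 && allowed == 0) ? 0 : 1;
}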
fe2de317
MT
3380diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
3381index 5e34ccf..672bc9c 100644
3382--- a/arch/parisc/kernel/module.c
3383+++ b/arch/parisc/kernel/module.c
15a11c5b 3384@@ -98,16 +98,38 @@
58c5fc13
MT
3385
3386 /* three functions to determine where in the module core
3387 * or init pieces the location is */
3388+static inline int in_init_rx(struct module *me, void *loc)
3389+{
3390+ return (loc >= me->module_init_rx &&
3391+ loc < (me->module_init_rx + me->init_size_rx));
3392+}
3393+
3394+static inline int in_init_rw(struct module *me, void *loc)
3395+{
3396+ return (loc >= me->module_init_rw &&
3397+ loc < (me->module_init_rw + me->init_size_rw));
3398+}
3399+
3400 static inline int in_init(struct module *me, void *loc)
3401 {
3402- return (loc >= me->module_init &&
3403- loc <= (me->module_init + me->init_size));
3404+ return in_init_rx(me, loc) || in_init_rw(me, loc);
3405+}
3406+
3407+static inline int in_core_rx(struct module *me, void *loc)
3408+{
3409+ return (loc >= me->module_core_rx &&
3410+ loc < (me->module_core_rx + me->core_size_rx));
3411+}
3412+
3413+static inline int in_core_rw(struct module *me, void *loc)
3414+{
3415+ return (loc >= me->module_core_rw &&
3416+ loc < (me->module_core_rw + me->core_size_rw));
3417 }
3418
3419 static inline int in_core(struct module *me, void *loc)
3420 {
3421- return (loc >= me->module_core &&
3422- loc <= (me->module_core + me->core_size));
3423+ return in_core_rx(me, loc) || in_core_rw(me, loc);
3424 }
3425
3426 static inline int in_local(struct module *me, void *loc)
fe2de317 3427@@ -373,13 +395,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
58c5fc13
MT
3428 }
3429
3430 /* align things a bit */
3431- me->core_size = ALIGN(me->core_size, 16);
3432- me->arch.got_offset = me->core_size;
3433- me->core_size += gots * sizeof(struct got_entry);
58c5fc13
MT
3434+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3435+ me->arch.got_offset = me->core_size_rw;
3436+ me->core_size_rw += gots * sizeof(struct got_entry);
fe2de317
MT
3437
3438- me->core_size = ALIGN(me->core_size, 16);
3439- me->arch.fdesc_offset = me->core_size;
3440- me->core_size += fdescs * sizeof(Elf_Fdesc);
58c5fc13
MT
3441+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
3442+ me->arch.fdesc_offset = me->core_size_rw;
3443+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
3444
3445 me->arch.got_max = gots;
3446 me->arch.fdesc_max = fdescs;
fe2de317 3447@@ -397,7 +419,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
58c5fc13
MT
3448
3449 BUG_ON(value == 0);
3450
3451- got = me->module_core + me->arch.got_offset;
3452+ got = me->module_core_rw + me->arch.got_offset;
3453 for (i = 0; got[i].addr; i++)
3454 if (got[i].addr == value)
3455 goto out;
fe2de317 3456@@ -415,7 +437,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
58c5fc13
MT
3457 #ifdef CONFIG_64BIT
3458 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
3459 {
3460- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
3461+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
3462
3463 if (!value) {
3464 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
fe2de317 3465@@ -433,7 +455,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
58c5fc13
MT
3466
3467 /* Create new one */
3468 fdesc->addr = value;
3469- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3470+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3471 return (Elf_Addr)fdesc;
3472 }
3473 #endif /* CONFIG_64BIT */
6e9df6a3 3474@@ -845,7 +867,7 @@ register_unwind_table(struct module *me,
58c5fc13
MT
3475
3476 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
3477 end = table + sechdrs[me->arch.unwind_section].sh_size;
3478- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
3479+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
3480
3481 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
3482 me->arch.unwind_section, table, end, gp);
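Throughout this patch the module loader keeps each module in two regions: a read-execute half (module_core_rx, code) and a read-write half (module_core_rw, data plus the GOT and function descriptors), which is why the bookkeeping above now sizes and addresses core_size_rw/module_core_rw. The rewritten in_init()/in_core() helpers are simply unions of per-half range checks; their common shape is no more than the following sketch (illustrative types, not the patch's struct module layout):

#include <stdbool.h>

struct split_module {
	void *core_rx; unsigned long core_size_rx;	/* code */
	void *core_rw; unsigned long core_size_rw;	/* data, GOT, fdescs */
};

static bool in_range(const void *loc, const void *base, unsigned long size)
{
	const char *p = loc, *b = base;
	return p >= b && p < b + size;
}

static bool in_core(const struct split_module *mod, const void *loc)
{
	return in_range(loc, mod->core_rx, mod->core_size_rx) ||
	       in_range(loc, mod->core_rw, mod->core_size_rw);
}

int main(void)
{
	char text[128], data[256];
	struct split_module mod = { text, sizeof(text), data, sizeof(data) };

	/* an address inside the RW half counts as "in core" */
	return in_core(&mod, &data[10]) ? 0 : 1;
}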
fe2de317
MT
3483diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
3484index c9b9322..02d8940 100644
3485--- a/arch/parisc/kernel/sys_parisc.c
3486+++ b/arch/parisc/kernel/sys_parisc.c
3487@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
57199397
MT
3488 /* At this point: (!vma || addr < vma->vm_end). */
3489 if (TASK_SIZE - len < addr)
3490 return -ENOMEM;
3491- if (!vma || addr + len <= vma->vm_start)
3492+ if (check_heap_stack_gap(vma, addr, len))
3493 return addr;
3494 addr = vma->vm_end;
3495 }
fe2de317 3496@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
57199397
MT
3497 /* At this point: (!vma || addr < vma->vm_end). */
3498 if (TASK_SIZE - len < addr)
3499 return -ENOMEM;
3500- if (!vma || addr + len <= vma->vm_start)
3501+ if (check_heap_stack_gap(vma, addr, len))
3502 return addr;
3503 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
3504 if (addr < vma->vm_end) /* handle wraparound */
fe2de317 3505@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
58c5fc13
MT
3506 if (flags & MAP_FIXED)
3507 return addr;
3508 if (!addr)
3509- addr = TASK_UNMAPPED_BASE;
3510+ addr = current->mm->mmap_base;
3511
3512 if (filp) {
3513 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
fe2de317 3514diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
c6e2a6c8 3515index 45ba99f..8e22c33 100644
fe2de317
MT
3516--- a/arch/parisc/kernel/traps.c
3517+++ b/arch/parisc/kernel/traps.c
c6e2a6c8 3518@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
58c5fc13
MT
3519
3520 down_read(&current->mm->mmap_sem);
3521 vma = find_vma(current->mm,regs->iaoq[0]);
3522- if (vma && (regs->iaoq[0] >= vma->vm_start)
3523- && (vma->vm_flags & VM_EXEC)) {
3524-
3525+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
3526 fault_address = regs->iaoq[0];
3527 fault_space = regs->iasq[0];
3528
fe2de317
MT
3529diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
3530index 18162ce..94de376 100644
3531--- a/arch/parisc/mm/fault.c
3532+++ b/arch/parisc/mm/fault.c
58c5fc13
MT
3533@@ -15,6 +15,7 @@
3534 #include <linux/sched.h>
3535 #include <linux/interrupt.h>
3536 #include <linux/module.h>
3537+#include <linux/unistd.h>
3538
3539 #include <asm/uaccess.h>
3540 #include <asm/traps.h>
fe2de317 3541@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
58c5fc13
MT
3542 static unsigned long
3543 parisc_acctyp(unsigned long code, unsigned int inst)
3544 {
3545- if (code == 6 || code == 16)
3546+ if (code == 6 || code == 7 || code == 16)
3547 return VM_EXEC;
3548
3549 switch (inst & 0xf0000000) {
fe2de317 3550@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
58c5fc13
MT
3551 }
3552 #endif
3553
3554+#ifdef CONFIG_PAX_PAGEEXEC
3555+/*
3556+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
3557+ *
3558+ * returns 1 when task should be killed
3559+ * 2 when rt_sigreturn trampoline was detected
3560+ * 3 when unpatched PLT trampoline was detected
3561+ */
3562+static int pax_handle_fetch_fault(struct pt_regs *regs)
3563+{
3564+
3565+#ifdef CONFIG_PAX_EMUPLT
3566+ int err;
3567+
3568+ do { /* PaX: unpatched PLT emulation */
3569+ unsigned int bl, depwi;
3570+
3571+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
3572+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
3573+
3574+ if (err)
3575+ break;
3576+
3577+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
3578+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
3579+
3580+ err = get_user(ldw, (unsigned int *)addr);
3581+ err |= get_user(bv, (unsigned int *)(addr+4));
3582+ err |= get_user(ldw2, (unsigned int *)(addr+8));
3583+
3584+ if (err)
3585+ break;
3586+
3587+ if (ldw == 0x0E801096U &&
3588+ bv == 0xEAC0C000U &&
3589+ ldw2 == 0x0E881095U)
3590+ {
3591+ unsigned int resolver, map;
3592+
3593+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
3594+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
3595+ if (err)
3596+ break;
3597+
3598+ regs->gr[20] = instruction_pointer(regs)+8;
3599+ regs->gr[21] = map;
3600+ regs->gr[22] = resolver;
3601+ regs->iaoq[0] = resolver | 3UL;
3602+ regs->iaoq[1] = regs->iaoq[0] + 4;
3603+ return 3;
3604+ }
3605+ }
3606+ } while (0);
3607+#endif
3608+
3609+#ifdef CONFIG_PAX_EMUTRAMP
3610+
3611+#ifndef CONFIG_PAX_EMUSIGRT
3612+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
3613+ return 1;
3614+#endif
3615+
3616+ do { /* PaX: rt_sigreturn emulation */
3617+ unsigned int ldi1, ldi2, bel, nop;
3618+
3619+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
3620+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
3621+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
3622+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
3623+
3624+ if (err)
3625+ break;
3626+
3627+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
3628+ ldi2 == 0x3414015AU &&
3629+ bel == 0xE4008200U &&
3630+ nop == 0x08000240U)
3631+ {
3632+ regs->gr[25] = (ldi1 & 2) >> 1;
3633+ regs->gr[20] = __NR_rt_sigreturn;
3634+ regs->gr[31] = regs->iaoq[1] + 16;
3635+ regs->sr[0] = regs->iasq[1];
3636+ regs->iaoq[0] = 0x100UL;
3637+ regs->iaoq[1] = regs->iaoq[0] + 4;
3638+ regs->iasq[0] = regs->sr[2];
3639+ regs->iasq[1] = regs->sr[2];
3640+ return 2;
3641+ }
3642+ } while (0);
3643+#endif
3644+
3645+ return 1;
3646+}
3647+
6e9df6a3 3648+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
58c5fc13
MT
3649+{
3650+ unsigned long i;
3651+
3652+ printk(KERN_ERR "PAX: bytes at PC: ");
3653+ for (i = 0; i < 5; i++) {
3654+ unsigned int c;
3655+ if (get_user(c, (unsigned int *)pc+i))
3656+ printk(KERN_CONT "???????? ");
3657+ else
3658+ printk(KERN_CONT "%08x ", c);
3659+ }
3660+ printk("\n");
3661+}
3662+#endif
3663+
3664 int fixup_exception(struct pt_regs *regs)
3665 {
3666 const struct exception_table_entry *fix;
3667@@ -192,8 +303,33 @@ good_area:
3668
3669 acc_type = parisc_acctyp(code,regs->iir);
3670
3671- if ((vma->vm_flags & acc_type) != acc_type)
3672+ if ((vma->vm_flags & acc_type) != acc_type) {
3673+
3674+#ifdef CONFIG_PAX_PAGEEXEC
3675+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
3676+ (address & ~3UL) == instruction_pointer(regs))
3677+ {
3678+ up_read(&mm->mmap_sem);
3679+ switch (pax_handle_fetch_fault(regs)) {
3680+
3681+#ifdef CONFIG_PAX_EMUPLT
3682+ case 3:
3683+ return;
3684+#endif
3685+
3686+#ifdef CONFIG_PAX_EMUTRAMP
3687+ case 2:
3688+ return;
3689+#endif
3690+
3691+ }
3692+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
3693+ do_group_exit(SIGKILL);
3694+ }
3695+#endif
3696+
3697 goto bad_area;
3698+ }
3699
3700 /*
3701 * If for any reason at all we couldn't handle the fault, make
4c928ab7 3702diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
c6e2a6c8 3703index da29032..f76c24c 100644
4c928ab7
MT
3704--- a/arch/powerpc/include/asm/atomic.h
3705+++ b/arch/powerpc/include/asm/atomic.h
c6e2a6c8
MT
3706@@ -522,6 +522,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
3707 return t1;
3708 }
4c928ab7
MT
3709
3710+#define atomic64_read_unchecked(v) atomic64_read(v)
3711+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
3712+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
3713+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
3714+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
3715+#define atomic64_inc_unchecked(v) atomic64_inc(v)
3716+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
3717+#define atomic64_dec_unchecked(v) atomic64_dec(v)
3718+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
3719+
3720 #endif /* __powerpc64__ */
3721
3722 #endif /* __KERNEL__ */
3723diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
c6e2a6c8 3724index 9e495c9..b6878e5 100644
4c928ab7
MT
3725--- a/arch/powerpc/include/asm/cache.h
3726+++ b/arch/powerpc/include/asm/cache.h
3727@@ -3,6 +3,7 @@
3728
3729 #ifdef __KERNEL__
3730
3731+#include <linux/const.h>
3732
3733 /* bytes per L1 cache line */
3734 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
3735@@ -22,7 +23,7 @@
3736 #define L1_CACHE_SHIFT 7
3737 #endif
3738
3739-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
3740+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
3741
3742 #define SMP_CACHE_BYTES L1_CACHE_BYTES
3743
fe2de317
MT
3744diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
3745index 3bf9cca..e7457d0 100644
3746--- a/arch/powerpc/include/asm/elf.h
3747+++ b/arch/powerpc/include/asm/elf.h
3748@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
58c5fc13
MT
3749 the loader. We need to make sure that it is out of the way of the program
3750 that it will "exec", and that there is sufficient room for the brk. */
3751
3752-extern unsigned long randomize_et_dyn(unsigned long base);
3753-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
3754+#define ELF_ET_DYN_BASE (0x20000000)
3755+
3756+#ifdef CONFIG_PAX_ASLR
3757+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
3758+
3759+#ifdef __powerpc64__
bc901d79
MT
3760+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
3761+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
58c5fc13
MT
3762+#else
3763+#define PAX_DELTA_MMAP_LEN 15
3764+#define PAX_DELTA_STACK_LEN 15
3765+#endif
3766+#endif
3767
3768 /*
3769 * Our registers are always unsigned longs, whether we're a 32 bit
fe2de317 3770@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
58c5fc13
MT
3771 (0x7ff >> (PAGE_SHIFT - 12)) : \
3772 (0x3ffff >> (PAGE_SHIFT - 12)))
3773
3774-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
3775-#define arch_randomize_brk arch_randomize_brk
3776-
3777 #endif /* __KERNEL__ */
3778
3779 /*
c6e2a6c8
MT
3780diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
3781index 8196e9c..d83a9f3 100644
3782--- a/arch/powerpc/include/asm/exec.h
3783+++ b/arch/powerpc/include/asm/exec.h
3784@@ -4,6 +4,6 @@
3785 #ifndef _ASM_POWERPC_EXEC_H
3786 #define _ASM_POWERPC_EXEC_H
3787
3788-extern unsigned long arch_align_stack(unsigned long sp);
3789+#define arch_align_stack(x) ((x) & ~0xfUL)
3790
3791 #endif /* _ASM_POWERPC_EXEC_H */
fe2de317
MT
3792diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
3793index bca8fdc..61e9580 100644
3794--- a/arch/powerpc/include/asm/kmap_types.h
3795+++ b/arch/powerpc/include/asm/kmap_types.h
57199397 3796@@ -27,6 +27,7 @@ enum km_type {
58c5fc13
MT
3797 KM_PPC_SYNC_PAGE,
3798 KM_PPC_SYNC_ICACHE,
57199397 3799 KM_KDB,
58c5fc13
MT
3800+ KM_CLEARPAGE,
3801 KM_TYPE_NR
3802 };
3803
fe2de317
MT
3804diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
3805index d4a7f64..451de1c 100644
3806--- a/arch/powerpc/include/asm/mman.h
3807+++ b/arch/powerpc/include/asm/mman.h
3808@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
15a11c5b
MT
3809 }
3810 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
3811
3812-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
3813+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
3814 {
3815 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
3816 }
fe2de317 3817diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
5e856224 3818index f072e97..b436dee 100644
fe2de317
MT
3819--- a/arch/powerpc/include/asm/page.h
3820+++ b/arch/powerpc/include/asm/page.h
5e856224 3821@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
57199397
MT
3822 * and needs to be executable. This means the whole heap ends
3823 * up being executable.
3824 */
3825-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3826- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3827+#define VM_DATA_DEFAULT_FLAGS32 \
3828+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3829+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3830
3831 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3832 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
5e856224 3833@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
57199397
MT
3834 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
3835 #endif
3836
3837+#define ktla_ktva(addr) (addr)
3838+#define ktva_ktla(addr) (addr)
3839+
4c928ab7
MT
3840 /*
3841 * Use the top bit of the higher-level page table entries to indicate whether
3842 * the entries we point to contain hugepages. This works because we know that
fe2de317 3843diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
5e856224 3844index fed85e6..da5c71b 100644
fe2de317
MT
3845--- a/arch/powerpc/include/asm/page_64.h
3846+++ b/arch/powerpc/include/asm/page_64.h
5e856224 3847@@ -146,15 +146,18 @@ do { \
fe2de317
MT
3848 * stack by default, so in the absence of a PT_GNU_STACK program header
3849 * we turn execute permission off.
3850 */
3851-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
3852- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3853+#define VM_STACK_DEFAULT_FLAGS32 \
3854+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
3855+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3856
3857 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
3858 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
3859
3860+#ifndef CONFIG_PAX_PAGEEXEC
3861 #define VM_STACK_DEFAULT_FLAGS \
3862 (is_32bit_task() ? \
3863 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
3864+#endif
3865
3866 #include <asm-generic/getorder.h>
3867
5e856224
MT
3868diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
3869index 292725c..f87ae14 100644
3870--- a/arch/powerpc/include/asm/pgalloc-64.h
3871+++ b/arch/powerpc/include/asm/pgalloc-64.h
3872@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
3873 #ifndef CONFIG_PPC_64K_PAGES
3874
3875 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
3876+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
3877
3878 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
3879 {
3880@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3881 pud_set(pud, (unsigned long)pmd);
3882 }
3883
3884+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3885+{
3886+ pud_populate(mm, pud, pmd);
3887+}
3888+
3889 #define pmd_populate(mm, pmd, pte_page) \
3890 pmd_populate_kernel(mm, pmd, page_address(pte_page))
3891 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
3892@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
3893 #else /* CONFIG_PPC_64K_PAGES */
3894
3895 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
3896+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
3897
3898 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
3899 pte_t *pte)
fe2de317 3900diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
5e856224 3901index 2e0e411..7899c68 100644
fe2de317
MT
3902--- a/arch/powerpc/include/asm/pgtable.h
3903+++ b/arch/powerpc/include/asm/pgtable.h
317566c1
MT
3904@@ -2,6 +2,7 @@
3905 #define _ASM_POWERPC_PGTABLE_H
3906 #ifdef __KERNEL__
3907
3908+#include <linux/const.h>
3909 #ifndef __ASSEMBLY__
3910 #include <asm/processor.h> /* For TASK_SIZE */
3911 #include <asm/mmu.h>
fe2de317
MT
3912diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
3913index 4aad413..85d86bf 100644
3914--- a/arch/powerpc/include/asm/pte-hash32.h
3915+++ b/arch/powerpc/include/asm/pte-hash32.h
58c5fc13
MT
3916@@ -21,6 +21,7 @@
3917 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
3918 #define _PAGE_USER 0x004 /* usermode access allowed */
3919 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
ae4e228f 3920+#define _PAGE_EXEC _PAGE_GUARDED
58c5fc13
MT
3921 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
3922 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
3923 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
fe2de317 3924diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
c6e2a6c8 3925index 9d7f0fb..a28fe69 100644
fe2de317
MT
3926--- a/arch/powerpc/include/asm/reg.h
3927+++ b/arch/powerpc/include/asm/reg.h
6e9df6a3 3928@@ -212,6 +212,7 @@
58c5fc13
MT
3929 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
3930 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
3931 #define DSISR_NOHPTE 0x40000000 /* no translation found */
3932+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
3933 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
3934 #define DSISR_ISSTORE 0x02000000 /* access was a store */
3935 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
5e856224 3936diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
c6e2a6c8 3937index 4a741c7..c8162227b 100644
5e856224
MT
3938--- a/arch/powerpc/include/asm/thread_info.h
3939+++ b/arch/powerpc/include/asm/thread_info.h
c6e2a6c8 3940@@ -104,12 +104,14 @@ static inline struct thread_info *current_thread_info(void)
5e856224
MT
3941 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
3942 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
3943 #define TIF_SINGLESTEP 8 /* singlestepping active */
3944-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
3945 #define TIF_SECCOMP 10 /* secure computing */
3946 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
3947 #define TIF_NOERROR 12 /* Force successful syscall return */
3948 #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
3949 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
c6e2a6c8 3950+#define TIF_MEMDIE 16 /* is terminating due to OOM killer */
5e856224
MT
3951+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
3952+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
3953
3954 /* as above, but as bit values */
3955 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
c6e2a6c8 3956@@ -127,8 +129,11 @@ static inline struct thread_info *current_thread_info(void)
5e856224
MT
3957 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
3958 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
3959 #define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
3960+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
3961+
3962 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
3963- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
3964+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
3965+ _TIF_GRSEC_SETXID)
3966
3967 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
3968 _TIF_NOTIFY_RESUME)
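The comment added above states the constraint: powerpc's syscall entry/exit assembly tests these flags with andi., whose immediate is only 16 bits wide, so every flag that appears in _TIF_SYSCALL_T_OR_A or _TIF_USER_WORK_MASK must live in bits 0-15. That is why TIF_GRSEC_SETXID takes the freed bit 9 while TIF_MEMDIE, which is never tested on that path, moves up to bit 16. A small standalone check of the arithmetic:

#include <assert.h>

int main(void)
{
	unsigned long tif_grsec_setxid = 1UL << 9;	/* must be reachable by andi. */
	unsigned long tif_memdie       = 1UL << 16;	/* parked outside the 16-bit range */

	assert(tif_grsec_setxid <= 0xffffUL);	/* fits a 16-bit immediate */
	assert(tif_memdie > 0xffffUL);		/* would not fit */
	return 0;
}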
fe2de317
MT
3969diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
3970index bd0fb84..a42a14b 100644
3971--- a/arch/powerpc/include/asm/uaccess.h
3972+++ b/arch/powerpc/include/asm/uaccess.h
efbe55a5
MT
3973@@ -13,6 +13,8 @@
3974 #define VERIFY_READ 0
3975 #define VERIFY_WRITE 1
3976
3977+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3978+
3979 /*
3980 * The fs value determines whether argument validity checking should be
3981 * performed or not. If get_fs() == USER_DS, checking is performed, with
3982@@ -327,52 +329,6 @@ do { \
58c5fc13
MT
3983 extern unsigned long __copy_tofrom_user(void __user *to,
3984 const void __user *from, unsigned long size);
3985
3986-#ifndef __powerpc64__
3987-
3988-static inline unsigned long copy_from_user(void *to,
3989- const void __user *from, unsigned long n)
3990-{
3991- unsigned long over;
3992-
3993- if (access_ok(VERIFY_READ, from, n))
3994- return __copy_tofrom_user((__force void __user *)to, from, n);
3995- if ((unsigned long)from < TASK_SIZE) {
3996- over = (unsigned long)from + n - TASK_SIZE;
3997- return __copy_tofrom_user((__force void __user *)to, from,
3998- n - over) + over;
3999- }
4000- return n;
4001-}
4002-
4003-static inline unsigned long copy_to_user(void __user *to,
4004- const void *from, unsigned long n)
4005-{
4006- unsigned long over;
4007-
4008- if (access_ok(VERIFY_WRITE, to, n))
4009- return __copy_tofrom_user(to, (__force void __user *)from, n);
4010- if ((unsigned long)to < TASK_SIZE) {
4011- over = (unsigned long)to + n - TASK_SIZE;
4012- return __copy_tofrom_user(to, (__force void __user *)from,
4013- n - over) + over;
4014- }
4015- return n;
4016-}
4017-
4018-#else /* __powerpc64__ */
4019-
4020-#define __copy_in_user(to, from, size) \
4021- __copy_tofrom_user((to), (from), (size))
4022-
4023-extern unsigned long copy_from_user(void *to, const void __user *from,
4024- unsigned long n);
4025-extern unsigned long copy_to_user(void __user *to, const void *from,
4026- unsigned long n);
4027-extern unsigned long copy_in_user(void __user *to, const void __user *from,
4028- unsigned long n);
4029-
4030-#endif /* __powerpc64__ */
4031-
4032 static inline unsigned long __copy_from_user_inatomic(void *to,
4033 const void __user *from, unsigned long n)
4034 {
fe2de317 4035@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
58c5fc13
MT
4036 if (ret == 0)
4037 return 0;
4038 }
ae4e228f 4039+
58c5fc13
MT
4040+ if (!__builtin_constant_p(n))
4041+ check_object_size(to, n, false);
4042+
4043 return __copy_tofrom_user((__force void __user *)to, from, n);
4044 }
4045
fe2de317 4046@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
58c5fc13
MT
4047 if (ret == 0)
4048 return 0;
4049 }
ae4e228f 4050+
58c5fc13
MT
4051+ if (!__builtin_constant_p(n))
4052+ check_object_size(from, n, true);
4053+
4054 return __copy_tofrom_user(to, (__force const void __user *)from, n);
4055 }
4056
fe2de317 4057@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
58c5fc13
MT
4058 return __copy_to_user_inatomic(to, from, size);
4059 }
4060
4061+#ifndef __powerpc64__
4062+
4063+static inline unsigned long __must_check copy_from_user(void *to,
4064+ const void __user *from, unsigned long n)
4065+{
4066+ unsigned long over;
4067+
ae4e228f 4068+ if ((long)n < 0)
58c5fc13
MT
4069+ return n;
4070+
4071+ if (access_ok(VERIFY_READ, from, n)) {
4072+ if (!__builtin_constant_p(n))
4073+ check_object_size(to, n, false);
58c5fc13
MT
4074+ return __copy_tofrom_user((__force void __user *)to, from, n);
4075+ }
4076+ if ((unsigned long)from < TASK_SIZE) {
4077+ over = (unsigned long)from + n - TASK_SIZE;
4078+ if (!__builtin_constant_p(n - over))
4079+ check_object_size(to, n - over, false);
4080+ return __copy_tofrom_user((__force void __user *)to, from,
4081+ n - over) + over;
4082+ }
4083+ return n;
4084+}
4085+
4086+static inline unsigned long __must_check copy_to_user(void __user *to,
4087+ const void *from, unsigned long n)
4088+{
4089+ unsigned long over;
4090+
ae4e228f 4091+ if ((long)n < 0)
58c5fc13
MT
4092+ return n;
4093+
4094+ if (access_ok(VERIFY_WRITE, to, n)) {
4095+ if (!__builtin_constant_p(n))
4096+ check_object_size(from, n, true);
4097+ return __copy_tofrom_user(to, (__force void __user *)from, n);
4098+ }
4099+ if ((unsigned long)to < TASK_SIZE) {
4100+ over = (unsigned long)to + n - TASK_SIZE;
4101+ if (!__builtin_constant_p(n))
4102+ check_object_size(from, n - over, true);
4103+ return __copy_tofrom_user(to, (__force void __user *)from,
4104+ n - over) + over;
4105+ }
4106+ return n;
4107+}
4108+
4109+#else /* __powerpc64__ */
4110+
4111+#define __copy_in_user(to, from, size) \
4112+ __copy_tofrom_user((to), (from), (size))
4113+
ae4e228f 4114+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
58c5fc13 4115+{
ae4e228f 4116+ if ((long)n < 0 || n > INT_MAX)
58c5fc13
MT
4117+ return n;
4118+
4119+ if (!__builtin_constant_p(n))
4120+ check_object_size(to, n, false);
4121+
4122+ if (likely(access_ok(VERIFY_READ, from, n)))
4123+ n = __copy_from_user(to, from, n);
4124+ else
4125+ memset(to, 0, n);
58c5fc13
MT
4126+ return n;
4127+}
4128+
ae4e228f 4129+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
58c5fc13 4130+{
ae4e228f 4131+ if ((long)n < 0 || n > INT_MAX)
58c5fc13
MT
4132+ return n;
4133+
4134+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
4135+ if (!__builtin_constant_p(n))
4136+ check_object_size(from, n, true);
4137+ n = __copy_to_user(to, from, n);
4138+ }
58c5fc13
MT
4139+ return n;
4140+}
4141+
4142+extern unsigned long copy_in_user(void __user *to, const void __user *from,
4143+ unsigned long n);
4144+
4145+#endif /* __powerpc64__ */
4146+
4147 extern unsigned long __clear_user(void __user *addr, unsigned long size);
4148
4149 static inline unsigned long clear_user(void __user *addr, unsigned long size)
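Several of the copy helpers added above begin with an "if ((long)n < 0) return n;" guard. The point: a length that went negative somewhere upstream arrives here as a huge unsigned value, and bailing out early turns that into a harmless "nothing copied" result instead of an attempt to copy a near-2^64-byte range. A trivial illustration of the reinterpretation:

#include <stdio.h>

int main(void)
{
	long broken_len = -1;				/* e.g. an error code misused as a length */
	unsigned long n = (unsigned long)broken_len;	/* what the copy routine receives */

	if ((long)n < 0)				/* the guard added by the patch */
		printf("rejected: n = %lu bytes\n", n);	/* 18446744073709551615 on 64-bit */

	return 0;
}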
fe2de317 4150diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
c6e2a6c8 4151index 7215cc2..a9730c1 100644
fe2de317
MT
4152--- a/arch/powerpc/kernel/exceptions-64e.S
4153+++ b/arch/powerpc/kernel/exceptions-64e.S
c6e2a6c8 4154@@ -661,6 +661,7 @@ storage_fault_common:
ae4e228f
MT
4155 std r14,_DAR(r1)
4156 std r15,_DSISR(r1)
4157 addi r3,r1,STACK_FRAME_OVERHEAD
4158+ bl .save_nvgprs
4159 mr r4,r14
4160 mr r5,r15
4161 ld r14,PACA_EXGEN+EX_R14(r13)
c6e2a6c8 4162@@ -669,8 +670,7 @@ storage_fault_common:
ae4e228f
MT
4163 cmpdi r3,0
4164 bne- 1f
4165 b .ret_from_except_lite
4166-1: bl .save_nvgprs
4167- mr r5,r3
4168+1: mr r5,r3
4169 addi r3,r1,STACK_FRAME_OVERHEAD
4170 ld r4,_DAR(r1)
4171 bl .bad_page_fault
fe2de317 4172diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
c6e2a6c8 4173index 8f880bc..c5bd2f3 100644
fe2de317
MT
4174--- a/arch/powerpc/kernel/exceptions-64s.S
4175+++ b/arch/powerpc/kernel/exceptions-64s.S
c6e2a6c8 4176@@ -890,10 +890,10 @@ handle_page_fault:
ae4e228f
MT
4177 11: ld r4,_DAR(r1)
4178 ld r5,_DSISR(r1)
4179 addi r3,r1,STACK_FRAME_OVERHEAD
4180+ bl .save_nvgprs
4181 bl .do_page_fault
4182 cmpdi r3,0
c6e2a6c8 4183 beq+ 12f
ae4e228f
MT
4184- bl .save_nvgprs
4185 mr r5,r3
4186 addi r3,r1,STACK_FRAME_OVERHEAD
4187 lwz r4,_DAR(r1)
fe2de317 4188diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
c6e2a6c8 4189index 2e3200c..72095ce 100644
fe2de317
MT
4190--- a/arch/powerpc/kernel/module_32.c
4191+++ b/arch/powerpc/kernel/module_32.c
4192@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
57199397
MT
4193 me->arch.core_plt_section = i;
4194 }
4195 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
4196- printk("Module doesn't contain .plt or .init.plt sections.\n");
4197+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
4198 return -ENOEXEC;
4199 }
4200
fe2de317 4201@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
57199397
MT
4202
4203 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
4204 /* Init, or core PLT? */
4205- if (location >= mod->module_core
4206- && location < mod->module_core + mod->core_size)
4207+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
4208+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
4209 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
4210- else
4211+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
4212+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
4213 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
4214+ else {
4215+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
4216+ return ~0UL;
4217+ }
4218
4219 /* Find this entry, or if that fails, the next avail. entry */
4220 while (entry->jump[0]) {
fe2de317 4221diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
c6e2a6c8 4222index 4937c96..70714b7 100644
fe2de317
MT
4223--- a/arch/powerpc/kernel/process.c
4224+++ b/arch/powerpc/kernel/process.c
c6e2a6c8 4225@@ -681,8 +681,8 @@ void show_regs(struct pt_regs * regs)
bc901d79
MT
4226 * Lookup NIP late so we have the best change of getting the
4227 * above info out without failing
4228 */
4229- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
4230- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
4231+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
4232+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
4233 #endif
4234 show_stack(current, (unsigned long *) regs->gpr[1]);
4235 if (!user_mode(regs))
c6e2a6c8 4236@@ -1186,10 +1186,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
bc901d79
MT
4237 newsp = stack[0];
4238 ip = stack[STACK_FRAME_LR_SAVE];
4239 if (!firstframe || ip != lr) {
4240- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
4241+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
4242 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4243 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
4244- printk(" (%pS)",
4245+ printk(" (%pA)",
4246 (void *)current->ret_stack[curr_frame].ret);
4247 curr_frame--;
4248 }
c6e2a6c8 4249@@ -1209,7 +1209,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
bc901d79
MT
4250 struct pt_regs *regs = (struct pt_regs *)
4251 (sp + STACK_FRAME_OVERHEAD);
4252 lr = regs->link;
4253- printk("--- Exception: %lx at %pS\n LR = %pS\n",
4254+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
4255 regs->trap, (void *)regs->nip, (void *)lr);
4256 firstframe = 1;
4257 }
c6e2a6c8 4258@@ -1282,58 +1282,3 @@ void thread_info_cache_init(void)
58c5fc13 4259 }
6892158b 4260
bc901d79
MT
4261 #endif /* THREAD_SHIFT < PAGE_SHIFT */
4262-
4263-unsigned long arch_align_stack(unsigned long sp)
4264-{
4265- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4266- sp -= get_random_int() & ~PAGE_MASK;
4267- return sp & ~0xf;
4268-}
4269-
58c5fc13
MT
4270-static inline unsigned long brk_rnd(void)
4271-{
4272- unsigned long rnd = 0;
4273-
4274- /* 8MB for 32bit, 1GB for 64bit */
4275- if (is_32bit_task())
4276- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
4277- else
4278- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
4279-
4280- return rnd << PAGE_SHIFT;
4281-}
4282-
4283-unsigned long arch_randomize_brk(struct mm_struct *mm)
4284-{
ae4e228f
MT
4285- unsigned long base = mm->brk;
4286- unsigned long ret;
4287-
4288-#ifdef CONFIG_PPC_STD_MMU_64
4289- /*
4290- * If we are using 1TB segments and we are allowed to randomise
4291- * the heap, we can put it above 1TB so it is backed by a 1TB
4292- * segment. Otherwise the heap will be in the bottom 1TB
4293- * which always uses 256MB segments and this may result in a
4294- * performance penalty.
4295- */
4296- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
4297- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
4298-#endif
4299-
4300- ret = PAGE_ALIGN(base + brk_rnd());
58c5fc13
MT
4301-
4302- if (ret < mm->brk)
4303- return mm->brk;
4304-
4305- return ret;
4306-}
4307-
4308-unsigned long randomize_et_dyn(unsigned long base)
4309-{
4310- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4311-
4312- if (ret < base)
4313- return base;
4314-
4315- return ret;
4316-}
5e856224 4317diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
c6e2a6c8 4318index 8d8e028..c2aeb50 100644
5e856224
MT
4319--- a/arch/powerpc/kernel/ptrace.c
4320+++ b/arch/powerpc/kernel/ptrace.c
4321@@ -1702,6 +1702,10 @@ long arch_ptrace(struct task_struct *child, long request,
4322 return ret;
4323 }
4324
4325+#ifdef CONFIG_GRKERNSEC_SETXID
4326+extern void gr_delayed_cred_worker(void);
4327+#endif
4328+
4329 /*
4330 * We must return the syscall number to actually look up in the table.
4331 * This can be -1L to skip running any syscall at all.
4332@@ -1712,6 +1716,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
4333
4334 secure_computing(regs->gpr[0]);
4335
4336+#ifdef CONFIG_GRKERNSEC_SETXID
4337+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4338+ gr_delayed_cred_worker();
4339+#endif
4340+
4341 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
4342 tracehook_report_syscall_entry(regs))
4343 /*
4344@@ -1746,6 +1755,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
4345 {
4346 int step;
4347
4348+#ifdef CONFIG_GRKERNSEC_SETXID
4349+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
4350+ gr_delayed_cred_worker();
4351+#endif
4352+
4353 audit_syscall_exit(regs);
4354
4355 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
fe2de317 4356diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
c6e2a6c8 4357index 45eb998..0cb36bc 100644
fe2de317
MT
4358--- a/arch/powerpc/kernel/signal_32.c
4359+++ b/arch/powerpc/kernel/signal_32.c
c6e2a6c8 4360@@ -861,7 +861,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
58c5fc13
MT
4361 /* Save user registers on the stack */
4362 frame = &rt_sf->uc.uc_mcontext;
4363 addr = frame;
4364- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
4365+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4366 if (save_user_regs(regs, frame, 0, 1))
4367 goto badframe;
4368 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
fe2de317 4369diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
c6e2a6c8 4370index 2692efd..6673d2e 100644
fe2de317
MT
4371--- a/arch/powerpc/kernel/signal_64.c
4372+++ b/arch/powerpc/kernel/signal_64.c
c6e2a6c8 4373@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
58c5fc13
MT
4374 current->thread.fpscr.val = 0;
4375
4376 /* Set up to return from userspace. */
4377- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
4378+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
4379 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
4380 } else {
4381 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
fe2de317 4382diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
c6e2a6c8 4383index 1589723..cefe690 100644
fe2de317
MT
4384--- a/arch/powerpc/kernel/traps.c
4385+++ b/arch/powerpc/kernel/traps.c
c6e2a6c8 4386@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
5e856224
MT
4387 return flags;
4388 }
15a11c5b
MT
4389
4390+extern void gr_handle_kernel_exploit(void);
4391+
5e856224
MT
4392 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
4393 int signr)
15a11c5b 4394 {
c6e2a6c8 4395@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
5e856224 4396 panic("Fatal exception in interrupt");
15a11c5b
MT
4397 if (panic_on_oops)
4398 panic("Fatal exception");
5e856224 4399+
15a11c5b
MT
4400+ gr_handle_kernel_exploit();
4401+
5e856224
MT
4402 do_exit(signr);
4403 }
15a11c5b 4404
fe2de317 4405diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
c6e2a6c8 4406index 9eb5b9b..e45498a 100644
fe2de317
MT
4407--- a/arch/powerpc/kernel/vdso.c
4408+++ b/arch/powerpc/kernel/vdso.c
c6e2a6c8 4409@@ -34,6 +34,7 @@
58c5fc13
MT
4410 #include <asm/firmware.h>
4411 #include <asm/vdso.h>
4412 #include <asm/vdso_datapage.h>
4413+#include <asm/mman.h>
4414
4415 #include "setup.h"
4416
c6e2a6c8 4417@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
58c5fc13
MT
4418 vdso_base = VDSO32_MBASE;
4419 #endif
4420
4421- current->mm->context.vdso_base = 0;
4422+ current->mm->context.vdso_base = ~0UL;
4423
4424 /* vDSO has a problem and was disabled, just don't "enable" it for the
4425 * process
c6e2a6c8 4426@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
58c5fc13 4427 vdso_base = get_unmapped_area(NULL, vdso_base,
ae4e228f
MT
4428 (vdso_pages << PAGE_SHIFT) +
4429 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
4430- 0, 0);
4431+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
58c5fc13
MT
4432 if (IS_ERR_VALUE(vdso_base)) {
4433 rc = vdso_base;
4434 goto fail_mmapsem;
fe2de317
MT
4435diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
4436index 5eea6f3..5d10396 100644
4437--- a/arch/powerpc/lib/usercopy_64.c
4438+++ b/arch/powerpc/lib/usercopy_64.c
58c5fc13
MT
4439@@ -9,22 +9,6 @@
4440 #include <linux/module.h>
4441 #include <asm/uaccess.h>
4442
4443-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4444-{
4445- if (likely(access_ok(VERIFY_READ, from, n)))
4446- n = __copy_from_user(to, from, n);
4447- else
4448- memset(to, 0, n);
4449- return n;
4450-}
4451-
4452-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4453-{
4454- if (likely(access_ok(VERIFY_WRITE, to, n)))
4455- n = __copy_to_user(to, from, n);
4456- return n;
4457-}
4458-
4459 unsigned long copy_in_user(void __user *to, const void __user *from,
4460 unsigned long n)
4461 {
fe2de317 4462@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
58c5fc13
MT
4463 return n;
4464 }
4465
4466-EXPORT_SYMBOL(copy_from_user);
4467-EXPORT_SYMBOL(copy_to_user);
4468 EXPORT_SYMBOL(copy_in_user);
4469
fe2de317 4470diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
c6e2a6c8 4471index 08ffcf5..a0ab912 100644
fe2de317
MT
4472--- a/arch/powerpc/mm/fault.c
4473+++ b/arch/powerpc/mm/fault.c
15a11c5b 4474@@ -32,6 +32,10 @@
ae4e228f 4475 #include <linux/perf_event.h>
bc901d79 4476 #include <linux/magic.h>
15a11c5b 4477 #include <linux/ratelimit.h>
58c5fc13
MT
4478+#include <linux/slab.h>
4479+#include <linux/pagemap.h>
4480+#include <linux/compiler.h>
4481+#include <linux/unistd.h>
4482
4483 #include <asm/firmware.h>
4484 #include <asm/page.h>
c6e2a6c8 4485@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
58c5fc13
MT
4486 }
4487 #endif
4488
4489+#ifdef CONFIG_PAX_PAGEEXEC
4490+/*
4491+ * PaX: decide what to do with offenders (regs->nip = fault address)
4492+ *
4493+ * returns 1 when task should be killed
4494+ */
4495+static int pax_handle_fetch_fault(struct pt_regs *regs)
4496+{
4497+ return 1;
4498+}
4499+
6e9df6a3 4500+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
58c5fc13
MT
4501+{
4502+ unsigned long i;
4503+
4504+ printk(KERN_ERR "PAX: bytes at PC: ");
4505+ for (i = 0; i < 5; i++) {
4506+ unsigned int c;
ae4e228f 4507+ if (get_user(c, (unsigned int __user *)pc+i))
58c5fc13
MT
4508+ printk(KERN_CONT "???????? ");
4509+ else
4510+ printk(KERN_CONT "%08x ", c);
4511+ }
4512+ printk("\n");
4513+}
4514+#endif
4515+
4516 /*
4517 * Check whether the instruction at regs->nip is a store using
4518 * an update addressing form which will update r1.
c6e2a6c8 4519@@ -215,7 +246,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
58c5fc13
MT
4520 * indicate errors in DSISR but can validly be set in SRR1.
4521 */
4522 if (trap == 0x400)
4523- error_code &= 0x48200000;
4524+ error_code &= 0x58200000;
4525 else
4526 is_write = error_code & DSISR_ISSTORE;
4527 #else
c6e2a6c8 4528@@ -366,7 +397,7 @@ good_area:
58c5fc13
MT
4529 * "undefined". Of those that can be set, this is the only
4530 * one which seems bad.
4531 */
4532- if (error_code & 0x10000000)
4533+ if (error_code & DSISR_GUARDED)
4534 /* Guarded storage error. */
4535 goto bad_area;
4536 #endif /* CONFIG_8xx */
c6e2a6c8 4537@@ -381,7 +412,7 @@ good_area:
58c5fc13
MT
4538 * processors use the same I/D cache coherency mechanism
4539 * as embedded.
4540 */
4541- if (error_code & DSISR_PROTFAULT)
4542+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
4543 goto bad_area;
4544 #endif /* CONFIG_PPC_STD_MMU */
4545
c6e2a6c8 4546@@ -463,6 +494,23 @@ bad_area:
58c5fc13
MT
4547 bad_area_nosemaphore:
4548 /* User mode accesses cause a SIGSEGV */
4549 if (user_mode(regs)) {
4550+
4551+#ifdef CONFIG_PAX_PAGEEXEC
4552+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4553+#ifdef CONFIG_PPC_STD_MMU
4554+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
4555+#else
4556+ if (is_exec && regs->nip == address) {
4557+#endif
4558+ switch (pax_handle_fetch_fault(regs)) {
4559+ }
4560+
4561+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
4562+ do_group_exit(SIGKILL);
4563+ }
4564+ }
4565+#endif
4566+
4567 _exception(SIGSEGV, regs, code, address);
4568 return 0;
4569 }
fe2de317 4570diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
5e856224 4571index 67a42ed..1c7210c 100644
fe2de317
MT
4572--- a/arch/powerpc/mm/mmap_64.c
4573+++ b/arch/powerpc/mm/mmap_64.c
5e856224 4574@@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
58c5fc13
MT
4575 */
4576 if (mmap_is_legacy()) {
4577 mm->mmap_base = TASK_UNMAPPED_BASE;
4578+
4579+#ifdef CONFIG_PAX_RANDMMAP
4580+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4581+ mm->mmap_base += mm->delta_mmap;
4582+#endif
4583+
4584 mm->get_unmapped_area = arch_get_unmapped_area;
4585 mm->unmap_area = arch_unmap_area;
4586 } else {
4587 mm->mmap_base = mmap_base();
4588+
4589+#ifdef CONFIG_PAX_RANDMMAP
4590+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4591+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4592+#endif
4593+
4594 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4595 mm->unmap_area = arch_unmap_area_topdown;
4596 }
fe2de317 4597diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
4c928ab7 4598index 73709f7..6b90313 100644
fe2de317
MT
4599--- a/arch/powerpc/mm/slice.c
4600+++ b/arch/powerpc/mm/slice.c
4601@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
57199397
MT
4602 if ((mm->task_size - len) < addr)
4603 return 0;
4604 vma = find_vma(mm, addr);
4605- return (!vma || (addr + len) <= vma->vm_start);
4606+ return check_heap_stack_gap(vma, addr, len);
4607 }
4608
6892158b
MT
4609 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
4610@@ -256,7 +256,7 @@ full_search:
57199397
MT
4611 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
4612 continue;
4613 }
4614- if (!vma || addr + len <= vma->vm_start) {
4615+ if (check_heap_stack_gap(vma, addr, len)) {
4616 /*
4617 * Remember the place where we stopped the search:
4618 */
fe2de317 4619@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
16454cff
MT
4620 }
4621 }
4622
4623- addr = mm->mmap_base;
4624- while (addr > len) {
4625+ if (mm->mmap_base < len)
4626+ addr = -ENOMEM;
4627+ else
4628+ addr = mm->mmap_base - len;
4629+
4630+ while (!IS_ERR_VALUE(addr)) {
4631 /* Go down by chunk size */
4632- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
4633+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
4634
4635 /* Check for hit with different page size */
4636 mask = slice_range_to_mask(addr, len);
fe2de317 4637@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
57199397
MT
4638 * return with success:
4639 */
4640 vma = find_vma(mm, addr);
4641- if (!vma || (addr + len) <= vma->vm_start) {
4642+ if (check_heap_stack_gap(vma, addr, len)) {
4643 /* remember the address as a hint for next time */
4644 if (use_cache)
4645 mm->free_area_cache = addr;
fe2de317 4646@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
16454cff
MT
4647 mm->cached_hole_size = vma->vm_start - addr;
4648
4649 /* try just below the current vma->vm_start */
4650- addr = vma->vm_start;
4651+ addr = skip_heap_stack_gap(vma, len);
4652 }
4653
4654 /*
fe2de317 4655@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
58c5fc13
MT
4656 if (fixed && addr > (mm->task_size - len))
4657 return -EINVAL;
4658
4659+#ifdef CONFIG_PAX_RANDMMAP
4660+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
4661+ addr = 0;
4662+#endif
4663+
4664 /* If hint, make sure it matches our alignment restrictions */
4665 if (!fixed && addr) {
4666 addr = _ALIGN_UP(addr, 1ul << pshift);
4c928ab7 4667diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
c6e2a6c8 4668index 748347b..81bc6c7 100644
4c928ab7
MT
4669--- a/arch/s390/include/asm/atomic.h
4670+++ b/arch/s390/include/asm/atomic.h
4671@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
4672 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
4673 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4674
4675+#define atomic64_read_unchecked(v) atomic64_read(v)
4676+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4677+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4678+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4679+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4680+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4681+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4682+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4683+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4684+
4685 #define smp_mb__before_atomic_dec() smp_mb()
4686 #define smp_mb__after_atomic_dec() smp_mb()
4687 #define smp_mb__before_atomic_inc() smp_mb()
4688diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
4689index 2a30d5a..5e5586f 100644
4690--- a/arch/s390/include/asm/cache.h
4691+++ b/arch/s390/include/asm/cache.h
4692@@ -11,8 +11,10 @@
4693 #ifndef __ARCH_S390_CACHE_H
4694 #define __ARCH_S390_CACHE_H
4695
4696-#define L1_CACHE_BYTES 256
4697+#include <linux/const.h>
4698+
4699 #define L1_CACHE_SHIFT 8
4700+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4701 #define NET_SKB_PAD 32
4702
4703 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
fe2de317 4704diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
c6e2a6c8 4705index c4ee39f..352881b 100644
fe2de317
MT
4706--- a/arch/s390/include/asm/elf.h
4707+++ b/arch/s390/include/asm/elf.h
c6e2a6c8 4708@@ -161,8 +161,14 @@ extern unsigned int vdso_enabled;
16454cff 4709 the loader. We need to make sure that it is out of the way of the program
ae4e228f 4710 that it will "exec", and that there is sufficient room for the brk. */
58c5fc13 4711
16454cff
MT
4712-extern unsigned long randomize_et_dyn(unsigned long base);
4713-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
4714+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
4715+
ae4e228f
MT
4716+#ifdef CONFIG_PAX_ASLR
4717+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
58c5fc13 4718+
4c928ab7
MT
4719+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
4720+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
ae4e228f 4721+#endif
16454cff 4722
ae4e228f
MT
4723 /* This yields a mask that user programs can use to figure out what
4724 instruction set this CPU supports. */
c6e2a6c8 4725@@ -210,7 +216,4 @@ struct linux_binprm;
16454cff
MT
4726 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
4727 int arch_setup_additional_pages(struct linux_binprm *, int);
4728
4729-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
4730-#define arch_randomize_brk arch_randomize_brk
4731-
4732 #endif
c6e2a6c8
MT
4733diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
4734index c4a93d6..4d2a9b4 100644
4735--- a/arch/s390/include/asm/exec.h
4736+++ b/arch/s390/include/asm/exec.h
4737@@ -7,6 +7,6 @@
4738 #ifndef __ASM_EXEC_H
4739 #define __ASM_EXEC_H
58c5fc13 4740
16454cff
MT
4741-extern unsigned long arch_align_stack(unsigned long sp);
4742+#define arch_align_stack(x) ((x) & ~0xfUL)
4743
c6e2a6c8 4744 #endif /* __ASM_EXEC_H */
fe2de317 4745diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
c1e3898a 4746index 8f2cada..43072c1 100644
fe2de317
MT
4747--- a/arch/s390/include/asm/uaccess.h
4748+++ b/arch/s390/include/asm/uaccess.h
c6e2a6c8 4749@@ -236,6 +236,10 @@ static inline unsigned long __must_check
58c5fc13
MT
4750 copy_to_user(void __user *to, const void *from, unsigned long n)
4751 {
4752 might_fault();
4753+
4754+ if ((long)n < 0)
4755+ return n;
4756+
4757 if (access_ok(VERIFY_WRITE, to, n))
4758 n = __copy_to_user(to, from, n);
4759 return n;
c6e2a6c8 4760@@ -261,6 +265,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
58c5fc13
MT
4761 static inline unsigned long __must_check
4762 __copy_from_user(void *to, const void __user *from, unsigned long n)
4763 {
4764+ if ((long)n < 0)
4765+ return n;
4766+
4767 if (__builtin_constant_p(n) && (n <= 256))
4768 return uaccess.copy_from_user_small(n, from, to);
4769 else
c1e3898a
MT
4770@@ -292,10 +299,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
4771 static inline unsigned long __must_check
4772 copy_from_user(void *to, const void __user *from, unsigned long n)
4773 {
4774- unsigned int sz = __compiletime_object_size(to);
4775+ size_t sz = __compiletime_object_size(to);
df50ba0c 4776
58c5fc13 4777 might_fault();
c1e3898a 4778- if (unlikely(sz != -1 && sz < n)) {
58c5fc13
MT
4779+
4780+ if ((long)n < 0)
4781+ return n;
4782+
c1e3898a 4783+ if (unlikely(sz != (size_t)-1 && sz < n)) {
df50ba0c
MT
4784 copy_from_user_overflow();
4785 return n;
c1e3898a 4786 }
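The uaccess.h hunk rejects any byte count whose signed value is negative before the copy is attempted, which catches lengths that wrapped around ((size_t)-1 and the like), and it widens the __compiletime_object_size() result to size_t so the -1 "unknown size" sentinel compares correctly against large counts. The same test could be wrapped as a stand-alone guard; this is a sketch of the idea, not code from the patch:

static inline unsigned long guarded_copy_from_user(void *to,
						   const void __user *from,
						   unsigned long n)
{
	/* A length that underflowed (len_a - len_b with len_b > len_a)
	 * shows up as a negative signed value; report the whole request
	 * as "not copied" instead of letting it reach the copy routines. */
	if ((long)n < 0)
		return n;
	return copy_from_user(to, from, n);
}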
fe2de317
MT
4787diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
4788index dfcb343..eda788a 100644
4789--- a/arch/s390/kernel/module.c
4790+++ b/arch/s390/kernel/module.c
4791@@ -161,11 +161,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
58c5fc13
MT
4792
4793 /* Increase core size by size of got & plt and set start
4794 offsets for got and plt. */
4795- me->core_size = ALIGN(me->core_size, 4);
4796- me->arch.got_offset = me->core_size;
4797- me->core_size += me->arch.got_size;
4798- me->arch.plt_offset = me->core_size;
4799- me->core_size += me->arch.plt_size;
4800+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
4801+ me->arch.got_offset = me->core_size_rw;
4802+ me->core_size_rw += me->arch.got_size;
4803+ me->arch.plt_offset = me->core_size_rx;
4804+ me->core_size_rx += me->arch.plt_size;
4805 return 0;
4806 }
4807
fe2de317 4808@@ -242,7 +242,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
58c5fc13
MT
4809 if (info->got_initialized == 0) {
4810 Elf_Addr *gotent;
4811
4812- gotent = me->module_core + me->arch.got_offset +
4813+ gotent = me->module_core_rw + me->arch.got_offset +
4814 info->got_offset;
4815 *gotent = val;
4816 info->got_initialized = 1;
fe2de317 4817@@ -266,7 +266,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
58c5fc13
MT
4818 else if (r_type == R_390_GOTENT ||
4819 r_type == R_390_GOTPLTENT)
4820 *(unsigned int *) loc =
4821- (val + (Elf_Addr) me->module_core - loc) >> 1;
4822+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
4823 else if (r_type == R_390_GOT64 ||
4824 r_type == R_390_GOTPLT64)
4825 *(unsigned long *) loc = val;
fe2de317 4826@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
58c5fc13
MT
4827 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
4828 if (info->plt_initialized == 0) {
4829 unsigned int *ip;
4830- ip = me->module_core + me->arch.plt_offset +
4831+ ip = me->module_core_rx + me->arch.plt_offset +
4832 info->plt_offset;
4833 #ifndef CONFIG_64BIT
4834 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
fe2de317 4835@@ -305,7 +305,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
58c5fc13
MT
4836 val - loc + 0xffffUL < 0x1ffffeUL) ||
4837 (r_type == R_390_PLT32DBL &&
4838 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
4839- val = (Elf_Addr) me->module_core +
4840+ val = (Elf_Addr) me->module_core_rx +
4841 me->arch.plt_offset +
4842 info->plt_offset;
4843 val += rela->r_addend - loc;
fe2de317 4844@@ -327,7 +327,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
58c5fc13
MT
4845 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
4846 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
4847 val = val + rela->r_addend -
4848- ((Elf_Addr) me->module_core + me->arch.got_offset);
4849+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
4850 if (r_type == R_390_GOTOFF16)
4851 *(unsigned short *) loc = val;
4852 else if (r_type == R_390_GOTOFF32)
fe2de317 4853@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
58c5fc13
MT
4854 break;
4855 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
4856 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
4857- val = (Elf_Addr) me->module_core + me->arch.got_offset +
4858+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
4859 rela->r_addend - loc;
4860 if (r_type == R_390_GOTPC)
4861 *(unsigned int *) loc = val;
fe2de317 4862diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
c6e2a6c8 4863index 60055ce..ee4b252 100644
fe2de317
MT
4864--- a/arch/s390/kernel/process.c
4865+++ b/arch/s390/kernel/process.c
c6e2a6c8 4866@@ -316,39 +316,3 @@ unsigned long get_wchan(struct task_struct *p)
16454cff
MT
4867 }
4868 return 0;
4869 }
4870-
4871-unsigned long arch_align_stack(unsigned long sp)
4872-{
4873- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
4874- sp -= get_random_int() & ~PAGE_MASK;
4875- return sp & ~0xf;
4876-}
4877-
4878-static inline unsigned long brk_rnd(void)
4879-{
4880- /* 8MB for 32bit, 1GB for 64bit */
4881- if (is_32bit_task())
4882- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
4883- else
4884- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
4885-}
4886-
4887-unsigned long arch_randomize_brk(struct mm_struct *mm)
4888-{
4889- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
4890-
4891- if (ret < mm->brk)
4892- return mm->brk;
4893- return ret;
4894-}
4895-
4896-unsigned long randomize_et_dyn(unsigned long base)
4897-{
4898- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
4899-
4900- if (!(current->flags & PF_RANDOMIZE))
4901- return base;
4902- if (ret < base)
4903- return base;
4904- return ret;
4905-}
fe2de317 4906diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
c6e2a6c8 4907index 2857c48..d047481 100644
fe2de317
MT
4908--- a/arch/s390/mm/mmap.c
4909+++ b/arch/s390/mm/mmap.c
4c928ab7 4910@@ -92,10 +92,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
ae4e228f
MT
4911 */
4912 if (mmap_is_legacy()) {
4913 mm->mmap_base = TASK_UNMAPPED_BASE;
4914+
4915+#ifdef CONFIG_PAX_RANDMMAP
4916+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4917+ mm->mmap_base += mm->delta_mmap;
4918+#endif
4919+
4920 mm->get_unmapped_area = arch_get_unmapped_area;
4921 mm->unmap_area = arch_unmap_area;
4922 } else {
4923 mm->mmap_base = mmap_base();
4924+
4925+#ifdef CONFIG_PAX_RANDMMAP
4926+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4927+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4928+#endif
4929+
4930 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4931 mm->unmap_area = arch_unmap_area_topdown;
4932 }
c6e2a6c8 4933@@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
ae4e228f
MT
4934 */
4935 if (mmap_is_legacy()) {
4936 mm->mmap_base = TASK_UNMAPPED_BASE;
4937+
4938+#ifdef CONFIG_PAX_RANDMMAP
4939+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4940+ mm->mmap_base += mm->delta_mmap;
4941+#endif
4942+
4943 mm->get_unmapped_area = s390_get_unmapped_area;
4944 mm->unmap_area = arch_unmap_area;
4945 } else {
4946 mm->mmap_base = mmap_base();
4947+
4948+#ifdef CONFIG_PAX_RANDMMAP
4949+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4950+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4951+#endif
4952+
4953 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4954 mm->unmap_area = arch_unmap_area_topdown;
4955 }
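With CONFIG_PAX_RANDMMAP the per-mm deltas shift the mmap base: upward in the legacy bottom-up layout, downward (by delta_mmap plus delta_stack) in the top-down layout, so both search directions start from a randomized point. The deltas are filled in at exec time by the generic PaX code elsewhere in this patch; how they are presumably derived from the PAX_DELTA_*_LEN bit counts declared in the elf headers is sketched below, with the helper name and exact formula being assumptions.

/* Sketch: turn an entropy width in bits (e.g. PAX_DELTA_MMAP_LEN) into a
 * page-aligned random offset; assumed shape, not lifted from the patch. */
static unsigned long pax_delta_from_bits(unsigned int bits)
{
	unsigned long mask = (1UL << bits) - 1;

	return ((unsigned long)get_random_int() & mask) << PAGE_SHIFT;
}

/* at execve() time, roughly:
 *	mm->delta_mmap  = pax_delta_from_bits(PAX_DELTA_MMAP_LEN);
 *	mm->delta_stack = pax_delta_from_bits(PAX_DELTA_STACK_LEN); */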
4c928ab7
MT
4956diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
4957index ae3d59f..f65f075 100644
4958--- a/arch/score/include/asm/cache.h
4959+++ b/arch/score/include/asm/cache.h
4960@@ -1,7 +1,9 @@
4961 #ifndef _ASM_SCORE_CACHE_H
4962 #define _ASM_SCORE_CACHE_H
4963
4964+#include <linux/const.h>
4965+
4966 #define L1_CACHE_SHIFT 4
4967-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4968+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4969
4970 #endif /* _ASM_SCORE_CACHE_H */
c6e2a6c8
MT
4971diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
4972index f9f3cd5..58ff438 100644
4973--- a/arch/score/include/asm/exec.h
4974+++ b/arch/score/include/asm/exec.h
4975@@ -1,6 +1,6 @@
4976 #ifndef _ASM_SCORE_EXEC_H
4977 #define _ASM_SCORE_EXEC_H
4978
bc901d79
MT
4979-extern unsigned long arch_align_stack(unsigned long sp);
4980+#define arch_align_stack(x) (x)
4981
c6e2a6c8 4982 #endif /* _ASM_SCORE_EXEC_H */
fe2de317 4983diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
c6e2a6c8 4984index 2707023..1c2a3b7 100644
fe2de317
MT
4985--- a/arch/score/kernel/process.c
4986+++ b/arch/score/kernel/process.c
c6e2a6c8 4987@@ -159,8 +159,3 @@ unsigned long get_wchan(struct task_struct *task)
bc901d79
MT
4988
4989 return task_pt_regs(task)->cp0_epc;
4990 }
4991-
4992-unsigned long arch_align_stack(unsigned long sp)
4993-{
4994- return sp;
4995-}
4c928ab7
MT
4996diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
4997index ef9e555..331bd29 100644
4998--- a/arch/sh/include/asm/cache.h
4999+++ b/arch/sh/include/asm/cache.h
5000@@ -9,10 +9,11 @@
5001 #define __ASM_SH_CACHE_H
5002 #ifdef __KERNEL__
5003
5004+#include <linux/const.h>
5005 #include <linux/init.h>
5006 #include <cpu/cache.h>
5007
5008-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5009+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5010
5011 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
5012
fe2de317
MT
5013diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
5014index afeb710..d1d1289 100644
5015--- a/arch/sh/mm/mmap.c
5016+++ b/arch/sh/mm/mmap.c
5017@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
57199397
MT
5018 addr = PAGE_ALIGN(addr);
5019
5020 vma = find_vma(mm, addr);
5021- if (TASK_SIZE - len >= addr &&
5022- (!vma || addr + len <= vma->vm_start))
5023+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5024 return addr;
5025 }
efbe55a5 5026
57199397
MT
5027@@ -106,7 +105,7 @@ full_search:
5028 }
5029 return -ENOMEM;
5030 }
5031- if (likely(!vma || addr + len <= vma->vm_start)) {
5032+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5033 /*
5034 * Remember the place where we stopped the search:
5035 */
fe2de317 5036@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
57199397
MT
5037 addr = PAGE_ALIGN(addr);
5038
5039 vma = find_vma(mm, addr);
5040- if (TASK_SIZE - len >= addr &&
5041- (!vma || addr + len <= vma->vm_start))
5042+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
5043 return addr;
5044 }
5045
fe2de317 5046@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
57199397
MT
5047 /* make sure it can fit in the remaining address space */
5048 if (likely(addr > len)) {
5049 vma = find_vma(mm, addr-len);
5050- if (!vma || addr <= vma->vm_start) {
5051+ if (check_heap_stack_gap(vma, addr - len, len)) {
5052 /* remember the address as a hint for next time */
5053 return (mm->free_area_cache = addr-len);
5054 }
fe2de317 5055@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16454cff
MT
5056 if (unlikely(mm->mmap_base < len))
5057 goto bottomup;
5058
5059- addr = mm->mmap_base-len;
5060- if (do_colour_align)
5061- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5062+ addr = mm->mmap_base - len;
5063
5064 do {
5065+ if (do_colour_align)
5066+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5067 /*
5068 * Lookup failure means no vma is above this address,
5069 * else if new region fits below vma->vm_start,
57199397
MT
5070 * return with success:
5071 */
5072 vma = find_vma(mm, addr);
5073- if (likely(!vma || addr+len <= vma->vm_start)) {
5074+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5075 /* remember the address as a hint for next time */
5076 return (mm->free_area_cache = addr);
5077 }
fe2de317 5078@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16454cff
MT
5079 mm->cached_hole_size = vma->vm_start - addr;
5080
5081 /* try just below the current vma->vm_start */
5082- addr = vma->vm_start-len;
5083- if (do_colour_align)
5084- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5085- } while (likely(len < vma->vm_start));
5086+ addr = skip_heap_stack_gap(vma, len);
5087+ } while (!IS_ERR_VALUE(addr));
5088
5089 bottomup:
5090 /*
fe2de317 5091diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4c928ab7 5092index eddcfb3..b117d90 100644
fe2de317
MT
5093--- a/arch/sparc/Makefile
5094+++ b/arch/sparc/Makefile
5095@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
5096 # Export what is needed by arch/sparc/boot/Makefile
5097 export VMLINUX_INIT VMLINUX_MAIN
5098 VMLINUX_INIT := $(head-y) $(init-y)
5099-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5100+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5101 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5102 VMLINUX_MAIN += $(drivers-y) $(net-y)
5103
5104diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
c6e2a6c8 5105index ce35a1c..2e7b8f9 100644
fe2de317
MT
5106--- a/arch/sparc/include/asm/atomic_64.h
5107+++ b/arch/sparc/include/asm/atomic_64.h
57199397 5108@@ -14,18 +14,40 @@
58c5fc13
MT
5109 #define ATOMIC64_INIT(i) { (i) }
5110
57199397 5111 #define atomic_read(v) (*(volatile int *)&(v)->counter)
ae4e228f
MT
5112+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5113+{
5114+ return v->counter;
5115+}
57199397 5116 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
ae4e228f
MT
5117+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5118+{
5119+ return v->counter;
5120+}
58c5fc13
MT
5121
5122 #define atomic_set(v, i) (((v)->counter) = i)
ae4e228f
MT
5123+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5124+{
5125+ v->counter = i;
5126+}
58c5fc13 5127 #define atomic64_set(v, i) (((v)->counter) = i)
ae4e228f
MT
5128+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5129+{
5130+ v->counter = i;
5131+}
58c5fc13
MT
5132
5133 extern void atomic_add(int, atomic_t *);
5134+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
57199397
MT
5135 extern void atomic64_add(long, atomic64_t *);
5136+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
58c5fc13
MT
5137 extern void atomic_sub(int, atomic_t *);
5138+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
57199397
MT
5139 extern void atomic64_sub(long, atomic64_t *);
5140+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
58c5fc13
MT
5141
5142 extern int atomic_add_ret(int, atomic_t *);
57199397
MT
5143+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
5144 extern long atomic64_add_ret(long, atomic64_t *);
5145+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
ae4e228f 5146 extern int atomic_sub_ret(int, atomic_t *);
57199397 5147 extern long atomic64_sub_ret(long, atomic64_t *);
ae4e228f 5148
fe2de317 5149@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
57199397 5150 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
ae4e228f
MT
5151
5152 #define atomic_inc_return(v) atomic_add_ret(1, v)
57199397
MT
5153+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5154+{
5155+ return atomic_add_ret_unchecked(1, v);
5156+}
ae4e228f 5157 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
57199397
MT
5158+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
5159+{
5160+ return atomic64_add_ret_unchecked(1, v);
5161+}
ae4e228f
MT
5162
5163 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
5164 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
6892158b
MT
5165
5166 #define atomic_add_return(i, v) atomic_add_ret(i, v)
5167+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5168+{
5169+ return atomic_add_ret_unchecked(i, v);
5170+}
5171 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
15a11c5b
MT
5172+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
5173+{
5174+ return atomic64_add_ret_unchecked(i, v);
5175+}
6892158b
MT
5176
5177 /*
15a11c5b 5178 * atomic_inc_and_test - increment and test
fe2de317 5179@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8308f9c9
MT
5180 * other cases.
5181 */
5182 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
15a11c5b
MT
5183+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5184+{
5185+ return atomic_inc_return_unchecked(v) == 0;
5186+}
8308f9c9
MT
5187 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
5188
5189 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
fe2de317 5190@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
58c5fc13
MT
5191 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
5192
5193 #define atomic_inc(v) atomic_add(1, v)
ae4e228f
MT
5194+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
5195+{
5196+ atomic_add_unchecked(1, v);
5197+}
58c5fc13 5198 #define atomic64_inc(v) atomic64_add(1, v)
ae4e228f
MT
5199+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
5200+{
5201+ atomic64_add_unchecked(1, v);
5202+}
58c5fc13
MT
5203
5204 #define atomic_dec(v) atomic_sub(1, v)
df50ba0c
MT
5205+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
5206+{
5207+ atomic_sub_unchecked(1, v);
5208+}
ae4e228f 5209 #define atomic64_dec(v) atomic64_sub(1, v)
df50ba0c
MT
5210+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
5211+{
5212+ atomic64_sub_unchecked(1, v);
5213+}
5214
5215 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
5216 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
8308f9c9
MT
5217
5218 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
15a11c5b
MT
5219+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
5220+{
5221+ return cmpxchg(&v->counter, old, new);
5222+}
8308f9c9 5223 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
15a11c5b
MT
5224+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5225+{
5226+ return xchg(&v->counter, new);
5227+}
58c5fc13 5228
6e9df6a3 5229 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
58c5fc13
MT
5230 {
5231- int c, old;
5232+ int c, old, new;
5233 c = atomic_read(v);
5234 for (;;) {
5235- if (unlikely(c == (u)))
5236+ if (unlikely(c == u))
5237 break;
5238- old = atomic_cmpxchg((v), c, c + (a));
5239+
5240+ asm volatile("addcc %2, %0, %0\n"
5241+
5242+#ifdef CONFIG_PAX_REFCOUNT
5243+ "tvs %%icc, 6\n"
5244+#endif
5245+
5246+ : "=r" (new)
5247+ : "0" (c), "ir" (a)
5248+ : "cc");
5249+
5250+ old = atomic_cmpxchg(v, c, new);
5251 if (likely(old == c))
5252 break;
5253 c = old;
c6e2a6c8 5254@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
15a11c5b
MT
5255 #define atomic64_cmpxchg(v, o, n) \
5256 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
5257 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
5258+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
5259+{
5260+ return xchg(&v->counter, new);
5261+}
58c5fc13 5262
57199397 5263 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
58c5fc13
MT
5264 {
5265- long c, old;
5266+ long c, old, new;
5267 c = atomic64_read(v);
5268 for (;;) {
5269- if (unlikely(c == (u)))
5270+ if (unlikely(c == u))
5271 break;
5272- old = atomic64_cmpxchg((v), c, c + (a));
5273+
5274+ asm volatile("addcc %2, %0, %0\n"
5275+
5276+#ifdef CONFIG_PAX_REFCOUNT
5277+ "tvs %%xcc, 6\n"
5278+#endif
5279+
5280+ : "=r" (new)
5281+ : "0" (c), "ir" (a)
5282+ : "cc");
5283+
5284+ old = atomic64_cmpxchg(v, c, new);
5285 if (likely(old == c))
5286 break;
5287 c = old;
5288 }
5289- return c != (u);
5290+ return c != u;
5291 }
5292
5293 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
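On sparc64 the checked atomics gain overflow detection: add becomes addcc so the condition codes are set, and under CONFIG_PAX_REFCOUNT a tvs (trap on overflow) instruction raises trap 6 when the signed result wrapped; bad_trap()/bad_trap_tl1() further down route that trap level to pax_report_refcount_overflow(). The C-level effect is roughly a trapping signed add; the portable sketch below is an illustration only, the real mechanism is the inline assembly above.

#include <limits.h>

/* Illustration of what the addcc/tvs pair enforces: the increment either
 * completes without signed overflow or the task is killed via the trap. */
static int refcount_add_or_trap(int a, int v)
{
	if ((a > 0 && v > INT_MAX - a) ||
	    (a < 0 && v < INT_MIN - a))
		__builtin_trap();	/* stands in for "tvs %icc, 6" */
	return v + a;
}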
fe2de317 5294diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4c928ab7 5295index 69358b5..9d0d492 100644
fe2de317
MT
5296--- a/arch/sparc/include/asm/cache.h
5297+++ b/arch/sparc/include/asm/cache.h
4c928ab7
MT
5298@@ -7,10 +7,12 @@
5299 #ifndef _SPARC_CACHE_H
5300 #define _SPARC_CACHE_H
5301
5302+#include <linux/const.h>
5303+
66a7e928
MT
5304 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
5305
5306 #define L1_CACHE_SHIFT 5
5307-#define L1_CACHE_BYTES 32
4c928ab7 5308+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
66a7e928
MT
5309
5310 #ifdef CONFIG_SPARC32
5311 #define SMP_CACHE_BYTES_SHIFT 5
fe2de317
MT
5312diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
5313index 4269ca6..e3da77f 100644
5314--- a/arch/sparc/include/asm/elf_32.h
5315+++ b/arch/sparc/include/asm/elf_32.h
ae4e228f 5316@@ -114,6 +114,13 @@ typedef struct {
58c5fc13
MT
5317
5318 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
5319
5320+#ifdef CONFIG_PAX_ASLR
5321+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5322+
5323+#define PAX_DELTA_MMAP_LEN 16
5324+#define PAX_DELTA_STACK_LEN 16
5325+#endif
5326+
5327 /* This yields a mask that user programs can use to figure out what
5328 instruction set this cpu supports. This can NOT be done in userspace
5329 on Sparc. */
fe2de317
MT
5330diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
5331index 7df8b7f..4946269 100644
5332--- a/arch/sparc/include/asm/elf_64.h
5333+++ b/arch/sparc/include/asm/elf_64.h
15a11c5b 5334@@ -180,6 +180,13 @@ typedef struct {
58c5fc13
MT
5335 #define ELF_ET_DYN_BASE 0x0000010000000000UL
5336 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
5337
5338+#ifdef CONFIG_PAX_ASLR
5339+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
5340+
ae4e228f
MT
5341+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
5342+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
58c5fc13 5343+#endif
15a11c5b
MT
5344+
5345 extern unsigned long sparc64_elf_hwcap;
5346 #define ELF_HWCAP sparc64_elf_hwcap
58c5fc13 5347
5e856224
MT
5348diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
5349index ca2b344..c6084f89 100644
5350--- a/arch/sparc/include/asm/pgalloc_32.h
5351+++ b/arch/sparc/include/asm/pgalloc_32.h
5352@@ -37,6 +37,7 @@ BTFIXUPDEF_CALL(void, free_pgd_fast, pgd_t *)
5353 BTFIXUPDEF_CALL(void, pgd_set, pgd_t *, pmd_t *)
5354 #define pgd_set(pgdp,pmdp) BTFIXUP_CALL(pgd_set)(pgdp,pmdp)
5355 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
5356+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
4c928ab7 5357
5e856224
MT
5358 BTFIXUPDEF_CALL(pmd_t *, pmd_alloc_one, struct mm_struct *, unsigned long)
5359 #define pmd_alloc_one(mm, address) BTFIXUP_CALL(pmd_alloc_one)(mm, address)
5360diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
5361index 40b2d7a..22a665b 100644
5362--- a/arch/sparc/include/asm/pgalloc_64.h
5363+++ b/arch/sparc/include/asm/pgalloc_64.h
5364@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
5365 }
4c928ab7 5366
5e856224
MT
5367 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
5368+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
5369
5370 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
5371 {
fe2de317 5372diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
c6e2a6c8 5373index 3d71018..48a11c5 100644
fe2de317
MT
5374--- a/arch/sparc/include/asm/pgtable_32.h
5375+++ b/arch/sparc/include/asm/pgtable_32.h
15a11c5b 5376@@ -45,6 +45,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
58c5fc13
MT
5377 BTFIXUPDEF_INT(page_none)
5378 BTFIXUPDEF_INT(page_copy)
5379 BTFIXUPDEF_INT(page_readonly)
5380+
5381+#ifdef CONFIG_PAX_PAGEEXEC
5382+BTFIXUPDEF_INT(page_shared_noexec)
5383+BTFIXUPDEF_INT(page_copy_noexec)
5384+BTFIXUPDEF_INT(page_readonly_noexec)
5385+#endif
5386+
5387 BTFIXUPDEF_INT(page_kernel)
5388
5389 #define PMD_SHIFT SUN4C_PMD_SHIFT
15a11c5b 5390@@ -66,6 +73,16 @@ extern pgprot_t PAGE_SHARED;
58c5fc13
MT
5391 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
5392 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
5393
5394+#ifdef CONFIG_PAX_PAGEEXEC
5395+extern pgprot_t PAGE_SHARED_NOEXEC;
5396+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
5397+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
5398+#else
5399+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5400+# define PAGE_COPY_NOEXEC PAGE_COPY
5401+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5402+#endif
5403+
5404 extern unsigned long page_kernel;
5405
5406 #ifdef MODULE
fe2de317
MT
5407diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
5408index f6ae2b2..b03ffc7 100644
5409--- a/arch/sparc/include/asm/pgtsrmmu.h
5410+++ b/arch/sparc/include/asm/pgtsrmmu.h
58c5fc13
MT
5411@@ -115,6 +115,13 @@
5412 SRMMU_EXEC | SRMMU_REF)
5413 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
5414 SRMMU_EXEC | SRMMU_REF)
5415+
5416+#ifdef CONFIG_PAX_PAGEEXEC
5417+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
5418+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5419+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
5420+#endif
5421+
5422 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
5423 SRMMU_DIRTY | SRMMU_REF)
5424
fe2de317
MT
5425diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
5426index 9689176..63c18ea 100644
5427--- a/arch/sparc/include/asm/spinlock_64.h
5428+++ b/arch/sparc/include/asm/spinlock_64.h
5429@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
8308f9c9
MT
5430
5431 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
5432
5433-static void inline arch_read_lock(arch_rwlock_t *lock)
5434+static inline void arch_read_lock(arch_rwlock_t *lock)
5435 {
5436 unsigned long tmp1, tmp2;
5437
58c5fc13
MT
5438 __asm__ __volatile__ (
5439 "1: ldsw [%2], %0\n"
5440 " brlz,pn %0, 2f\n"
5441-"4: add %0, 1, %1\n"
5442+"4: addcc %0, 1, %1\n"
5443+
5444+#ifdef CONFIG_PAX_REFCOUNT
5445+" tvs %%icc, 6\n"
5446+#endif
5447+
5448 " cas [%2], %0, %1\n"
5449 " cmp %0, %1\n"
5450 " bne,pn %%icc, 1b\n"
fe2de317 5451@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
58c5fc13
MT
5452 " .previous"
5453 : "=&r" (tmp1), "=&r" (tmp2)
5454 : "r" (lock)
5455- : "memory");
5456+ : "memory", "cc");
5457 }
5458
8308f9c9
MT
5459-static int inline arch_read_trylock(arch_rwlock_t *lock)
5460+static inline int arch_read_trylock(arch_rwlock_t *lock)
5461 {
5462 int tmp1, tmp2;
5463
fe2de317 5464@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
58c5fc13
MT
5465 "1: ldsw [%2], %0\n"
5466 " brlz,a,pn %0, 2f\n"
5467 " mov 0, %0\n"
5468-" add %0, 1, %1\n"
5469+" addcc %0, 1, %1\n"
5470+
5471+#ifdef CONFIG_PAX_REFCOUNT
5472+" tvs %%icc, 6\n"
5473+#endif
5474+
5475 " cas [%2], %0, %1\n"
5476 " cmp %0, %1\n"
5477 " bne,pn %%icc, 1b\n"
fe2de317 5478@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8308f9c9
MT
5479 return tmp1;
5480 }
5481
5482-static void inline arch_read_unlock(arch_rwlock_t *lock)
5483+static inline void arch_read_unlock(arch_rwlock_t *lock)
5484 {
5485 unsigned long tmp1, tmp2;
58c5fc13
MT
5486
5487 __asm__ __volatile__(
5488 "1: lduw [%2], %0\n"
5489-" sub %0, 1, %1\n"
5490+" subcc %0, 1, %1\n"
5491+
5492+#ifdef CONFIG_PAX_REFCOUNT
ae4e228f 5493+" tvs %%icc, 6\n"
58c5fc13
MT
5494+#endif
5495+
5496 " cas [%2], %0, %1\n"
5497 " cmp %0, %1\n"
5498 " bne,pn %%xcc, 1b\n"
fe2de317 5499@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8308f9c9
MT
5500 : "memory");
5501 }
5502
5503-static void inline arch_write_lock(arch_rwlock_t *lock)
5504+static inline void arch_write_lock(arch_rwlock_t *lock)
5505 {
5506 unsigned long mask, tmp1, tmp2;
5507
fe2de317 5508@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8308f9c9
MT
5509 : "memory");
5510 }
5511
5512-static void inline arch_write_unlock(arch_rwlock_t *lock)
5513+static inline void arch_write_unlock(arch_rwlock_t *lock)
5514 {
5515 __asm__ __volatile__(
5516 " stw %%g0, [%0]"
fe2de317 5517@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8308f9c9
MT
5518 : "memory");
5519 }
5520
5521-static int inline arch_write_trylock(arch_rwlock_t *lock)
5522+static inline int arch_write_trylock(arch_rwlock_t *lock)
5523 {
5524 unsigned long mask, tmp1, tmp2, result;
5525
fe2de317 5526diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
5e856224 5527index c2a1080..21ed218 100644
fe2de317
MT
5528--- a/arch/sparc/include/asm/thread_info_32.h
5529+++ b/arch/sparc/include/asm/thread_info_32.h
15a11c5b
MT
5530@@ -50,6 +50,8 @@ struct thread_info {
5531 unsigned long w_saved;
5532
5533 struct restart_block restart_block;
5534+
5535+ unsigned long lowest_stack;
5536 };
5537
5538 /*
fe2de317 5539diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
5e856224 5540index 01d057f..13a7d2f 100644
fe2de317
MT
5541--- a/arch/sparc/include/asm/thread_info_64.h
5542+++ b/arch/sparc/include/asm/thread_info_64.h
15a11c5b
MT
5543@@ -63,6 +63,8 @@ struct thread_info {
5544 struct pt_regs *kern_una_regs;
5545 unsigned int kern_una_insn;
5546
5547+ unsigned long lowest_stack;
5548+
5549 unsigned long fpregs[0] __attribute__ ((aligned(64)));
5550 };
5551
5e856224
MT
5552@@ -214,10 +216,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
5553 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
5554 /* flag bit 6 is available */
5555 #define TIF_32BIT 7 /* 32-bit binary */
5556-/* flag bit 8 is available */
5557+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
5558 #define TIF_SECCOMP 9 /* secure computing */
5559 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
5560 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
5561+
5562 /* NOTE: Thread flags >= 12 should be ones we have no interest
5563 * in using in assembly, else we can't use the mask as
5564 * an immediate value in instructions such as andcc.
5565@@ -236,12 +239,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
5566 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
5567 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
5568 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
5569+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5570
5571 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
5572 _TIF_DO_NOTIFY_RESUME_MASK | \
5573 _TIF_NEED_RESCHED)
5574 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
5575
5576+#define _TIF_WORK_SYSCALL \
5577+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
5578+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
5579+
5580+
5581 /*
5582 * Thread-synchronous status.
5583 *
fe2de317
MT
5584diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
5585index e88fbe5..96b0ce5 100644
5586--- a/arch/sparc/include/asm/uaccess.h
5587+++ b/arch/sparc/include/asm/uaccess.h
5588@@ -1,5 +1,13 @@
5589 #ifndef ___ASM_SPARC_UACCESS_H
5590 #define ___ASM_SPARC_UACCESS_H
5591+
5592+#ifdef __KERNEL__
5593+#ifndef __ASSEMBLY__
5594+#include <linux/types.h>
5595+extern void check_object_size(const void *ptr, unsigned long n, bool to);
5596+#endif
5597+#endif
5598+
5599 #if defined(__sparc__) && defined(__arch64__)
5600 #include <asm/uaccess_64.h>
5601 #else
5602diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
5603index 8303ac4..07f333d 100644
5604--- a/arch/sparc/include/asm/uaccess_32.h
5605+++ b/arch/sparc/include/asm/uaccess_32.h
5606@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
58c5fc13
MT
5607
5608 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
5609 {
5610- if (n && __access_ok((unsigned long) to, n))
5611+ if ((long)n < 0)
5612+ return n;
5613+
5614+ if (n && __access_ok((unsigned long) to, n)) {
5615+ if (!__builtin_constant_p(n))
5616+ check_object_size(from, n, true);
5617 return __copy_user(to, (__force void __user *) from, n);
5618- else
5619+ } else
5620 return n;
5621 }
5622
5623 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
5624 {
5625+ if ((long)n < 0)
5626+ return n;
5627+
5628+ if (!__builtin_constant_p(n))
5629+ check_object_size(from, n, true);
5630+
5631 return __copy_user(to, (__force void __user *) from, n);
5632 }
5633
6892158b 5634 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
58c5fc13 5635 {
6892158b 5636- if (n && __access_ok((unsigned long) from, n))
58c5fc13
MT
5637+ if ((long)n < 0)
5638+ return n;
5639+
5640+ if (n && __access_ok((unsigned long) from, n)) {
5641+ if (!__builtin_constant_p(n))
5642+ check_object_size(to, n, false);
5643 return __copy_user((__force void __user *) to, from, n);
5644- else
5645+ } else
5646 return n;
5647 }
5648
5649 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
5650 {
5651+ if ((long)n < 0)
5652+ return n;
58c5fc13
MT
5653+
5654 return __copy_user((__force void __user *) to, from, n);
5655 }
5656
fe2de317 5657diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
c6e2a6c8 5658index a1091afb..380228e 100644
fe2de317
MT
5659--- a/arch/sparc/include/asm/uaccess_64.h
5660+++ b/arch/sparc/include/asm/uaccess_64.h
ae4e228f
MT
5661@@ -10,6 +10,7 @@
5662 #include <linux/compiler.h>
5663 #include <linux/string.h>
5664 #include <linux/thread_info.h>
5665+#include <linux/kernel.h>
5666 #include <asm/asi.h>
ae4e228f 5667 #include <asm/spitfire.h>
c6e2a6c8
MT
5668 #include <asm-generic/uaccess-unaligned.h>
5669@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
6892158b
MT
5670 static inline unsigned long __must_check
5671 copy_from_user(void *to, const void __user *from, unsigned long size)
5672 {
5673- unsigned long ret = ___copy_from_user(to, from, size);
5674+ unsigned long ret;
ae4e228f
MT
5675
5676+ if ((long)size < 0 || size > INT_MAX)
58c5fc13
MT
5677+ return size;
5678+
5679+ if (!__builtin_constant_p(size))
5680+ check_object_size(to, size, false);
5681+
6892158b
MT
5682+ ret = ___copy_from_user(to, from, size);
5683 if (unlikely(ret))
5684 ret = copy_from_user_fixup(to, from, size);
5685
c6e2a6c8 5686@@ -229,8 +237,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
58c5fc13
MT
5687 static inline unsigned long __must_check
5688 copy_to_user(void __user *to, const void *from, unsigned long size)
5689 {
5690- unsigned long ret = ___copy_to_user(to, from, size);
5691+ unsigned long ret;
fe2de317 5692
ae4e228f 5693+ if ((long)size < 0 || size > INT_MAX)
58c5fc13
MT
5694+ return size;
5695+
5696+ if (!__builtin_constant_p(size))
5697+ check_object_size(from, size, true);
fe2de317 5698+
ae4e228f 5699+ ret = ___copy_to_user(to, from, size);
58c5fc13
MT
5700 if (unlikely(ret))
5701 ret = copy_to_user_fixup(to, from, size);
ae4e228f 5702 return ret;
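Besides the same negative/oversized length check, the sparc64 copy routines now call check_object_size() for non-constant sizes; that is the hook the rest of this patch uses to verify that a copy crossing the user/kernel boundary stays within the bounds of the slab object or stack frame it was handed. A hedged sketch of the kind of check that hook performs follows; the helper names inside it are illustrative, not the patch's own code.

/* Sketch of the hardened-usercopy idea behind check_object_size();
 * the helpers queried here are assumptions. */
void check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	const char *bad = NULL;

	if (!n)
		return;
	if (object_is_on_stack(ptr) && !object_fits_stack_frame(ptr, n))
		bad = "<process stack>";
	else if (object_is_slab(ptr) && !object_fits_slab_object(ptr, n))
		bad = "<slab object>";

	if (bad)
		report_usercopy_violation(ptr, n, to_user, bad);	/* fatal for the task */
}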
fe2de317
MT
5703diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
5704index cb85458..e063f17 100644
5705--- a/arch/sparc/kernel/Makefile
5706+++ b/arch/sparc/kernel/Makefile
57199397
MT
5707@@ -3,7 +3,7 @@
5708 #
5709
5710 asflags-y := -ansi
5711-ccflags-y := -Werror
5712+#ccflags-y := -Werror
5713
5714 extra-y := head_$(BITS).o
5715 extra-y += init_task.o
fe2de317 5716diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
c6e2a6c8 5717index efa0754..74b03fe 100644
fe2de317
MT
5718--- a/arch/sparc/kernel/process_32.c
5719+++ b/arch/sparc/kernel/process_32.c
c6e2a6c8 5720@@ -200,7 +200,7 @@ void __show_backtrace(unsigned long fp)
bc901d79
MT
5721 rw->ins[4], rw->ins[5],
5722 rw->ins[6],
5723 rw->ins[7]);
5724- printk("%pS\n", (void *) rw->ins[7]);
5725+ printk("%pA\n", (void *) rw->ins[7]);
5726 rw = (struct reg_window32 *) rw->ins[6];
5727 }
5728 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
c6e2a6c8 5729@@ -267,14 +267,14 @@ void show_regs(struct pt_regs *r)
bc901d79
MT
5730
5731 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
5732 r->psr, r->pc, r->npc, r->y, print_tainted());
5733- printk("PC: <%pS>\n", (void *) r->pc);
5734+ printk("PC: <%pA>\n", (void *) r->pc);
5735 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5736 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
5737 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
5738 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5739 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
5740 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
5741- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
5742+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
5743
5744 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
5745 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
c6e2a6c8 5746@@ -309,7 +309,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
bc901d79
MT
5747 rw = (struct reg_window32 *) fp;
5748 pc = rw->ins[7];
5749 printk("[%08lx : ", pc);
5750- printk("%pS ] ", (void *) pc);
5751+ printk("%pA ] ", (void *) pc);
5752 fp = rw->ins[6];
5753 } while (++count < 16);
5754 printk("\n");
fe2de317 5755diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
c6e2a6c8 5756index aff0c72..9067b39 100644
fe2de317
MT
5757--- a/arch/sparc/kernel/process_64.c
5758+++ b/arch/sparc/kernel/process_64.c
c6e2a6c8 5759@@ -179,14 +179,14 @@ static void show_regwindow(struct pt_regs *regs)
bc901d79
MT
5760 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
5761 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
5762 if (regs->tstate & TSTATE_PRIV)
5763- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
5764+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
5765 }
5766
5767 void show_regs(struct pt_regs *regs)
5768 {
5769 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5770 regs->tpc, regs->tnpc, regs->y, print_tainted());
5771- printk("TPC: <%pS>\n", (void *) regs->tpc);
5772+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5773 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5774 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5775 regs->u_regs[3]);
c6e2a6c8 5776@@ -199,7 +199,7 @@ void show_regs(struct pt_regs *regs)
bc901d79
MT
5777 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5778 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5779 regs->u_regs[15]);
5780- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5781+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5782 show_regwindow(regs);
5783 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
5784 }
c6e2a6c8 5785@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
bc901d79
MT
5786 ((tp && tp->task) ? tp->task->pid : -1));
5787
5788 if (gp->tstate & TSTATE_PRIV) {
5789- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5790+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5791 (void *) gp->tpc,
5792 (void *) gp->o7,
5793 (void *) gp->i7,
5e856224 5794diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
c6e2a6c8 5795index 6f97c07..b1300ec 100644
5e856224
MT
5796--- a/arch/sparc/kernel/ptrace_64.c
5797+++ b/arch/sparc/kernel/ptrace_64.c
c6e2a6c8 5798@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
5e856224
MT
5799 return ret;
5800 }
5801
5802+#ifdef CONFIG_GRKERNSEC_SETXID
5803+extern void gr_delayed_cred_worker(void);
5804+#endif
5805+
5806 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5807 {
5808 int ret = 0;
c6e2a6c8 5809@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5e856224
MT
5810 /* do the secure computing check first */
5811 secure_computing(regs->u_regs[UREG_G1]);
5812
5813+#ifdef CONFIG_GRKERNSEC_SETXID
5814+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5815+ gr_delayed_cred_worker();
5816+#endif
5817+
5818 if (test_thread_flag(TIF_SYSCALL_TRACE))
5819 ret = tracehook_report_syscall_entry(regs);
5820
c6e2a6c8 5821@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
5e856224
MT
5822
5823 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
5824 {
5825+#ifdef CONFIG_GRKERNSEC_SETXID
5826+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5827+ gr_delayed_cred_worker();
5828+#endif
5829+
5830 audit_syscall_exit(regs);
5831
5832 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
fe2de317
MT
5833diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5834index 42b282f..28ce9f2 100644
5835--- a/arch/sparc/kernel/sys_sparc_32.c
5836+++ b/arch/sparc/kernel/sys_sparc_32.c
5837@@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
58c5fc13
MT
5838 if (ARCH_SUN4C && len > 0x20000000)
5839 return -ENOMEM;
5840 if (!addr)
5841- addr = TASK_UNMAPPED_BASE;
5842+ addr = current->mm->mmap_base;
5843
5844 if (flags & MAP_SHARED)
5845 addr = COLOUR_ALIGN(addr);
fe2de317 5846@@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
57199397
MT
5847 }
5848 if (TASK_SIZE - PAGE_SIZE - len < addr)
5849 return -ENOMEM;
5850- if (!vmm || addr + len <= vmm->vm_start)
5851+ if (check_heap_stack_gap(vmm, addr, len))
5852 return addr;
5853 addr = vmm->vm_end;
5854 if (flags & MAP_SHARED)
fe2de317 5855diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
c6e2a6c8 5856index 3ee51f1..2ba4913 100644
fe2de317
MT
5857--- a/arch/sparc/kernel/sys_sparc_64.c
5858+++ b/arch/sparc/kernel/sys_sparc_64.c
5859@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
58c5fc13
MT
5860 /* We do not accept a shared mapping if it would violate
5861 * cache aliasing constraints.
5862 */
5863- if ((flags & MAP_SHARED) &&
5864+ if ((filp || (flags & MAP_SHARED)) &&
5865 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5866 return -EINVAL;
5867 return addr;
fe2de317 5868@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
58c5fc13
MT
5869 if (filp || (flags & MAP_SHARED))
5870 do_color_align = 1;
5871
5872+#ifdef CONFIG_PAX_RANDMMAP
5873+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5874+#endif
5875+
5876 if (addr) {
5877 if (do_color_align)
5878 addr = COLOUR_ALIGN(addr, pgoff);
fe2de317 5879@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
57199397
MT
5880 addr = PAGE_ALIGN(addr);
5881
5882 vma = find_vma(mm, addr);
5883- if (task_size - len >= addr &&
5884- (!vma || addr + len <= vma->vm_start))
5885+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5886 return addr;
58c5fc13
MT
5887 }
5888
5889 if (len > mm->cached_hole_size) {
5890- start_addr = addr = mm->free_area_cache;
5891+ start_addr = addr = mm->free_area_cache;
5892 } else {
5893- start_addr = addr = TASK_UNMAPPED_BASE;
5894+ start_addr = addr = mm->mmap_base;
5895 mm->cached_hole_size = 0;
5896 }
5897
57199397 5898@@ -174,14 +177,14 @@ full_search:
58c5fc13
MT
5899 vma = find_vma(mm, VA_EXCLUDE_END);
5900 }
5901 if (unlikely(task_size < addr)) {
5902- if (start_addr != TASK_UNMAPPED_BASE) {
5903- start_addr = addr = TASK_UNMAPPED_BASE;
5904+ if (start_addr != mm->mmap_base) {
5905+ start_addr = addr = mm->mmap_base;
5906 mm->cached_hole_size = 0;
5907 goto full_search;
5908 }
57199397
MT
5909 return -ENOMEM;
5910 }
5911- if (likely(!vma || addr + len <= vma->vm_start)) {
5912+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5913 /*
5914 * Remember the place where we stopped the search:
5915 */
fe2de317 5916@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
58c5fc13
MT
5917 /* We do not accept a shared mapping if it would violate
5918 * cache aliasing constraints.
5919 */
5920- if ((flags & MAP_SHARED) &&
5921+ if ((filp || (flags & MAP_SHARED)) &&
5922 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5923 return -EINVAL;
5924 return addr;
fe2de317 5925@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
57199397
MT
5926 addr = PAGE_ALIGN(addr);
5927
5928 vma = find_vma(mm, addr);
5929- if (task_size - len >= addr &&
5930- (!vma || addr + len <= vma->vm_start))
5931+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5932 return addr;
5933 }
5934
fe2de317 5935@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
57199397
MT
5936 /* make sure it can fit in the remaining address space */
5937 if (likely(addr > len)) {
5938 vma = find_vma(mm, addr-len);
5939- if (!vma || addr <= vma->vm_start) {
5940+ if (check_heap_stack_gap(vma, addr - len, len)) {
5941 /* remember the address as a hint for next time */
5942 return (mm->free_area_cache = addr-len);
5943 }
fe2de317 5944@@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16454cff
MT
5945 if (unlikely(mm->mmap_base < len))
5946 goto bottomup;
5947
5948- addr = mm->mmap_base-len;
5949- if (do_color_align)
5950- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5951+ addr = mm->mmap_base - len;
5952
5953 do {
5954+ if (do_color_align)
5955+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5956 /*
5957 * Lookup failure means no vma is above this address,
5958 * else if new region fits below vma->vm_start,
57199397
MT
5959 * return with success:
5960 */
5961 vma = find_vma(mm, addr);
5962- if (likely(!vma || addr+len <= vma->vm_start)) {
5963+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5964 /* remember the address as a hint for next time */
5965 return (mm->free_area_cache = addr);
5966 }
fe2de317 5967@@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16454cff
MT
5968 mm->cached_hole_size = vma->vm_start - addr;
5969
5970 /* try just below the current vma->vm_start */
5971- addr = vma->vm_start-len;
5972- if (do_color_align)
5973- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5974- } while (likely(len < vma->vm_start));
5975+ addr = skip_heap_stack_gap(vma, len);
5976+ } while (!IS_ERR_VALUE(addr));
5977
5978 bottomup:
5979 /*
fe2de317 5980@@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
ae4e228f 5981 gap == RLIM_INFINITY ||
58c5fc13
MT
5982 sysctl_legacy_va_layout) {
5983 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5984+
5985+#ifdef CONFIG_PAX_RANDMMAP
5986+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5987+ mm->mmap_base += mm->delta_mmap;
5988+#endif
5989+
5990 mm->get_unmapped_area = arch_get_unmapped_area;
5991 mm->unmap_area = arch_unmap_area;
5992 } else {
fe2de317 5993@@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
58c5fc13
MT
5994 gap = (task_size / 6 * 5);
5995
5996 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5997+
5998+#ifdef CONFIG_PAX_RANDMMAP
5999+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6000+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6001+#endif
6002+
6003 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6004 mm->unmap_area = arch_unmap_area_topdown;
6005 }
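Taken together, the top-down search in arch_get_unmapped_area_topdown() now applies the colour alignment inside the loop (so each new candidate below a vma is re-aligned), accepts a slot only when check_heap_stack_gap() passes, and terminates once skip_heap_stack_gap() returns an error value. The patched loop, consolidated from the added lines above for readability:

	addr = mm->mmap_base - len;

	do {
		if (do_color_align)
			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
		/* Lookup failure means no vma is above this address; otherwise
		 * the candidate must fit (guard gap included) below vm_start. */
		vma = find_vma(mm, addr);
		if (likely(check_heap_stack_gap(vma, addr, len)))
			return (mm->free_area_cache = addr);	/* remembered as a hint */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = skip_heap_stack_gap(vma, len);
	} while (!IS_ERR_VALUE(addr));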
5e856224
MT
6006diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
6007index 1d7e274..b39c527 100644
6008--- a/arch/sparc/kernel/syscalls.S
6009+++ b/arch/sparc/kernel/syscalls.S
6010@@ -62,7 +62,7 @@ sys32_rt_sigreturn:
6011 #endif
6012 .align 32
6013 1: ldx [%g6 + TI_FLAGS], %l5
6014- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6015+ andcc %l5, _TIF_WORK_SYSCALL, %g0
6016 be,pt %icc, rtrap
6017 nop
6018 call syscall_trace_leave
6019@@ -179,7 +179,7 @@ linux_sparc_syscall32:
6020
6021 srl %i5, 0, %o5 ! IEU1
6022 srl %i2, 0, %o2 ! IEU0 Group
6023- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6024+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6025 bne,pn %icc, linux_syscall_trace32 ! CTI
6026 mov %i0, %l5 ! IEU1
6027 call %l7 ! CTI Group brk forced
6028@@ -202,7 +202,7 @@ linux_sparc_syscall:
6029
6030 mov %i3, %o3 ! IEU1
6031 mov %i4, %o4 ! IEU0 Group
6032- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
6033+ andcc %l0, _TIF_WORK_SYSCALL, %g0
6034 bne,pn %icc, linux_syscall_trace ! CTI Group
6035 mov %i0, %l5 ! IEU0
6036 2: call %l7 ! CTI Group brk forced
6037@@ -226,7 +226,7 @@ ret_sys_call:
6038
6039 cmp %o0, -ERESTART_RESTARTBLOCK
6040 bgeu,pn %xcc, 1f
6041- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6042+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6043 80:
6044 /* System call success, clear Carry condition code. */
6045 andn %g3, %g2, %g3
6046@@ -241,7 +241,7 @@ ret_sys_call:
6047 /* System call failure, set Carry condition code.
6048 * Also, get abs(errno) to return to the process.
6049 */
6050- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6
6051+ andcc %l0, _TIF_WORK_SYSCALL, %l6
6052 sub %g0, %o0, %o0
6053 or %g3, %g2, %g3
6054 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
fe2de317 6055diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
c6e2a6c8 6056index d2de213..6b22bc3 100644
fe2de317
MT
6057--- a/arch/sparc/kernel/traps_32.c
6058+++ b/arch/sparc/kernel/traps_32.c
c6e2a6c8 6059@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
15a11c5b
MT
6060 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
6061 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
6062
6063+extern void gr_handle_kernel_exploit(void);
6064+
6065 void die_if_kernel(char *str, struct pt_regs *regs)
6066 {
6067 static int die_counter;
c6e2a6c8 6068@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
bc901d79
MT
6069 count++ < 30 &&
6070 (((unsigned long) rw) >= PAGE_OFFSET) &&
6071 !(((unsigned long) rw) & 0x7)) {
6072- printk("Caller[%08lx]: %pS\n", rw->ins[7],
6073+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
6074 (void *) rw->ins[7]);
6075 rw = (struct reg_window32 *)rw->ins[6];
6076 }
15a11c5b
MT
6077 }
6078 printk("Instruction DUMP:");
6079 instruction_dump ((unsigned long *) regs->pc);
6080- if(regs->psr & PSR_PS)
6081+ if(regs->psr & PSR_PS) {
6082+ gr_handle_kernel_exploit();
6083 do_exit(SIGKILL);
6084+ }
6085 do_exit(SIGSEGV);
6086 }
6087
fe2de317 6088diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
c6e2a6c8 6089index c72fdf5..743a344 100644
fe2de317
MT
6090--- a/arch/sparc/kernel/traps_64.c
6091+++ b/arch/sparc/kernel/traps_64.c
6092@@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
bc901d79
MT
6093 i + 1,
6094 p->trapstack[i].tstate, p->trapstack[i].tpc,
6095 p->trapstack[i].tnpc, p->trapstack[i].tt);
6096- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
6097+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
6098 }
6099 }
6100
fe2de317 6101@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
58c5fc13
MT
6102
6103 lvl -= 0x100;
6104 if (regs->tstate & TSTATE_PRIV) {
6105+
6106+#ifdef CONFIG_PAX_REFCOUNT
6107+ if (lvl == 6)
6108+ pax_report_refcount_overflow(regs);
6109+#endif
6110+
6111 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
6112 die_if_kernel(buffer, regs);
6113 }
fe2de317 6114@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
58c5fc13
MT
6115 void bad_trap_tl1(struct pt_regs *regs, long lvl)
6116 {
6117 char buffer[32];
6118-
6119+
6120 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
6121 0, lvl, SIGTRAP) == NOTIFY_STOP)
6122 return;
6123
6124+#ifdef CONFIG_PAX_REFCOUNT
6125+ if (lvl == 6)
6126+ pax_report_refcount_overflow(regs);
6127+#endif
6128+
6129 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
6130
6131 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
fe2de317 6132@@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
bc901d79
MT
6133 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
6134 printk("%s" "ERROR(%d): ",
6135 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
6136- printk("TPC<%pS>\n", (void *) regs->tpc);
6137+ printk("TPC<%pA>\n", (void *) regs->tpc);
6138 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
6139 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
6140 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
fe2de317 6141@@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
bc901d79
MT
6142 smp_processor_id(),
6143 (type & 0x1) ? 'I' : 'D',
6144 regs->tpc);
6145- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
6146+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
6147 panic("Irrecoverable Cheetah+ parity error.");
6148 }
6149
fe2de317 6150@@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
bc901d79
MT
6151 smp_processor_id(),
6152 (type & 0x1) ? 'I' : 'D',
6153 regs->tpc);
6154- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
6155+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
6156 }
6157
6158 struct sun4v_error_entry {
fe2de317 6159@@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
bc901d79
MT
6160
6161 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
6162 regs->tpc, tl);
6163- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
6164+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
6165 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6166- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
6167+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
6168 (void *) regs->u_regs[UREG_I7]);
6169 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
6170 "pte[%lx] error[%lx]\n",
fe2de317 6171@@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
bc901d79
MT
6172
6173 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
6174 regs->tpc, tl);
6175- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
6176+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
6177 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
6178- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
6179+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
6180 (void *) regs->u_regs[UREG_I7]);
6181 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
6182 "pte[%lx] error[%lx]\n",
fe2de317 6183@@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
6184 fp = (unsigned long)sf->fp + STACK_BIAS;
6185 }
6186
6187- printk(" [%016lx] %pS\n", pc, (void *) pc);
6188+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6189 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6190 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
6191 int index = tsk->curr_ret_stack;
6192 if (tsk->ret_stack && index >= graph) {
6193 pc = tsk->ret_stack[index - graph].ret;
6194- printk(" [%016lx] %pS\n", pc, (void *) pc);
6195+ printk(" [%016lx] %pA\n", pc, (void *) pc);
6196 graph++;
6197 }
6198 }
fe2de317 6199@@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
6200 return (struct reg_window *) (fp + STACK_BIAS);
6201 }
6202
6203+extern void gr_handle_kernel_exploit(void);
6204+
6205 void die_if_kernel(char *str, struct pt_regs *regs)
6206 {
6207 static int die_counter;
fe2de317 6208@@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6209 while (rw &&
6210 count++ < 30 &&
6211 kstack_valid(tp, (unsigned long) rw)) {
6212- printk("Caller[%016lx]: %pS\n", rw->ins[7],
6213+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
6214 (void *) rw->ins[7]);
6215
6216 rw = kernel_stack_up(rw);
fe2de317 6217@@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
6218 }
6219 user_instruction_dump ((unsigned int __user *) regs->tpc);
6220 }
6221- if (regs->tstate & TSTATE_PRIV)
6222+ if (regs->tstate & TSTATE_PRIV) {
6223+ gr_handle_kernel_exploit();
6224 do_exit(SIGKILL);
6225+ }
6226 do_exit(SIGSEGV);
6227 }
6228 EXPORT_SYMBOL(die_if_kernel);
fe2de317 6229diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
c6e2a6c8 6230index dae85bc..af1e19d 100644
6231--- a/arch/sparc/kernel/unaligned_64.c
6232+++ b/arch/sparc/kernel/unaligned_64.c
6233@@ -279,7 +279,7 @@ static void log_unaligned(struct pt_regs *regs)
6234 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
6235
6236 if (__ratelimit(&ratelimit)) {
6237- printk("Kernel unaligned access at TPC[%lx] %pS\n",
6238+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
6239 regs->tpc, (void *) regs->tpc);
6240 }
6241 }
6242diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
6243index a3fc437..fea9957 100644
6244--- a/arch/sparc/lib/Makefile
6245+++ b/arch/sparc/lib/Makefile
6246@@ -2,7 +2,7 @@
6247 #
6248
6249 asflags-y := -ansi -DST_DIV0=0x02
6250-ccflags-y := -Werror
6251+#ccflags-y := -Werror
6252
6253 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
6254 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
6255diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
6256index 59186e0..f747d7a 100644
6257--- a/arch/sparc/lib/atomic_64.S
6258+++ b/arch/sparc/lib/atomic_64.S
6259@@ -18,7 +18,12 @@
6260 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6261 BACKOFF_SETUP(%o2)
6262 1: lduw [%o1], %g1
6263- add %g1, %o0, %g7
6264+ addcc %g1, %o0, %g7
6265+
6266+#ifdef CONFIG_PAX_REFCOUNT
6267+ tvs %icc, 6
6268+#endif
6269+
6270 cas [%o1], %g1, %g7
6271 cmp %g1, %g7
6892158b 6272 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
fe2de317 6273@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
6274 2: BACKOFF_SPIN(%o2, %o3, 1b)
6275 .size atomic_add, .-atomic_add
6276
6277+ .globl atomic_add_unchecked
6278+ .type atomic_add_unchecked,#function
6279+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6280+ BACKOFF_SETUP(%o2)
6281+1: lduw [%o1], %g1
6282+ add %g1, %o0, %g7
6283+ cas [%o1], %g1, %g7
6284+ cmp %g1, %g7
6285+ bne,pn %icc, 2f
6286+ nop
6287+ retl
6288+ nop
6289+2: BACKOFF_SPIN(%o2, %o3, 1b)
6290+ .size atomic_add_unchecked, .-atomic_add_unchecked
6291+
6292 .globl atomic_sub
6293 .type atomic_sub,#function
6294 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6295 BACKOFF_SETUP(%o2)
6296 1: lduw [%o1], %g1
6297- sub %g1, %o0, %g7
6298+ subcc %g1, %o0, %g7
6299+
6300+#ifdef CONFIG_PAX_REFCOUNT
6301+ tvs %icc, 6
6302+#endif
6303+
6304 cas [%o1], %g1, %g7
6305 cmp %g1, %g7
6892158b 6306 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
fe2de317 6307@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6308 2: BACKOFF_SPIN(%o2, %o3, 1b)
6309 .size atomic_sub, .-atomic_sub
6310
6311+ .globl atomic_sub_unchecked
6312+ .type atomic_sub_unchecked,#function
6313+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6314+ BACKOFF_SETUP(%o2)
6315+1: lduw [%o1], %g1
6316+ sub %g1, %o0, %g7
6317+ cas [%o1], %g1, %g7
6318+ cmp %g1, %g7
6319+ bne,pn %icc, 2f
6320+ nop
6321+ retl
6322+ nop
6323+2: BACKOFF_SPIN(%o2, %o3, 1b)
6324+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
6325+
6326 .globl atomic_add_ret
6327 .type atomic_add_ret,#function
6328 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6329 BACKOFF_SETUP(%o2)
6330 1: lduw [%o1], %g1
6331- add %g1, %o0, %g7
6332+ addcc %g1, %o0, %g7
6333+
6334+#ifdef CONFIG_PAX_REFCOUNT
6335+ tvs %icc, 6
6336+#endif
6337+
6338 cas [%o1], %g1, %g7
6339 cmp %g1, %g7
6892158b 6340 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
fe2de317 6341@@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6342 2: BACKOFF_SPIN(%o2, %o3, 1b)
6343 .size atomic_add_ret, .-atomic_add_ret
6344
6345+ .globl atomic_add_ret_unchecked
6346+ .type atomic_add_ret_unchecked,#function
6347+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6348+ BACKOFF_SETUP(%o2)
6349+1: lduw [%o1], %g1
6350+ addcc %g1, %o0, %g7
6351+ cas [%o1], %g1, %g7
6352+ cmp %g1, %g7
6353+ bne,pn %icc, 2f
6354+ add %g7, %o0, %g7
6355+ sra %g7, 0, %o0
6356+ retl
6357+ nop
6358+2: BACKOFF_SPIN(%o2, %o3, 1b)
6359+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
6360+
6361 .globl atomic_sub_ret
6362 .type atomic_sub_ret,#function
6363 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6364 BACKOFF_SETUP(%o2)
6365 1: lduw [%o1], %g1
6366- sub %g1, %o0, %g7
6367+ subcc %g1, %o0, %g7
6368+
6369+#ifdef CONFIG_PAX_REFCOUNT
6370+ tvs %icc, 6
6371+#endif
6372+
6373 cas [%o1], %g1, %g7
6374 cmp %g1, %g7
6892158b 6375 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
fe2de317 6376@@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6377 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6378 BACKOFF_SETUP(%o2)
6379 1: ldx [%o1], %g1
6380- add %g1, %o0, %g7
6381+ addcc %g1, %o0, %g7
6382+
6383+#ifdef CONFIG_PAX_REFCOUNT
6384+ tvs %xcc, 6
6385+#endif
6386+
6387 casx [%o1], %g1, %g7
6388 cmp %g1, %g7
6892158b 6389 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
fe2de317 6390@@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
6391 2: BACKOFF_SPIN(%o2, %o3, 1b)
6392 .size atomic64_add, .-atomic64_add
6393
6394+ .globl atomic64_add_unchecked
6395+ .type atomic64_add_unchecked,#function
6396+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6397+ BACKOFF_SETUP(%o2)
6398+1: ldx [%o1], %g1
6399+ addcc %g1, %o0, %g7
6400+ casx [%o1], %g1, %g7
6401+ cmp %g1, %g7
6402+ bne,pn %xcc, 2f
6403+ nop
6404+ retl
6405+ nop
6406+2: BACKOFF_SPIN(%o2, %o3, 1b)
6407+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
6408+
6409 .globl atomic64_sub
6410 .type atomic64_sub,#function
6411 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6412 BACKOFF_SETUP(%o2)
6413 1: ldx [%o1], %g1
6414- sub %g1, %o0, %g7
6415+ subcc %g1, %o0, %g7
6416+
6417+#ifdef CONFIG_PAX_REFCOUNT
6418+ tvs %xcc, 6
6419+#endif
6420+
6421 casx [%o1], %g1, %g7
6422 cmp %g1, %g7
6892158b 6423 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
fe2de317 6424@@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
6425 2: BACKOFF_SPIN(%o2, %o3, 1b)
6426 .size atomic64_sub, .-atomic64_sub
6427
6428+ .globl atomic64_sub_unchecked
6429+ .type atomic64_sub_unchecked,#function
6430+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
6431+ BACKOFF_SETUP(%o2)
6432+1: ldx [%o1], %g1
6433+ subcc %g1, %o0, %g7
6434+ casx [%o1], %g1, %g7
6435+ cmp %g1, %g7
6436+ bne,pn %xcc, 2f
6437+ nop
6438+ retl
6439+ nop
6440+2: BACKOFF_SPIN(%o2, %o3, 1b)
6441+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
6442+
6443 .globl atomic64_add_ret
6444 .type atomic64_add_ret,#function
6445 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6446 BACKOFF_SETUP(%o2)
6447 1: ldx [%o1], %g1
6448- add %g1, %o0, %g7
6449+ addcc %g1, %o0, %g7
6450+
6451+#ifdef CONFIG_PAX_REFCOUNT
6452+ tvs %xcc, 6
6453+#endif
6454+
6455 casx [%o1], %g1, %g7
6456 cmp %g1, %g7
6892158b 6457 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
fe2de317 6458@@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
6459 2: BACKOFF_SPIN(%o2, %o3, 1b)
6460 .size atomic64_add_ret, .-atomic64_add_ret
6461
6462+ .globl atomic64_add_ret_unchecked
6463+ .type atomic64_add_ret_unchecked,#function
6464+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
6465+ BACKOFF_SETUP(%o2)
6466+1: ldx [%o1], %g1
6467+ addcc %g1, %o0, %g7
6468+ casx [%o1], %g1, %g7
6469+ cmp %g1, %g7
6470+ bne,pn %xcc, 2f
6471+ add %g7, %o0, %g7
6472+ mov %g7, %o0
6473+ retl
6474+ nop
6475+2: BACKOFF_SPIN(%o2, %o3, 1b)
6476+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
6477+
6478 .globl atomic64_sub_ret
6479 .type atomic64_sub_ret,#function
6480 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
6481 BACKOFF_SETUP(%o2)
6482 1: ldx [%o1], %g1
6483- sub %g1, %o0, %g7
6484+ subcc %g1, %o0, %g7
6485+
6486+#ifdef CONFIG_PAX_REFCOUNT
6487+ tvs %xcc, 6
6488+#endif
6489+
6490 casx [%o1], %g1, %g7
6491 cmp %g1, %g7
6892158b 6492 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
fe2de317 6493diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5e856224 6494index f73c224..662af10 100644
6495--- a/arch/sparc/lib/ksyms.c
6496+++ b/arch/sparc/lib/ksyms.c
5e856224 6497@@ -136,12 +136,18 @@ EXPORT_SYMBOL(__downgrade_write);
6498
6499 /* Atomic counter implementation. */
6500 EXPORT_SYMBOL(atomic_add);
6501+EXPORT_SYMBOL(atomic_add_unchecked);
6502 EXPORT_SYMBOL(atomic_add_ret);
15a11c5b 6503+EXPORT_SYMBOL(atomic_add_ret_unchecked);
6504 EXPORT_SYMBOL(atomic_sub);
6505+EXPORT_SYMBOL(atomic_sub_unchecked);
6506 EXPORT_SYMBOL(atomic_sub_ret);
6507 EXPORT_SYMBOL(atomic64_add);
57199397 6508+EXPORT_SYMBOL(atomic64_add_unchecked);
58c5fc13 6509 EXPORT_SYMBOL(atomic64_add_ret);
57199397 6510+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
6511 EXPORT_SYMBOL(atomic64_sub);
6512+EXPORT_SYMBOL(atomic64_sub_unchecked);
6513 EXPORT_SYMBOL(atomic64_sub_ret);
6514
6515 /* Atomic bit operations. */
fe2de317 6516diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
4c928ab7 6517index 301421c..e2535d1 100644
6518--- a/arch/sparc/mm/Makefile
6519+++ b/arch/sparc/mm/Makefile
6520@@ -2,7 +2,7 @@
6521 #
6522
fe2de317 6523 asflags-y := -ansi
6524-ccflags-y := -Werror
6525+#ccflags-y := -Werror
6526
6527 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
6528 obj-y += fault_$(BITS).o
6529diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
572b4308 6530index df3155a..b6e32fa 100644
6531--- a/arch/sparc/mm/fault_32.c
6532+++ b/arch/sparc/mm/fault_32.c
6533@@ -21,6 +21,9 @@
6534 #include <linux/perf_event.h>
58c5fc13 6535 #include <linux/interrupt.h>
6536 #include <linux/kdebug.h>
6537+#include <linux/slab.h>
6538+#include <linux/pagemap.h>
6539+#include <linux/compiler.h>
6540
58c5fc13 6541 #include <asm/page.h>
c6e2a6c8 6542 #include <asm/pgtable.h>
572b4308 6543@@ -207,6 +210,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
6544 return safe_compute_effective_address(regs, insn);
6545 }
6546
6547+#ifdef CONFIG_PAX_PAGEEXEC
6548+#ifdef CONFIG_PAX_DLRESOLVE
ae4e228f 6549+static void pax_emuplt_close(struct vm_area_struct *vma)
6550+{
6551+ vma->vm_mm->call_dl_resolve = 0UL;
6552+}
6553+
6554+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6555+{
6556+ unsigned int *kaddr;
6557+
6558+ vmf->page = alloc_page(GFP_HIGHUSER);
6559+ if (!vmf->page)
6560+ return VM_FAULT_OOM;
6561+
6562+ kaddr = kmap(vmf->page);
6563+ memset(kaddr, 0, PAGE_SIZE);
6564+ kaddr[0] = 0x9DE3BFA8U; /* save */
6565+ flush_dcache_page(vmf->page);
6566+ kunmap(vmf->page);
6567+ return VM_FAULT_MAJOR;
6568+}
6569+
6570+static const struct vm_operations_struct pax_vm_ops = {
6571+ .close = pax_emuplt_close,
6572+ .fault = pax_emuplt_fault
6573+};
6574+
6575+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6576+{
6577+ int ret;
6578+
df50ba0c 6579+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6580+ vma->vm_mm = current->mm;
6581+ vma->vm_start = addr;
6582+ vma->vm_end = addr + PAGE_SIZE;
6583+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6584+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6585+ vma->vm_ops = &pax_vm_ops;
6586+
6587+ ret = insert_vm_struct(current->mm, vma);
6588+ if (ret)
6589+ return ret;
6590+
6591+ ++current->mm->total_vm;
6592+ return 0;
6593+}
6594+#endif
6595+
6596+/*
6597+ * PaX: decide what to do with offenders (regs->pc = fault address)
6598+ *
6599+ * returns 1 when task should be killed
6600+ * 2 when patched PLT trampoline was detected
6601+ * 3 when unpatched PLT trampoline was detected
6602+ */
6603+static int pax_handle_fetch_fault(struct pt_regs *regs)
6604+{
6605+
6606+#ifdef CONFIG_PAX_EMUPLT
6607+ int err;
6608+
6609+ do { /* PaX: patched PLT emulation #1 */
6610+ unsigned int sethi1, sethi2, jmpl;
6611+
6612+ err = get_user(sethi1, (unsigned int *)regs->pc);
6613+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
6614+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
6615+
6616+ if (err)
6617+ break;
6618+
6619+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6620+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6621+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6622+ {
6623+ unsigned int addr;
6624+
6625+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6626+ addr = regs->u_regs[UREG_G1];
6627+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6628+ regs->pc = addr;
6629+ regs->npc = addr+4;
6630+ return 2;
6631+ }
6632+ } while (0);
6633+
572b4308 6634+ do { /* PaX: patched PLT emulation #2 */
6635+ unsigned int ba;
6636+
6637+ err = get_user(ba, (unsigned int *)regs->pc);
6638+
6639+ if (err)
6640+ break;
6641+
6642+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
6643+ unsigned int addr;
6644+
6645+ if ((ba & 0xFFC00000U) == 0x30800000U)
6646+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6647+ else
6648+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6649+ regs->pc = addr;
6650+ regs->npc = addr+4;
6651+ return 2;
6652+ }
572b4308 6653+ } while (0);
6654+
6655+ do { /* PaX: patched PLT emulation #3 */
572b4308 6656+ unsigned int sethi, bajmpl, nop;
6657+
6658+ err = get_user(sethi, (unsigned int *)regs->pc);
572b4308 6659+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
6660+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6661+
6662+ if (err)
6663+ break;
6664+
6665+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
572b4308 6666+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
6667+ nop == 0x01000000U)
6668+ {
6669+ unsigned int addr;
6670+
6671+ addr = (sethi & 0x003FFFFFU) << 10;
6672+ regs->u_regs[UREG_G1] = addr;
6673+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
6674+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6675+ else
6676+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6677+ regs->pc = addr;
6678+ regs->npc = addr+4;
6679+ return 2;
6680+ }
6681+ } while (0);
6682+
6683+ do { /* PaX: unpatched PLT emulation step 1 */
6684+ unsigned int sethi, ba, nop;
6685+
6686+ err = get_user(sethi, (unsigned int *)regs->pc);
6687+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
6688+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
6689+
6690+ if (err)
6691+ break;
6692+
6693+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6694+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6695+ nop == 0x01000000U)
6696+ {
6697+ unsigned int addr, save, call;
6698+
6699+ if ((ba & 0xFFC00000U) == 0x30800000U)
6700+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
6701+ else
6702+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
6703+
6704+ err = get_user(save, (unsigned int *)addr);
6705+ err |= get_user(call, (unsigned int *)(addr+4));
6706+ err |= get_user(nop, (unsigned int *)(addr+8));
6707+ if (err)
6708+ break;
6709+
6710+#ifdef CONFIG_PAX_DLRESOLVE
6711+ if (save == 0x9DE3BFA8U &&
6712+ (call & 0xC0000000U) == 0x40000000U &&
6713+ nop == 0x01000000U)
6714+ {
6715+ struct vm_area_struct *vma;
6716+ unsigned long call_dl_resolve;
6717+
6718+ down_read(&current->mm->mmap_sem);
6719+ call_dl_resolve = current->mm->call_dl_resolve;
6720+ up_read(&current->mm->mmap_sem);
6721+ if (likely(call_dl_resolve))
6722+ goto emulate;
6723+
6724+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6725+
6726+ down_write(&current->mm->mmap_sem);
6727+ if (current->mm->call_dl_resolve) {
6728+ call_dl_resolve = current->mm->call_dl_resolve;
6729+ up_write(&current->mm->mmap_sem);
6730+ if (vma)
6731+ kmem_cache_free(vm_area_cachep, vma);
6732+ goto emulate;
6733+ }
6734+
6735+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6736+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6737+ up_write(&current->mm->mmap_sem);
6738+ if (vma)
6739+ kmem_cache_free(vm_area_cachep, vma);
6740+ return 1;
6741+ }
6742+
6743+ if (pax_insert_vma(vma, call_dl_resolve)) {
6744+ up_write(&current->mm->mmap_sem);
6745+ kmem_cache_free(vm_area_cachep, vma);
6746+ return 1;
6747+ }
6748+
6749+ current->mm->call_dl_resolve = call_dl_resolve;
6750+ up_write(&current->mm->mmap_sem);
6751+
6752+emulate:
6753+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6754+ regs->pc = call_dl_resolve;
6755+ regs->npc = addr+4;
6756+ return 3;
6757+ }
6758+#endif
6759+
6760+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6761+ if ((save & 0xFFC00000U) == 0x05000000U &&
6762+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6763+ nop == 0x01000000U)
6764+ {
6765+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6766+ regs->u_regs[UREG_G2] = addr + 4;
6767+ addr = (save & 0x003FFFFFU) << 10;
6768+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6769+ regs->pc = addr;
6770+ regs->npc = addr+4;
6771+ return 3;
6772+ }
6773+ }
6774+ } while (0);
6775+
6776+ do { /* PaX: unpatched PLT emulation step 2 */
6777+ unsigned int save, call, nop;
6778+
6779+ err = get_user(save, (unsigned int *)(regs->pc-4));
6780+ err |= get_user(call, (unsigned int *)regs->pc);
6781+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6782+ if (err)
6783+ break;
6784+
6785+ if (save == 0x9DE3BFA8U &&
6786+ (call & 0xC0000000U) == 0x40000000U &&
6787+ nop == 0x01000000U)
6788+ {
6789+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6790+
6791+ regs->u_regs[UREG_RETPC] = regs->pc;
6792+ regs->pc = dl_resolve;
6793+ regs->npc = dl_resolve+4;
6794+ return 3;
6795+ }
6796+ } while (0);
6797+#endif
6798+
6799+ return 1;
6800+}
6801+
6e9df6a3 6802+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6803+{
6804+ unsigned long i;
6805+
6806+ printk(KERN_ERR "PAX: bytes at PC: ");
ae4e228f 6807+ for (i = 0; i < 8; i++) {
6808+ unsigned int c;
6809+ if (get_user(c, (unsigned int *)pc+i))
6810+ printk(KERN_CONT "???????? ");
6811+ else
6812+ printk(KERN_CONT "%08x ", c);
6813+ }
6814+ printk("\n");
6815+}
6816+#endif
6817+
6818 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
6819 int text_fault)
58c5fc13 6820 {
572b4308 6821@@ -282,6 +556,24 @@ good_area:
6822 if(!(vma->vm_flags & VM_WRITE))
6823 goto bad_area;
6824 } else {
6825+
6826+#ifdef CONFIG_PAX_PAGEEXEC
6827+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6828+ up_read(&mm->mmap_sem);
6829+ switch (pax_handle_fetch_fault(regs)) {
6830+
6831+#ifdef CONFIG_PAX_EMUPLT
6832+ case 2:
6833+ case 3:
6834+ return;
6835+#endif
6836+
6837+ }
6838+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6839+ do_group_exit(SIGKILL);
6840+ }
6841+#endif
6842+
6843 /* Allow reads even for write-only mappings */
6844 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6845 goto bad_area;
fe2de317 6846diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
572b4308 6847index 1fe0429..8dd5dd5 100644
6848--- a/arch/sparc/mm/fault_64.c
6849+++ b/arch/sparc/mm/fault_64.c
ae4e228f 6850@@ -21,6 +21,9 @@
6851 #include <linux/kprobes.h>
6852 #include <linux/kdebug.h>
6853 #include <linux/percpu.h>
6854+#include <linux/slab.h>
6855+#include <linux/pagemap.h>
6856+#include <linux/compiler.h>
6857
6858 #include <asm/page.h>
6859 #include <asm/pgtable.h>
fe2de317 6860@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6861 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6862 regs->tpc);
6863 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6864- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6865+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6866 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6867 dump_stack();
6868 unhandled_fault(regs->tpc, current, regs);
572b4308 6869@@ -272,6 +275,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
6870 show_regs(regs);
6871 }
6872
6873+#ifdef CONFIG_PAX_PAGEEXEC
6874+#ifdef CONFIG_PAX_DLRESOLVE
6875+static void pax_emuplt_close(struct vm_area_struct *vma)
6876+{
6877+ vma->vm_mm->call_dl_resolve = 0UL;
6878+}
6879+
6880+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6881+{
6882+ unsigned int *kaddr;
6883+
6884+ vmf->page = alloc_page(GFP_HIGHUSER);
6885+ if (!vmf->page)
6886+ return VM_FAULT_OOM;
6887+
6888+ kaddr = kmap(vmf->page);
6889+ memset(kaddr, 0, PAGE_SIZE);
6890+ kaddr[0] = 0x9DE3BFA8U; /* save */
6891+ flush_dcache_page(vmf->page);
6892+ kunmap(vmf->page);
6893+ return VM_FAULT_MAJOR;
6894+}
6895+
6896+static const struct vm_operations_struct pax_vm_ops = {
6897+ .close = pax_emuplt_close,
6898+ .fault = pax_emuplt_fault
6899+};
6900+
6901+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6902+{
6903+ int ret;
6904+
df50ba0c 6905+ INIT_LIST_HEAD(&vma->anon_vma_chain);
6906+ vma->vm_mm = current->mm;
6907+ vma->vm_start = addr;
6908+ vma->vm_end = addr + PAGE_SIZE;
6909+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6910+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6911+ vma->vm_ops = &pax_vm_ops;
6912+
6913+ ret = insert_vm_struct(current->mm, vma);
6914+ if (ret)
6915+ return ret;
6916+
6917+ ++current->mm->total_vm;
6918+ return 0;
6919+}
6920+#endif
6921+
6922+/*
6923+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6924+ *
6925+ * returns 1 when task should be killed
6926+ * 2 when patched PLT trampoline was detected
6927+ * 3 when unpatched PLT trampoline was detected
6928+ */
6929+static int pax_handle_fetch_fault(struct pt_regs *regs)
6930+{
6931+
6932+#ifdef CONFIG_PAX_EMUPLT
6933+ int err;
6934+
6935+ do { /* PaX: patched PLT emulation #1 */
6936+ unsigned int sethi1, sethi2, jmpl;
6937+
6938+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6939+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6940+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6941+
6942+ if (err)
6943+ break;
6944+
6945+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6946+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6947+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6948+ {
6949+ unsigned long addr;
6950+
6951+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6952+ addr = regs->u_regs[UREG_G1];
6953+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6954+
6955+ if (test_thread_flag(TIF_32BIT))
6956+ addr &= 0xFFFFFFFFUL;
6957+
6958+ regs->tpc = addr;
6959+ regs->tnpc = addr+4;
6960+ return 2;
6961+ }
6962+ } while (0);
6963+
572b4308 6964+ do { /* PaX: patched PLT emulation #2 */
6965+ unsigned int ba;
6966+
6967+ err = get_user(ba, (unsigned int *)regs->tpc);
6968+
6969+ if (err)
6970+ break;
6971+
6972+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
6973+ unsigned long addr;
6974+
6975+ if ((ba & 0xFFC00000U) == 0x30800000U)
6976+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6977+ else
6978+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6979+
6980+ if (test_thread_flag(TIF_32BIT))
6981+ addr &= 0xFFFFFFFFUL;
6982+
6983+ regs->tpc = addr;
6984+ regs->tnpc = addr+4;
6985+ return 2;
6986+ }
572b4308 6987+ } while (0);
6988+
6989+ do { /* PaX: patched PLT emulation #3 */
572b4308 6990+ unsigned int sethi, bajmpl, nop;
6991+
6992+ err = get_user(sethi, (unsigned int *)regs->tpc);
572b4308 6993+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
6994+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6995+
6996+ if (err)
6997+ break;
6998+
6999+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
572b4308 7000+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
7001+ nop == 0x01000000U)
7002+ {
7003+ unsigned long addr;
7004+
7005+ addr = (sethi & 0x003FFFFFU) << 10;
7006+ regs->u_regs[UREG_G1] = addr;
7007+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
7008+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7009+ else
7010+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7011+
7012+ if (test_thread_flag(TIF_32BIT))
7013+ addr &= 0xFFFFFFFFUL;
7014+
7015+ regs->tpc = addr;
7016+ regs->tnpc = addr+4;
7017+ return 2;
7018+ }
7019+ } while (0);
7020+
7021+ do { /* PaX: patched PLT emulation #4 */
ae4e228f 7022+ unsigned int sethi, mov1, call, mov2;
58c5fc13 7023+
7024+ err = get_user(sethi, (unsigned int *)regs->tpc);
7025+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
7026+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
7027+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
7028+
7029+ if (err)
7030+ break;
7031+
7032+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7033+ mov1 == 0x8210000FU &&
7034+ (call & 0xC0000000U) == 0x40000000U &&
7035+ mov2 == 0x9E100001U)
7036+ {
7037+ unsigned long addr;
7038+
7039+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
7040+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7041+
7042+ if (test_thread_flag(TIF_32BIT))
7043+ addr &= 0xFFFFFFFFUL;
7044+
7045+ regs->tpc = addr;
7046+ regs->tnpc = addr+4;
7047+ return 2;
7048+ }
7049+ } while (0);
7050+
7051+ do { /* PaX: patched PLT emulation #5 */
ae4e228f 7052+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
58c5fc13 7053+
7054+ err = get_user(sethi, (unsigned int *)regs->tpc);
7055+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7056+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7057+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
7058+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
7059+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
7060+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
7061+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
7062+
7063+ if (err)
7064+ break;
7065+
7066+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7067+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
7068+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7069+ (or1 & 0xFFFFE000U) == 0x82106000U &&
7070+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
ae4e228f 7071+ sllx == 0x83287020U &&
7072+ jmpl == 0x81C04005U &&
7073+ nop == 0x01000000U)
7074+ {
7075+ unsigned long addr;
7076+
7077+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7078+ regs->u_regs[UREG_G1] <<= 32;
7079+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7080+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7081+ regs->tpc = addr;
7082+ regs->tnpc = addr+4;
7083+ return 2;
7084+ }
7085+ } while (0);
7086+
7087+ do { /* PaX: patched PLT emulation #6 */
ae4e228f 7088+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
58c5fc13 7089+
7090+ err = get_user(sethi, (unsigned int *)regs->tpc);
7091+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
7092+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
7093+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
7094+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
7095+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
7096+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
7097+
7098+ if (err)
7099+ break;
7100+
7101+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7102+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
58c5fc13 7103+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
ae4e228f 7104+ sllx == 0x83287020U &&
7105+ (or & 0xFFFFE000U) == 0x8A116000U &&
7106+ jmpl == 0x81C04005U &&
7107+ nop == 0x01000000U)
7108+ {
7109+ unsigned long addr;
7110+
7111+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
7112+ regs->u_regs[UREG_G1] <<= 32;
7113+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
7114+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
7115+ regs->tpc = addr;
7116+ regs->tnpc = addr+4;
7117+ return 2;
7118+ }
7119+ } while (0);
7120+
7121+ do { /* PaX: unpatched PLT emulation step 1 */
7122+ unsigned int sethi, ba, nop;
7123+
7124+ err = get_user(sethi, (unsigned int *)regs->tpc);
7125+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7126+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7127+
7128+ if (err)
7129+ break;
7130+
7131+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7132+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
7133+ nop == 0x01000000U)
7134+ {
7135+ unsigned long addr;
7136+ unsigned int save, call;
ae4e228f 7137+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
7138+
7139+ if ((ba & 0xFFC00000U) == 0x30800000U)
7140+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
7141+ else
7142+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7143+
7144+ if (test_thread_flag(TIF_32BIT))
7145+ addr &= 0xFFFFFFFFUL;
7146+
7147+ err = get_user(save, (unsigned int *)addr);
7148+ err |= get_user(call, (unsigned int *)(addr+4));
7149+ err |= get_user(nop, (unsigned int *)(addr+8));
7150+ if (err)
7151+ break;
7152+
7153+#ifdef CONFIG_PAX_DLRESOLVE
7154+ if (save == 0x9DE3BFA8U &&
7155+ (call & 0xC0000000U) == 0x40000000U &&
7156+ nop == 0x01000000U)
7157+ {
7158+ struct vm_area_struct *vma;
7159+ unsigned long call_dl_resolve;
7160+
7161+ down_read(&current->mm->mmap_sem);
7162+ call_dl_resolve = current->mm->call_dl_resolve;
7163+ up_read(&current->mm->mmap_sem);
7164+ if (likely(call_dl_resolve))
7165+ goto emulate;
7166+
7167+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
7168+
7169+ down_write(&current->mm->mmap_sem);
7170+ if (current->mm->call_dl_resolve) {
7171+ call_dl_resolve = current->mm->call_dl_resolve;
7172+ up_write(&current->mm->mmap_sem);
7173+ if (vma)
7174+ kmem_cache_free(vm_area_cachep, vma);
7175+ goto emulate;
7176+ }
7177+
7178+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
7179+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
7180+ up_write(&current->mm->mmap_sem);
7181+ if (vma)
7182+ kmem_cache_free(vm_area_cachep, vma);
7183+ return 1;
7184+ }
7185+
7186+ if (pax_insert_vma(vma, call_dl_resolve)) {
7187+ up_write(&current->mm->mmap_sem);
7188+ kmem_cache_free(vm_area_cachep, vma);
7189+ return 1;
7190+ }
7191+
7192+ current->mm->call_dl_resolve = call_dl_resolve;
7193+ up_write(&current->mm->mmap_sem);
7194+
7195+emulate:
7196+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7197+ regs->tpc = call_dl_resolve;
7198+ regs->tnpc = addr+4;
7199+ return 3;
7200+ }
7201+#endif
7202+
7203+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
7204+ if ((save & 0xFFC00000U) == 0x05000000U &&
7205+ (call & 0xFFFFE000U) == 0x85C0A000U &&
7206+ nop == 0x01000000U)
7207+ {
7208+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7209+ regs->u_regs[UREG_G2] = addr + 4;
7210+ addr = (save & 0x003FFFFFU) << 10;
7211+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
7212+
7213+ if (test_thread_flag(TIF_32BIT))
7214+ addr &= 0xFFFFFFFFUL;
7215+
7216+ regs->tpc = addr;
7217+ regs->tnpc = addr+4;
7218+ return 3;
7219+ }
7220+
7221+ /* PaX: 64-bit PLT stub */
7222+ err = get_user(sethi1, (unsigned int *)addr);
7223+ err |= get_user(sethi2, (unsigned int *)(addr+4));
7224+ err |= get_user(or1, (unsigned int *)(addr+8));
7225+ err |= get_user(or2, (unsigned int *)(addr+12));
7226+ err |= get_user(sllx, (unsigned int *)(addr+16));
7227+ err |= get_user(add, (unsigned int *)(addr+20));
7228+ err |= get_user(jmpl, (unsigned int *)(addr+24));
7229+ err |= get_user(nop, (unsigned int *)(addr+28));
7230+ if (err)
7231+ break;
7232+
7233+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
7234+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
7235+ (or1 & 0xFFFFE000U) == 0x88112000U &&
7236+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
7237+ sllx == 0x89293020U &&
7238+ add == 0x8A010005U &&
7239+ jmpl == 0x89C14000U &&
7240+ nop == 0x01000000U)
7241+ {
7242+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
7243+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
7244+ regs->u_regs[UREG_G4] <<= 32;
7245+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
7246+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
7247+ regs->u_regs[UREG_G4] = addr + 24;
7248+ addr = regs->u_regs[UREG_G5];
7249+ regs->tpc = addr;
7250+ regs->tnpc = addr+4;
7251+ return 3;
7252+ }
7253+ }
7254+ } while (0);
7255+
7256+#ifdef CONFIG_PAX_DLRESOLVE
7257+ do { /* PaX: unpatched PLT emulation step 2 */
7258+ unsigned int save, call, nop;
7259+
7260+ err = get_user(save, (unsigned int *)(regs->tpc-4));
7261+ err |= get_user(call, (unsigned int *)regs->tpc);
7262+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
7263+ if (err)
7264+ break;
7265+
7266+ if (save == 0x9DE3BFA8U &&
7267+ (call & 0xC0000000U) == 0x40000000U &&
7268+ nop == 0x01000000U)
7269+ {
7270+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
7271+
7272+ if (test_thread_flag(TIF_32BIT))
7273+ dl_resolve &= 0xFFFFFFFFUL;
7274+
7275+ regs->u_regs[UREG_RETPC] = regs->tpc;
7276+ regs->tpc = dl_resolve;
7277+ regs->tnpc = dl_resolve+4;
7278+ return 3;
7279+ }
7280+ } while (0);
7281+#endif
7282+
7283+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
7284+ unsigned int sethi, ba, nop;
7285+
7286+ err = get_user(sethi, (unsigned int *)regs->tpc);
7287+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
7288+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
7289+
7290+ if (err)
7291+ break;
7292+
7293+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
7294+ (ba & 0xFFF00000U) == 0x30600000U &&
7295+ nop == 0x01000000U)
7296+ {
7297+ unsigned long addr;
7298+
7299+ addr = (sethi & 0x003FFFFFU) << 10;
7300+ regs->u_regs[UREG_G1] = addr;
7301+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
7302+
7303+ if (test_thread_flag(TIF_32BIT))
7304+ addr &= 0xFFFFFFFFUL;
7305+
7306+ regs->tpc = addr;
7307+ regs->tnpc = addr+4;
7308+ return 2;
7309+ }
7310+ } while (0);
7311+
7312+#endif
7313+
7314+ return 1;
7315+}
7316+
6e9df6a3 7317+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7318+{
7319+ unsigned long i;
7320+
7321+ printk(KERN_ERR "PAX: bytes at PC: ");
ae4e228f 7322+ for (i = 0; i < 8; i++) {
7323+ unsigned int c;
7324+ if (get_user(c, (unsigned int *)pc+i))
7325+ printk(KERN_CONT "???????? ");
7326+ else
7327+ printk(KERN_CONT "%08x ", c);
7328+ }
7329+ printk("\n");
7330+}
7331+#endif
7332+
7333 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
7334 {
7335 struct mm_struct *mm = current->mm;
572b4308 7336@@ -343,6 +806,29 @@ retry:
7337 if (!vma)
7338 goto bad_area;
7339
7340+#ifdef CONFIG_PAX_PAGEEXEC
7341+ /* PaX: detect ITLB misses on non-exec pages */
7342+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
7343+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
7344+ {
7345+ if (address != regs->tpc)
7346+ goto good_area;
7347+
7348+ up_read(&mm->mmap_sem);
7349+ switch (pax_handle_fetch_fault(regs)) {
7350+
7351+#ifdef CONFIG_PAX_EMUPLT
7352+ case 2:
7353+ case 3:
7354+ return;
7355+#endif
7356+
7357+ }
7358+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
7359+ do_group_exit(SIGKILL);
7360+ }
7361+#endif
7362+
7363 /* Pure DTLB misses do not tell us whether the fault causing
7364 * load/store/atomic was a write or not, it only says that there
7365 * was no match. So in such a case we (carefully) read the
fe2de317 7366diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
4c928ab7 7367index 07e1453..0a7d9e9 100644
7368--- a/arch/sparc/mm/hugetlbpage.c
7369+++ b/arch/sparc/mm/hugetlbpage.c
4c928ab7 7370@@ -67,7 +67,7 @@ full_search:
7371 }
7372 return -ENOMEM;
7373 }
7374- if (likely(!vma || addr + len <= vma->vm_start)) {
7375+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7376 /*
7377 * Remember the place where we stopped the search:
7378 */
4c928ab7 7379@@ -106,7 +106,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7380 /* make sure it can fit in the remaining address space */
7381 if (likely(addr > len)) {
7382 vma = find_vma(mm, addr-len);
7383- if (!vma || addr <= vma->vm_start) {
7384+ if (check_heap_stack_gap(vma, addr - len, len)) {
7385 /* remember the address as a hint for next time */
7386 return (mm->free_area_cache = addr-len);
7387 }
4c928ab7 7388@@ -115,16 +115,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7389 if (unlikely(mm->mmap_base < len))
7390 goto bottomup;
7391
7392- addr = (mm->mmap_base-len) & HPAGE_MASK;
7393+ addr = mm->mmap_base - len;
7394
7395 do {
7396+ addr &= HPAGE_MASK;
7397 /*
7398 * Lookup failure means no vma is above this address,
7399 * else if new region fits below vma->vm_start,
7400 * return with success:
7401 */
7402 vma = find_vma(mm, addr);
7403- if (likely(!vma || addr+len <= vma->vm_start)) {
7404+ if (likely(check_heap_stack_gap(vma, addr, len))) {
7405 /* remember the address as a hint for next time */
7406 return (mm->free_area_cache = addr);
7407 }
4c928ab7 7408@@ -134,8 +135,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7409 mm->cached_hole_size = vma->vm_start - addr;
7410
7411 /* try just below the current vma->vm_start */
7412- addr = (vma->vm_start-len) & HPAGE_MASK;
7413- } while (likely(len < vma->vm_start));
7414+ addr = skip_heap_stack_gap(vma, len);
7415+ } while (!IS_ERR_VALUE(addr));
7416
7417 bottomup:
7418 /*
4c928ab7 7419@@ -181,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
7420 if (addr) {
7421 addr = ALIGN(addr, HPAGE_SIZE);
7422 vma = find_vma(mm, addr);
7423- if (task_size - len >= addr &&
7424- (!vma || addr + len <= vma->vm_start))
7425+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
7426 return addr;
7427 }
7428 if (mm->get_unmapped_area == arch_get_unmapped_area)
fe2de317 7429diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
c6e2a6c8 7430index c5f9021..7591bae 100644
7431--- a/arch/sparc/mm/init_32.c
7432+++ b/arch/sparc/mm/init_32.c
c6e2a6c8 7433@@ -315,6 +315,9 @@ extern void device_scan(void);
7434 pgprot_t PAGE_SHARED __read_mostly;
7435 EXPORT_SYMBOL(PAGE_SHARED);
7436
7437+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
7438+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
7439+
7440 void __init paging_init(void)
7441 {
7442 switch(sparc_cpu_model) {
c6e2a6c8 7443@@ -343,17 +346,17 @@ void __init paging_init(void)
7444
7445 /* Initialize the protection map with non-constant, MMU dependent values. */
7446 protection_map[0] = PAGE_NONE;
7447- protection_map[1] = PAGE_READONLY;
7448- protection_map[2] = PAGE_COPY;
7449- protection_map[3] = PAGE_COPY;
7450+ protection_map[1] = PAGE_READONLY_NOEXEC;
7451+ protection_map[2] = PAGE_COPY_NOEXEC;
7452+ protection_map[3] = PAGE_COPY_NOEXEC;
7453 protection_map[4] = PAGE_READONLY;
7454 protection_map[5] = PAGE_READONLY;
7455 protection_map[6] = PAGE_COPY;
7456 protection_map[7] = PAGE_COPY;
7457 protection_map[8] = PAGE_NONE;
7458- protection_map[9] = PAGE_READONLY;
7459- protection_map[10] = PAGE_SHARED;
7460- protection_map[11] = PAGE_SHARED;
7461+ protection_map[9] = PAGE_READONLY_NOEXEC;
7462+ protection_map[10] = PAGE_SHARED_NOEXEC;
7463+ protection_map[11] = PAGE_SHARED_NOEXEC;
7464 protection_map[12] = PAGE_READONLY;
7465 protection_map[13] = PAGE_READONLY;
7466 protection_map[14] = PAGE_SHARED;
7467diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
7468index cbef74e..c38fead 100644
7469--- a/arch/sparc/mm/srmmu.c
7470+++ b/arch/sparc/mm/srmmu.c
bc901d79 7471@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
7472 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
7473 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
7474 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
7475+
7476+#ifdef CONFIG_PAX_PAGEEXEC
7477+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
7478+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
7479+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
7480+#endif
7481+
7482 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
7483 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
7484
4c928ab7 7485diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
c6e2a6c8 7486index f4500c6..889656c 100644
7487--- a/arch/tile/include/asm/atomic_64.h
7488+++ b/arch/tile/include/asm/atomic_64.h
c6e2a6c8 7489@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7490
7491 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7492
7493+#define atomic64_read_unchecked(v) atomic64_read(v)
7494+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7495+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7496+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7497+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7498+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7499+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7500+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7501+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7502+
7503 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
7504 #define smp_mb__before_atomic_dec() smp_mb()
7505 #define smp_mb__after_atomic_dec() smp_mb()
7506diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
7507index 392e533..536b092 100644
7508--- a/arch/tile/include/asm/cache.h
7509+++ b/arch/tile/include/asm/cache.h
7510@@ -15,11 +15,12 @@
7511 #ifndef _ASM_TILE_CACHE_H
7512 #define _ASM_TILE_CACHE_H
7513
7514+#include <linux/const.h>
7515 #include <arch/chip.h>
7516
7517 /* bytes per L1 data cache line */
7518 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
7519-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7520+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7521
7522 /* bytes per L2 cache line */
7523 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
7524diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
7525index ef34d2c..d6ce60c 100644
7526--- a/arch/tile/include/asm/uaccess.h
7527+++ b/arch/tile/include/asm/uaccess.h
7528@@ -361,9 +361,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
7529 const void __user *from,
7530 unsigned long n)
7531 {
7532- int sz = __compiletime_object_size(to);
7533+ size_t sz = __compiletime_object_size(to);
7534
7535- if (likely(sz == -1 || sz >= n))
7536+ if (likely(sz == (size_t)-1 || sz >= n))
7537 n = _copy_from_user(to, from, n);
7538 else
7539 copy_from_user_overflow();
fe2de317 7540diff --git a/arch/um/Makefile b/arch/um/Makefile
c6e2a6c8 7541index 55c0661..86ad413 100644
7542--- a/arch/um/Makefile
7543+++ b/arch/um/Makefile
c6e2a6c8 7544@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
fe2de317 7545 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
4c928ab7 7546 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
7547
7548+ifdef CONSTIFY_PLUGIN
7549+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7550+endif
7551+
fe2de317 7552 #This will adjust *FLAGS accordingly to the platform.
7553 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
7554
7555diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
7556index 19e1bdd..3665b77 100644
7557--- a/arch/um/include/asm/cache.h
7558+++ b/arch/um/include/asm/cache.h
7559@@ -1,6 +1,7 @@
7560 #ifndef __UM_CACHE_H
7561 #define __UM_CACHE_H
7562
7563+#include <linux/const.h>
7564
7565 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
7566 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7567@@ -12,6 +13,6 @@
7568 # define L1_CACHE_SHIFT 5
7569 #endif
7570
7571-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7572+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7573
7574 #endif
7575diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
7576index 6c03acd..a5e0215 100644
7577--- a/arch/um/include/asm/kmap_types.h
7578+++ b/arch/um/include/asm/kmap_types.h
7579@@ -23,6 +23,7 @@ enum km_type {
7580 KM_IRQ1,
7581 KM_SOFTIRQ0,
7582 KM_SOFTIRQ1,
7583+ KM_CLEARPAGE,
7584 KM_TYPE_NR
7585 };
7586
fe2de317 7587diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
4c928ab7 7588index 7cfc3ce..cbd1a58 100644
7589--- a/arch/um/include/asm/page.h
7590+++ b/arch/um/include/asm/page.h
7591@@ -14,6 +14,9 @@
7592 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
7593 #define PAGE_MASK (~(PAGE_SIZE-1))
7594
7595+#define ktla_ktva(addr) (addr)
7596+#define ktva_ktla(addr) (addr)
7597+
7598 #ifndef __ASSEMBLY__
7599
7600 struct page;
7601diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
7602index 0032f92..cd151e0 100644
7603--- a/arch/um/include/asm/pgtable-3level.h
7604+++ b/arch/um/include/asm/pgtable-3level.h
7605@@ -58,6 +58,7 @@
7606 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
7607 #define pud_populate(mm, pud, pmd) \
7608 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
7609+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7610
7611 #ifdef CONFIG_64BIT
7612 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
fe2de317 7613diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
c6e2a6c8 7614index 2b73ded..804f540 100644
7615--- a/arch/um/kernel/process.c
7616+++ b/arch/um/kernel/process.c
c6e2a6c8 7617@@ -404,22 +404,6 @@ int singlestepping(void * t)
7618 return 2;
7619 }
7620
7621-/*
7622- * Only x86 and x86_64 have an arch_align_stack().
7623- * All other arches have "#define arch_align_stack(x) (x)"
7624- * in their asm/system.h
7625- * As this is included in UML from asm-um/system-generic.h,
7626- * we can use it to behave as the subarch does.
7627- */
7628-#ifndef arch_align_stack
7629-unsigned long arch_align_stack(unsigned long sp)
7630-{
7631- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7632- sp -= get_random_int() % 8192;
7633- return sp & ~0xf;
7634-}
7635-#endif
7636-
7637 unsigned long get_wchan(struct task_struct *p)
7638 {
7639 unsigned long stack_page, sp, ip;
7640diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
7641index ad8f795..2c7eec6 100644
7642--- a/arch/unicore32/include/asm/cache.h
7643+++ b/arch/unicore32/include/asm/cache.h
7644@@ -12,8 +12,10 @@
7645 #ifndef __UNICORE_CACHE_H__
7646 #define __UNICORE_CACHE_H__
7647
7648-#define L1_CACHE_SHIFT (5)
7649-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7650+#include <linux/const.h>
58c5fc13 7651+
7652+#define L1_CACHE_SHIFT 5
7653+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6e9df6a3 7654
7655 /*
7656 * Memory returned by kmalloc() may be used for DMA, so we must make
fe2de317 7657diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
c6e2a6c8 7658index c9866b0..fe53aef 100644
7659--- a/arch/x86/Kconfig
7660+++ b/arch/x86/Kconfig
c6e2a6c8 7661@@ -229,7 +229,7 @@ config X86_HT
7662
7663 config X86_32_LAZY_GS
7664 def_bool y
7665- depends on X86_32 && !CC_STACKPROTECTOR
7666+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
7667
7668 config ARCH_HWEIGHT_CFLAGS
7669 string
c6e2a6c8 7670@@ -1042,7 +1042,7 @@ choice
7671
7672 config NOHIGHMEM
7673 bool "off"
7674- depends on !X86_NUMAQ
7675+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7676 ---help---
7677 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
7678 However, the address space of 32-bit x86 processors is only 4
c6e2a6c8 7679@@ -1079,7 +1079,7 @@ config NOHIGHMEM
7680
7681 config HIGHMEM4G
7682 bool "4GB"
7683- depends on !X86_NUMAQ
7684+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
7685 ---help---
7686 Select this if you have a 32-bit processor and between 1 and 4
7687 gigabytes of physical RAM.
c6e2a6c8 7688@@ -1133,7 +1133,7 @@ config PAGE_OFFSET
7689 hex
7690 default 0xB0000000 if VMSPLIT_3G_OPT
7691 default 0x80000000 if VMSPLIT_2G
7692- default 0x78000000 if VMSPLIT_2G_OPT
7693+ default 0x70000000 if VMSPLIT_2G_OPT
7694 default 0x40000000 if VMSPLIT_1G
7695 default 0xC0000000
7696 depends on X86_32
c6e2a6c8 7697@@ -1523,6 +1523,7 @@ config SECCOMP
7698
7699 config CC_STACKPROTECTOR
7700 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
7701+ depends on X86_64 || !PAX_MEMORY_UDEREF
7702 ---help---
7703 This option turns on the -fstack-protector GCC feature. This
7704 feature puts, at the beginning of functions, a canary value on
c6e2a6c8 7705@@ -1580,6 +1581,7 @@ config KEXEC_JUMP
7706 config PHYSICAL_START
7707 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
7708 default "0x1000000"
7709+ range 0x400000 0x40000000
7710 ---help---
7711 This gives the physical address where the kernel is loaded.
7712
c6e2a6c8 7713@@ -1643,6 +1645,7 @@ config X86_NEED_RELOCS
7714 config PHYSICAL_ALIGN
7715 hex "Alignment value to which kernel should be aligned" if X86_32
7716 default "0x1000000"
7717+ range 0x400000 0x1000000 if PAX_KERNEXEC
7718 range 0x2000 0x1000000
7719 ---help---
7720 This value puts the alignment restrictions on physical address
c6e2a6c8 7721@@ -1674,9 +1677,10 @@ config HOTPLUG_CPU
7722 Say N if you want to disable CPU hotplug.
7723
7724 config COMPAT_VDSO
7725- def_bool y
7726+ def_bool n
7727 prompt "Compat VDSO support"
7728 depends on X86_32 || IA32_EMULATION
7729+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
7730 ---help---
7731 Map the 32-bit VDSO to the predictable old-style address too.
7732
7733diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
c6e2a6c8 7734index 706e12e..62e4feb 100644
7735--- a/arch/x86/Kconfig.cpu
7736+++ b/arch/x86/Kconfig.cpu
c6e2a6c8 7737@@ -334,7 +334,7 @@ config X86_PPRO_FENCE
7738
7739 config X86_F00F_BUG
7740 def_bool y
7741- depends on M586MMX || M586TSC || M586 || M486 || M386
7742+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
7743
7744 config X86_INVD_BUG
7745 def_bool y
c6e2a6c8 7746@@ -358,7 +358,7 @@ config X86_POPAD_OK
7747
7748 config X86_ALIGNMENT_16
7749 def_bool y
7750- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7751+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
7752
7753 config X86_INTEL_USERCOPY
7754 def_bool y
c6e2a6c8 7755@@ -404,7 +404,7 @@ config X86_CMPXCHG64
7756 # generates cmov.
7757 config X86_CMOV
7758 def_bool y
7759- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7760+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
7761
7762 config X86_MINIMUM_CPU_FAMILY
7763 int
7764diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
c1e3898a 7765index e46c214..ab62fd1 100644
7766--- a/arch/x86/Kconfig.debug
7767+++ b/arch/x86/Kconfig.debug
5e856224 7768@@ -84,7 +84,7 @@ config X86_PTDUMP
7769 config DEBUG_RODATA
7770 bool "Write protect kernel read-only data structures"
7771 default y
7772- depends on DEBUG_KERNEL
7773+ depends on DEBUG_KERNEL && BROKEN
7774 ---help---
7775 Mark the kernel read-only data as write-protected in the pagetables,
7776 in order to catch accidental (and incorrect) writes to such const
5e856224 7777@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
7778
7779 config DEBUG_SET_MODULE_RONX
7780 bool "Set loadable kernel module data as NX and text as RO"
7781- depends on MODULES
7782+ depends on MODULES && BROKEN
7783 ---help---
7784 This option helps catch unintended modifications to loadable
7785 kernel module's text and read-only data. It also prevents execution
7786@@ -275,7 +275,7 @@ config OPTIMIZE_INLINING
7787
7788 config DEBUG_STRICT_USER_COPY_CHECKS
7789 bool "Strict copy size checks"
7790- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
7791+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
7792 ---help---
7793 Enabling this option turns a certain set of sanity checks for user
7794 copy operations into compile time failures.
fe2de317 7795diff --git a/arch/x86/Makefile b/arch/x86/Makefile
c6e2a6c8 7796index b1c611e..2c1a823 100644
7797--- a/arch/x86/Makefile
7798+++ b/arch/x86/Makefile
7799@@ -46,6 +46,7 @@ else
7800 UTS_MACHINE := x86_64
7801 CHECKFLAGS += -D__x86_64__ -m64
7802
7803+ biarch := $(call cc-option,-m64)
7804 KBUILD_AFLAGS += -m64
7805 KBUILD_CFLAGS += -m64
7806
c6e2a6c8 7807@@ -222,3 +223,12 @@ define archhelp
7808 echo ' FDARGS="..." arguments for the booted kernel'
7809 echo ' FDINITRD=file initrd for the booted kernel'
7810 endef
7811+
7812+define OLD_LD
7813+
7814+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
7815+*** Please upgrade your binutils to 2.18 or newer
7816+endef
7817+
7818+archprepare:
7819+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
7820diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
5e856224 7821index 5a747dd..ff7b12c 100644
7822--- a/arch/x86/boot/Makefile
7823+++ b/arch/x86/boot/Makefile
5e856224 7824@@ -64,6 +64,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7825 $(call cc-option, -fno-stack-protector) \
7826 $(call cc-option, -mpreferred-stack-boundary=2)
7827 KBUILD_CFLAGS += $(call cc-option, -m32)
7828+ifdef CONSTIFY_PLUGIN
7829+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7830+endif
7831 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7832 GCOV_PROFILE := n
7833
7834diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7835index 878e4b9..20537ab 100644
7836--- a/arch/x86/boot/bitops.h
7837+++ b/arch/x86/boot/bitops.h
7838@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7839 u8 v;
7840 const u32 *p = (const u32 *)addr;
7841
7842- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7843+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7844 return v;
7845 }
7846
fe2de317 7847@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7848
7849 static inline void set_bit(int nr, void *addr)
7850 {
7851- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7852+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7853 }
7854
7855 #endif /* BOOT_BITOPS_H */
fe2de317 7856diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
c6e2a6c8 7857index 18997e5..83d9c67 100644
7858--- a/arch/x86/boot/boot.h
7859+++ b/arch/x86/boot/boot.h
6892158b 7860@@ -85,7 +85,7 @@ static inline void io_delay(void)
7861 static inline u16 ds(void)
7862 {
7863 u16 seg;
7864- asm("movw %%ds,%0" : "=rm" (seg));
7865+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7866 return seg;
7867 }
7868
fe2de317 7869@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7870 static inline int memcmp(const void *s1, const void *s2, size_t len)
7871 {
7872 u8 diff;
7873- asm("repe; cmpsb; setnz %0"
7874+ asm volatile("repe; cmpsb; setnz %0"
7875 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7876 return diff;
7877 }
fe2de317 7878diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
c6e2a6c8 7879index e398bb5..3a382ca 100644
7880--- a/arch/x86/boot/compressed/Makefile
7881+++ b/arch/x86/boot/compressed/Makefile
7882@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7883 KBUILD_CFLAGS += $(cflags-y)
7884 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7885 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7886+ifdef CONSTIFY_PLUGIN
7887+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7888+endif
7889
7890 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7891 GCOV_PROFILE := n
7892diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
7893index 0cdfc0d..6e79437 100644
7894--- a/arch/x86/boot/compressed/eboot.c
7895+++ b/arch/x86/boot/compressed/eboot.c
7896@@ -122,7 +122,6 @@ again:
7897 *addr = max_addr;
7898 }
7899
7900-free_pool:
7901 efi_call_phys1(sys_table->boottime->free_pool, map);
7902
7903 fail:
7904@@ -186,7 +185,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
7905 if (i == map_size / desc_size)
7906 status = EFI_NOT_FOUND;
7907
7908-free_pool:
7909 efi_call_phys1(sys_table->boottime->free_pool, map);
7910 fail:
7911 return status;
fe2de317 7912diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
5e856224 7913index c85e3ac..6f5aa80 100644
7914--- a/arch/x86/boot/compressed/head_32.S
7915+++ b/arch/x86/boot/compressed/head_32.S
5e856224 7916@@ -106,7 +106,7 @@ preferred_addr:
7917 notl %eax
7918 andl %eax, %ebx
7919 #else
7920- movl $LOAD_PHYSICAL_ADDR, %ebx
7921+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7922 #endif
7923
7924 /* Target address to relocate to for decompression */
5e856224 7925@@ -192,7 +192,7 @@ relocated:
7926 * and where it was actually loaded.
7927 */
7928 movl %ebp, %ebx
7929- subl $LOAD_PHYSICAL_ADDR, %ebx
7930+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7931 jz 2f /* Nothing to be done if loaded at compiled addr. */
7932 /*
7933 * Process relocations.
5e856224 7934@@ -200,8 +200,7 @@ relocated:
7935
7936 1: subl $4, %edi
7937 movl (%edi), %ecx
7938- testl %ecx, %ecx
7939- jz 2f
7940+ jecxz 2f
7941 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7942 jmp 1b
7943 2:
fe2de317 7944diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
5e856224 7945index 87e03a1..0d94c76 100644
7946--- a/arch/x86/boot/compressed/head_64.S
7947+++ b/arch/x86/boot/compressed/head_64.S
ae4e228f 7948@@ -91,7 +91,7 @@ ENTRY(startup_32)
7949 notl %eax
7950 andl %eax, %ebx
7951 #else
7952- movl $LOAD_PHYSICAL_ADDR, %ebx
7953+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7954 #endif
7955
7956 /* Target address to relocate to for decompression */
5e856224 7957@@ -263,7 +263,7 @@ preferred_addr:
7958 notq %rax
7959 andq %rax, %rbp
7960 #else
7961- movq $LOAD_PHYSICAL_ADDR, %rbp
7962+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7963 #endif
7964
7965 /* Target address to relocate to for decompression */
fe2de317 7966diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
5e856224 7967index 7116dcb..d9ae1d7 100644
7968--- a/arch/x86/boot/compressed/misc.c
7969+++ b/arch/x86/boot/compressed/misc.c
16454cff 7970@@ -310,7 +310,7 @@ static void parse_elf(void *output)
7971 case PT_LOAD:
7972 #ifdef CONFIG_RELOCATABLE
7973 dest = output;
7974- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7975+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7976 #else
7977 dest = (void *)(phdr->p_paddr);
7978 #endif
5e856224 7979@@ -365,7 +365,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7980 error("Destination address too large");
7981 #endif
7982 #ifndef CONFIG_RELOCATABLE
7983- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7984+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7985 error("Wrong destination address");
7986 #endif
7987
7988diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7989index 4d3ff03..e4972ff 100644
7990--- a/arch/x86/boot/cpucheck.c
7991+++ b/arch/x86/boot/cpucheck.c
7992@@ -74,7 +74,7 @@ static int has_fpu(void)
7993 u16 fcw = -1, fsw = -1;
7994 u32 cr0;
7995
7996- asm("movl %%cr0,%0" : "=r" (cr0));
7997+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7998 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7999 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
8000 asm volatile("movl %0,%%cr0" : : "r" (cr0));
8001@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
8002 {
8003 u32 f0, f1;
8004
8005- asm("pushfl ; "
8006+ asm volatile("pushfl ; "
8007 "pushfl ; "
8008 "popl %0 ; "
8009 "movl %0,%1 ; "
8010@@ -115,7 +115,7 @@ static void get_flags(void)
8011 set_bit(X86_FEATURE_FPU, cpu.flags);
8012
8013 if (has_eflag(X86_EFLAGS_ID)) {
8014- asm("cpuid"
8015+ asm volatile("cpuid"
8016 : "=a" (max_intel_level),
8017 "=b" (cpu_vendor[0]),
8018 "=d" (cpu_vendor[1]),
8019@@ -124,7 +124,7 @@ static void get_flags(void)
8020
8021 if (max_intel_level >= 0x00000001 &&
8022 max_intel_level <= 0x0000ffff) {
8023- asm("cpuid"
8024+ asm volatile("cpuid"
8025 : "=a" (tfms),
8026 "=c" (cpu.flags[4]),
8027 "=d" (cpu.flags[0])
8028@@ -136,7 +136,7 @@ static void get_flags(void)
8029 cpu.model += ((tfms >> 16) & 0xf) << 4;
8030 }
8031
8032- asm("cpuid"
8033+ asm volatile("cpuid"
8034 : "=a" (max_amd_level)
8035 : "a" (0x80000000)
8036 : "ebx", "ecx", "edx");
8037@@ -144,7 +144,7 @@ static void get_flags(void)
8038 if (max_amd_level >= 0x80000001 &&
8039 max_amd_level <= 0x8000ffff) {
8040 u32 eax = 0x80000001;
8041- asm("cpuid"
8042+ asm volatile("cpuid"
8043 : "+a" (eax),
8044 "=c" (cpu.flags[6]),
8045 "=d" (cpu.flags[1])
fe2de317 8046@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8047 u32 ecx = MSR_K7_HWCR;
8048 u32 eax, edx;
8049
8050- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8051+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8052 eax &= ~(1 << 15);
8053- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8054+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8055
8056 get_flags(); /* Make sure it really did something */
8057 err = check_flags();
fe2de317 8058@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8059 u32 ecx = MSR_VIA_FCR;
8060 u32 eax, edx;
8061
8062- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8063+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8064 eax |= (1<<1)|(1<<7);
8065- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8066+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8067
8068 set_bit(X86_FEATURE_CX8, cpu.flags);
8069 err = check_flags();
fe2de317 8070@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
8071 u32 eax, edx;
8072 u32 level = 1;
8073
8074- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8075- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8076- asm("cpuid"
8077+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
8078+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
8079+ asm volatile("cpuid"
8080 : "+a" (level), "=d" (cpu.flags[0])
8081 : : "ecx", "ebx");
8082- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8083+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
8084
8085 err = check_flags();
8086 }
fe2de317 8087diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
5e856224 8088index f1bbeeb..aff09cb 100644
8089--- a/arch/x86/boot/header.S
8090+++ b/arch/x86/boot/header.S
5e856224 8091@@ -372,7 +372,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
8092 # single linked list of
8093 # struct setup_data
8094
8095-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
8096+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
8097
8098 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
8099 #define VO_INIT_SIZE (VO__end - VO__text)
8100diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
8101index db75d07..8e6d0af 100644
8102--- a/arch/x86/boot/memory.c
8103+++ b/arch/x86/boot/memory.c
8104@@ -19,7 +19,7 @@
8105
8106 static int detect_memory_e820(void)
8107 {
8108- int count = 0;
8109+ unsigned int count = 0;
8110 struct biosregs ireg, oreg;
8111 struct e820entry *desc = boot_params.e820_map;
8112 static struct e820entry buf; /* static so it is zeroed */
8113diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
8114index 11e8c6e..fdbb1ed 100644
8115--- a/arch/x86/boot/video-vesa.c
8116+++ b/arch/x86/boot/video-vesa.c
8117@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
8118
8119 boot_params.screen_info.vesapm_seg = oreg.es;
8120 boot_params.screen_info.vesapm_off = oreg.di;
8121+ boot_params.screen_info.vesapm_size = oreg.cx;
8122 }
8123
8124 /*
8125diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
8126index 43eda28..5ab5fdb 100644
8127--- a/arch/x86/boot/video.c
8128+++ b/arch/x86/boot/video.c
8129@@ -96,7 +96,7 @@ static void store_mode_params(void)
8130 static unsigned int get_entry(void)
8131 {
8132 char entry_buf[4];
8133- int i, len = 0;
8134+ unsigned int i, len = 0;
8135 int key;
8136 unsigned int v;
8137
8138diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
8139index 5b577d5..3c1fed4 100644
8140--- a/arch/x86/crypto/aes-x86_64-asm_64.S
8141+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
8142@@ -8,6 +8,8 @@
8143 * including this sentence is retained in full.
8144 */
8145
8146+#include <asm/alternative-asm.h>
8147+
8148 .extern crypto_ft_tab
8149 .extern crypto_it_tab
8150 .extern crypto_fl_tab
8151@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
8152 je B192; \
8153 leaq 32(r9),r9;
8154
fe2de317 8155+#define ret pax_force_retaddr 0, 1; ret
8156+
8157 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
8158 movq r1,r2; \
8159 movq r3,r4; \
fe2de317 8160diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
c6e2a6c8 8161index 3470624..201259d 100644
8162--- a/arch/x86/crypto/aesni-intel_asm.S
8163+++ b/arch/x86/crypto/aesni-intel_asm.S
8164@@ -31,6 +31,7 @@
8165
8166 #include <linux/linkage.h>
8167 #include <asm/inst.h>
8168+#include <asm/alternative-asm.h>
8169
8170 #ifdef __x86_64__
8171 .data
8172@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
8173 pop %r14
8174 pop %r13
8175 pop %r12
8176+ pax_force_retaddr 0, 1
8177 ret
8178+ENDPROC(aesni_gcm_dec)
8179
8180
8181 /*****************************************************************************
8182@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
8183 pop %r14
8184 pop %r13
8185 pop %r12
8186+ pax_force_retaddr 0, 1
8187 ret
8188+ENDPROC(aesni_gcm_enc)
8189
8190 #endif
8191
8192@@ -1714,6 +1719,7 @@ _key_expansion_256a:
8193 pxor %xmm1, %xmm0
8194 movaps %xmm0, (TKEYP)
8195 add $0x10, TKEYP
8196+ pax_force_retaddr_bts
8197 ret
8198
8199 .align 4
8200@@ -1738,6 +1744,7 @@ _key_expansion_192a:
8201 shufps $0b01001110, %xmm2, %xmm1
8202 movaps %xmm1, 0x10(TKEYP)
8203 add $0x20, TKEYP
8204+ pax_force_retaddr_bts
8205 ret
8206
8207 .align 4
8208@@ -1757,6 +1764,7 @@ _key_expansion_192b:
8209
8210 movaps %xmm0, (TKEYP)
8211 add $0x10, TKEYP
8212+ pax_force_retaddr_bts
8213 ret
8214
8215 .align 4
8216@@ -1769,6 +1777,7 @@ _key_expansion_256b:
8217 pxor %xmm1, %xmm2
8218 movaps %xmm2, (TKEYP)
8219 add $0x10, TKEYP
8220+ pax_force_retaddr_bts
8221 ret
8222
8223 /*
8224@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
8225 #ifndef __x86_64__
8226 popl KEYP
8227 #endif
8228+ pax_force_retaddr 0, 1
8229 ret
8230+ENDPROC(aesni_set_key)
8231
8232 /*
8233 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
8234@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
8235 popl KLEN
8236 popl KEYP
8237 #endif
8238+ pax_force_retaddr 0, 1
8239 ret
8240+ENDPROC(aesni_enc)
8241
8242 /*
8243 * _aesni_enc1: internal ABI
8244@@ -1959,6 +1972,7 @@ _aesni_enc1:
8245 AESENC KEY STATE
8246 movaps 0x70(TKEYP), KEY
8247 AESENCLAST KEY STATE
8248+ pax_force_retaddr_bts
8249 ret
8250
8251 /*
8252@@ -2067,6 +2081,7 @@ _aesni_enc4:
8253 AESENCLAST KEY STATE2
8254 AESENCLAST KEY STATE3
8255 AESENCLAST KEY STATE4
8256+ pax_force_retaddr_bts
8257 ret
8258
8259 /*
8260@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
8261 popl KLEN
8262 popl KEYP
8263 #endif
8264+ pax_force_retaddr 0, 1
8265 ret
8266+ENDPROC(aesni_dec)
8267
8268 /*
8269 * _aesni_dec1: internal ABI
8270@@ -2146,6 +2163,7 @@ _aesni_dec1:
8271 AESDEC KEY STATE
8272 movaps 0x70(TKEYP), KEY
8273 AESDECLAST KEY STATE
8274+ pax_force_retaddr_bts
8275 ret
8276
8277 /*
8278@@ -2254,6 +2272,7 @@ _aesni_dec4:
8279 AESDECLAST KEY STATE2
8280 AESDECLAST KEY STATE3
8281 AESDECLAST KEY STATE4
8282+ pax_force_retaddr_bts
8283 ret
8284
8285 /*
8286@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
8287 popl KEYP
8288 popl LEN
8289 #endif
8290+ pax_force_retaddr 0, 1
8291 ret
8292+ENDPROC(aesni_ecb_enc)
8293
8294 /*
8295 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8296@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
8297 popl KEYP
8298 popl LEN
8299 #endif
8300+ pax_force_retaddr 0, 1
8301 ret
8302+ENDPROC(aesni_ecb_dec)
8303
8304 /*
8305 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
8306@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
8307 popl LEN
8308 popl IVP
8309 #endif
8310+ pax_force_retaddr 0, 1
8311 ret
8312+ENDPROC(aesni_cbc_enc)
8313
8314 /*
8315 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
c6e2a6c8 8316@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
8317 popl LEN
8318 popl IVP
8319 #endif
8320+ pax_force_retaddr 0, 1
8321 ret
8322+ENDPROC(aesni_cbc_dec)
8323
8324 #ifdef __x86_64__
8325 .align 16
c6e2a6c8 8326@@ -2526,6 +2553,7 @@ _aesni_inc_init:
8327 mov $1, TCTR_LOW
8328 MOVQ_R64_XMM TCTR_LOW INC
8329 MOVQ_R64_XMM CTR TCTR_LOW
8330+ pax_force_retaddr_bts
8331 ret
8332
8333 /*
c6e2a6c8 8334@@ -2554,6 +2582,7 @@ _aesni_inc:
8335 .Linc_low:
8336 movaps CTR, IV
8337 PSHUFB_XMM BSWAP_MASK IV
8338+ pax_force_retaddr_bts
8339 ret
8340
8341 /*
c6e2a6c8 8342@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
8343 .Lctr_enc_ret:
8344 movups IV, (IVP)
8345 .Lctr_enc_just_ret:
8346+ pax_force_retaddr 0, 1
8347 ret
8348+ENDPROC(aesni_ctr_enc)
8349 #endif
8350diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8351index 391d245..67f35c2 100644
8352--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
8353+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
8354@@ -20,6 +20,8 @@
8355 *
8356 */
8357
8358+#include <asm/alternative-asm.h>
8359+
8360 .file "blowfish-x86_64-asm.S"
8361 .text
8362
8363@@ -151,9 +153,11 @@ __blowfish_enc_blk:
8364 jnz __enc_xor;
8365
8366 write_block();
8367+ pax_force_retaddr 0, 1
8368 ret;
8369 __enc_xor:
8370 xor_block();
8371+ pax_force_retaddr 0, 1
8372 ret;
8373
8374 .align 8
8375@@ -188,6 +192,7 @@ blowfish_dec_blk:
8376
8377 movq %r11, %rbp;
8378
8379+ pax_force_retaddr 0, 1
8380 ret;
8381
8382 /**********************************************************************
8383@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
8384
8385 popq %rbx;
8386 popq %rbp;
8387+ pax_force_retaddr 0, 1
8388 ret;
8389
8390 __enc_xor4:
8391@@ -349,6 +355,7 @@ __enc_xor4:
8392
8393 popq %rbx;
8394 popq %rbp;
8395+ pax_force_retaddr 0, 1
8396 ret;
8397
8398 .align 8
8399@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
8400 popq %rbx;
8401 popq %rbp;
8402
8403+ pax_force_retaddr 0, 1
8404 ret;
8405
8406diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
8407index 0b33743..7a56206 100644
8408--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
8409+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
8410@@ -20,6 +20,8 @@
8411 *
8412 */
8413
8414+#include <asm/alternative-asm.h>
8415+
8416 .file "camellia-x86_64-asm_64.S"
8417 .text
8418
8419@@ -229,12 +231,14 @@ __enc_done:
8420 enc_outunpack(mov, RT1);
8421
8422 movq RRBP, %rbp;
8423+ pax_force_retaddr 0, 1
8424 ret;
8425
8426 __enc_xor:
8427 enc_outunpack(xor, RT1);
8428
8429 movq RRBP, %rbp;
8430+ pax_force_retaddr 0, 1
8431 ret;
8432
8433 .global camellia_dec_blk;
8434@@ -275,6 +279,7 @@ __dec_rounds16:
8435 dec_outunpack();
8436
8437 movq RRBP, %rbp;
8438+ pax_force_retaddr 0, 1
8439 ret;
8440
8441 /**********************************************************************
8442@@ -468,6 +473,7 @@ __enc2_done:
8443
8444 movq RRBP, %rbp;
8445 popq %rbx;
8446+ pax_force_retaddr 0, 1
8447 ret;
8448
8449 __enc2_xor:
8450@@ -475,6 +481,7 @@ __enc2_xor:
8451
8452 movq RRBP, %rbp;
8453 popq %rbx;
8454+ pax_force_retaddr 0, 1
8455 ret;
8456
8457 .global camellia_dec_blk_2way;
8458@@ -517,4 +524,5 @@ __dec2_rounds16:
8459
8460 movq RRBP, %rbp;
8461 movq RXOR, %rbx;
8462+ pax_force_retaddr 0, 1
8463 ret;
8464diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8465index 6214a9b..1f4fc9a 100644
8466--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
8467+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
8468@@ -1,3 +1,5 @@
8469+#include <asm/alternative-asm.h>
8470+
8471 # enter ECRYPT_encrypt_bytes
8472 .text
8473 .p2align 5
8474@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
8475 add %r11,%rsp
8476 mov %rdi,%rax
8477 mov %rsi,%rdx
fe2de317 8478+ pax_force_retaddr 0, 1
8479 ret
8480 # bytesatleast65:
8481 ._bytesatleast65:
6e9df6a3 8482@@ -891,6 +894,7 @@ ECRYPT_keysetup:
8483 add %r11,%rsp
8484 mov %rdi,%rax
8485 mov %rsi,%rdx
6e9df6a3 8486+ pax_force_retaddr
8487 ret
8488 # enter ECRYPT_ivsetup
8489 .text
6e9df6a3 8490@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
8491 add %r11,%rsp
8492 mov %rdi,%rax
8493 mov %rsi,%rdx
6e9df6a3 8494+ pax_force_retaddr
15a11c5b 8495 ret
5e856224 8496diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
c6e2a6c8 8497index 3ee1ff0..cbc568b 100644
8498--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8499+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
8500@@ -24,6 +24,8 @@
8501 *
8502 */
8503
8504+#include <asm/alternative-asm.h>
8505+
8506 .file "serpent-sse2-x86_64-asm_64.S"
8507 .text
8508
c6e2a6c8 8509@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
8510 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8511 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8512
8513+ pax_force_retaddr
8514 ret;
8515
8516 __enc_xor8:
8517 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
8518 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
8519
8520+ pax_force_retaddr
8521 ret;
8522
8523 .align 8
c6e2a6c8 8524@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
8525 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
8526 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
8527
8528+ pax_force_retaddr
8529 ret;
8530diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
8531index b2c2f57..8470cab 100644
8532--- a/arch/x86/crypto/sha1_ssse3_asm.S
8533+++ b/arch/x86/crypto/sha1_ssse3_asm.S
8534@@ -28,6 +28,8 @@
8535 * (at your option) any later version.
8536 */
8537
8538+#include <asm/alternative-asm.h>
8539+
8540 #define CTX %rdi // arg1
8541 #define BUF %rsi // arg2
8542 #define CNT %rdx // arg3
8543@@ -104,6 +106,7 @@
8544 pop %r12
8545 pop %rbp
8546 pop %rbx
8547+ pax_force_retaddr 0, 1
8548 ret
8549
8550 .size \name, .-\name
8551diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8552index 5b012a2..36d5364 100644
8553--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8554+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
8555@@ -20,6 +20,8 @@
8556 *
8557 */
8558
8559+#include <asm/alternative-asm.h>
8560+
8561 .file "twofish-x86_64-asm-3way.S"
8562 .text
8563
8564@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
8565 popq %r13;
8566 popq %r14;
8567 popq %r15;
8568+ pax_force_retaddr 0, 1
8569 ret;
8570
8571 __enc_xor3:
8572@@ -271,6 +274,7 @@ __enc_xor3:
8573 popq %r13;
8574 popq %r14;
8575 popq %r15;
8576+ pax_force_retaddr 0, 1
8577 ret;
8578
8579 .global twofish_dec_blk_3way
8580@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
8581 popq %r13;
8582 popq %r14;
8583 popq %r15;
8584+ pax_force_retaddr 0, 1
8585 ret;
8586
fe2de317 8587diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
4c928ab7 8588index 7bcf3fc..f53832f 100644
8589--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
8590+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
8591@@ -21,6 +21,7 @@
8592 .text
8593
8594 #include <asm/asm-offsets.h>
8595+#include <asm/alternative-asm.h>
8596
8597 #define a_offset 0
8598 #define b_offset 4
4c928ab7 8599@@ -268,6 +269,7 @@ twofish_enc_blk:
8600
8601 popq R1
8602 movq $1,%rax
fe2de317 8603+ pax_force_retaddr 0, 1
8604 ret
8605
8606 twofish_dec_blk:
4c928ab7 8607@@ -319,4 +321,5 @@ twofish_dec_blk:
8608
8609 popq R1
8610 movq $1,%rax
fe2de317 8611+ pax_force_retaddr 0, 1
15a11c5b 8612 ret
fe2de317 8613diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
c6e2a6c8 8614index 07b3a68..bd2a388 100644
8615--- a/arch/x86/ia32/ia32_aout.c
8616+++ b/arch/x86/ia32/ia32_aout.c
c6e2a6c8 8617@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
8618 unsigned long dump_start, dump_size;
8619 struct user32 dump;
8620
8621+ memset(&dump, 0, sizeof(dump));
8622+
8623 fs = get_fs();
8624 set_fs(KERNEL_DS);
8625 has_dumped = 1;
fe2de317 8626diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
572b4308 8627index 4f5bfac..e1ef0d3 100644
8628--- a/arch/x86/ia32/ia32_signal.c
8629+++ b/arch/x86/ia32/ia32_signal.c
c6e2a6c8 8630@@ -168,7 +168,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
8631 }
8632 seg = get_fs();
8633 set_fs(KERNEL_DS);
8634- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
8635+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
8636 set_fs(seg);
8637 if (ret >= 0 && uoss_ptr) {
8638 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
c6e2a6c8 8639@@ -369,7 +369,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
8640 */
8641 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8642 size_t frame_size,
8643- void **fpstate)
8644+ void __user **fpstate)
8645 {
8646 unsigned long sp;
8647
c6e2a6c8 8648@@ -390,7 +390,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8649
8650 if (used_math()) {
8651 sp = sp - sig_xstate_ia32_size;
8652- *fpstate = (struct _fpstate_ia32 *) sp;
8653+ *fpstate = (struct _fpstate_ia32 __user *) sp;
8654 if (save_i387_xstate_ia32(*fpstate) < 0)
8655 return (void __user *) -1L;
8656 }
c6e2a6c8 8657@@ -398,7 +398,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
8658 sp -= frame_size;
8659 /* Align the stack pointer according to the i386 ABI,
8660 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
8661- sp = ((sp + 4) & -16ul) - 4;
8662+ sp = ((sp - 12) & -16ul) - 4;
8663 return (void __user *) sp;
8664 }
8665
c6e2a6c8 8666@@ -456,7 +456,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
8667 * These are actually not used anymore, but left because some
8668 * gdb versions depend on them as a marker.
8669 */
8670- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6e9df6a3 8671+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8672 } put_user_catch(err);
8673
8674 if (err)
c6e2a6c8 8675@@ -498,7 +498,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8676 0xb8,
8677 __NR_ia32_rt_sigreturn,
8678 0x80cd,
8679- 0,
8680+ 0
8681 };
8682
8683 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
c6e2a6c8 8684@@ -528,16 +528,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
8685
8686 if (ka->sa.sa_flags & SA_RESTORER)
8687 restorer = ka->sa.sa_restorer;
8688+ else if (current->mm->context.vdso)
8689+ /* Return stub is in 32bit vsyscall page */
8690+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
8691 else
8692- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
8693- rt_sigreturn);
8694+ restorer = &frame->retcode;
8695 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
8696
8697 /*
8698 * Not actually used anymore, but left because some gdb
8699 * versions need it.
8700 */
8701- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6e9df6a3 8702+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
8703 } put_user_catch(err);
8704
8705 if (err)
fe2de317 8706diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
5e856224 8707index e3e7340..05ed805 100644
8708--- a/arch/x86/ia32/ia32entry.S
8709+++ b/arch/x86/ia32/ia32entry.S
5e856224 8710@@ -13,8 +13,10 @@
8711 #include <asm/thread_info.h>
8712 #include <asm/segment.h>
8713 #include <asm/irqflags.h>
8714+#include <asm/pgtable.h>
8715 #include <linux/linkage.h>
5e856224 8716 #include <linux/err.h>
8717+#include <asm/alternative-asm.h>
8718
8719 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
8720 #include <linux/elf-em.h>
5e856224 8721@@ -94,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
8722 ENDPROC(native_irq_enable_sysexit)
8723 #endif
8724
8725+ .macro pax_enter_kernel_user
8726+ pax_set_fptr_mask
8727+#ifdef CONFIG_PAX_MEMORY_UDEREF
8728+ call pax_enter_kernel_user
8729+#endif
8730+ .endm
8731+
8732+ .macro pax_exit_kernel_user
8733+#ifdef CONFIG_PAX_MEMORY_UDEREF
8734+ call pax_exit_kernel_user
8735+#endif
8736+#ifdef CONFIG_PAX_RANDKSTACK
8737+ pushq %rax
4c928ab7 8738+ pushq %r11
fe2de317 8739+ call pax_randomize_kstack
4c928ab7 8740+ popq %r11
8741+ popq %rax
8742+#endif
8743+ .endm
8744+
8745+.macro pax_erase_kstack
8746+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
8747+ call pax_erase_kstack
8748+#endif
8749+.endm
8750+
8751 /*
8752 * 32bit SYSENTER instruction entry.
8753 *
5e856224 8754@@ -120,12 +148,6 @@ ENTRY(ia32_sysenter_target)
8755 CFI_REGISTER rsp,rbp
8756 SWAPGS_UNSAFE_STACK
8757 movq PER_CPU_VAR(kernel_stack), %rsp
8758- addq $(KERNEL_STACK_OFFSET),%rsp
8759- /*
8760- * No need to follow this irqs on/off section: the syscall
8761- * disabled irqs, here we enable it straight after entry:
8762- */
8763- ENABLE_INTERRUPTS(CLBR_NONE)
8764 movl %ebp,%ebp /* zero extension */
8765 pushq_cfi $__USER32_DS
8766 /*CFI_REL_OFFSET ss,0*/
5e856224 8767@@ -133,24 +155,39 @@ ENTRY(ia32_sysenter_target)
8768 CFI_REL_OFFSET rsp,0
8769 pushfq_cfi
8770 /*CFI_REL_OFFSET rflags,0*/
5e856224 8771- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
fe2de317 8772- CFI_REGISTER rip,r10
4c928ab7 8773+ orl $X86_EFLAGS_IF,(%rsp)
8774+ GET_THREAD_INFO(%r11)
8775+ movl TI_sysenter_return(%r11), %r11d
8776+ CFI_REGISTER rip,r11
8777 pushq_cfi $__USER32_CS
8778 /*CFI_REL_OFFSET cs,0*/
8779 movl %eax, %eax
8780- pushq_cfi %r10
8781+ pushq_cfi %r11
8782 CFI_REL_OFFSET rip,0
8783 pushq_cfi %rax
8784 cld
8785 SAVE_ARGS 0,1,0
8786+ pax_enter_kernel_user
8787+ /*
8788+ * No need to follow this irqs on/off section: the syscall
8789+ * disabled irqs, here we enable it straight after entry:
8790+ */
8791+ ENABLE_INTERRUPTS(CLBR_NONE)
8792 /* no need to do an access_ok check here because rbp has been
8793 32bit zero extended */
8794+
8795+#ifdef CONFIG_PAX_MEMORY_UDEREF
8796+ mov $PAX_USER_SHADOW_BASE,%r11
8797+ add %r11,%rbp
8798+#endif
8799+
8800 1: movl (%rbp),%ebp
8801 .section __ex_table,"a"
8802 .quad 1b,ia32_badarg
8803 .previous
8804- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8805- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8806+ GET_THREAD_INFO(%r11)
8807+ orl $TS_COMPAT,TI_status(%r11)
8808+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8809 CFI_REMEMBER_STATE
8810 jnz sysenter_tracesys
8811 cmpq $(IA32_NR_syscalls-1),%rax
5e856224 8812@@ -160,12 +197,15 @@ sysenter_do_call:
8813 sysenter_dispatch:
8814 call *ia32_sys_call_table(,%rax,8)
8815 movq %rax,RAX-ARGOFFSET(%rsp)
8816+ GET_THREAD_INFO(%r11)
8817 DISABLE_INTERRUPTS(CLBR_NONE)
8818 TRACE_IRQS_OFF
5e856224 8819- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8820+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8821 jnz sysexit_audit
8822 sysexit_from_sys_call:
5e856224 8823- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8824+ pax_exit_kernel_user
8825+ pax_erase_kstack
8826+ andl $~TS_COMPAT,TI_status(%r11)
8827 /* clear IF, that popfq doesn't enable interrupts early */
8828 andl $~0x200,EFLAGS-R11(%rsp)
8829 movl RIP-R11(%rsp),%edx /* User %eip */
5e856224 8830@@ -191,6 +231,9 @@ sysexit_from_sys_call:
8831 movl %eax,%esi /* 2nd arg: syscall number */
8832 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5e856224 8833 call __audit_syscall_entry
8834+
8835+ pax_erase_kstack
8836+
8837 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
8838 cmpq $(IA32_NR_syscalls-1),%rax
8839 ja ia32_badsys
5e856224 8840@@ -202,7 +245,7 @@ sysexit_from_sys_call:
8841 .endm
8842
8843 .macro auditsys_exit exit
5e856224 8844- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8845+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8846 jnz ia32_ret_from_sys_call
8847 TRACE_IRQS_ON
8848 sti
8849@@ -213,11 +256,12 @@ sysexit_from_sys_call:
8850 1: setbe %al /* 1 if error, 0 if not */
fe2de317 8851 movzbl %al,%edi /* zero-extend that into %edi */
5e856224 8852 call __audit_syscall_exit
fe2de317 8853+ GET_THREAD_INFO(%r11)
5e856224 8854 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
8855 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8856 cli
8857 TRACE_IRQS_OFF
5e856224 8858- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8859+ testl %edi,TI_flags(%r11)
8860 jz \exit
8861 CLEAR_RREGS -ARGOFFSET
8862 jmp int_with_check
5e856224 8863@@ -235,7 +279,7 @@ sysexit_audit:
8864
8865 sysenter_tracesys:
8866 #ifdef CONFIG_AUDITSYSCALL
5e856224 8867- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8868+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8869 jz sysenter_auditsys
8870 #endif
8871 SAVE_REST
5e856224 8872@@ -243,6 +287,9 @@ sysenter_tracesys:
8873 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8874 movq %rsp,%rdi /* &pt_regs -> arg1 */
8875 call syscall_trace_enter
8876+
8877+ pax_erase_kstack
8878+
8879 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8880 RESTORE_REST
8881 cmpq $(IA32_NR_syscalls-1),%rax
5e856224 8882@@ -274,19 +321,20 @@ ENDPROC(ia32_sysenter_target)
8883 ENTRY(ia32_cstar_target)
8884 CFI_STARTPROC32 simple
8885 CFI_SIGNAL_FRAME
8886- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8887+ CFI_DEF_CFA rsp,0
8888 CFI_REGISTER rip,rcx
8889 /*CFI_REGISTER rflags,r11*/
8890 SWAPGS_UNSAFE_STACK
8891 movl %esp,%r8d
8892 CFI_REGISTER rsp,r8
8893 movq PER_CPU_VAR(kernel_stack),%rsp
8894+ SAVE_ARGS 8*6,0,0
8895+ pax_enter_kernel_user
8896 /*
8897 * No need to follow this irqs on/off section: the syscall
8898 * disabled irqs and here we enable it straight after entry:
8899 */
8900 ENABLE_INTERRUPTS(CLBR_NONE)
8901- SAVE_ARGS 8,0,0
8902 movl %eax,%eax /* zero extension */
8903 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8904 movq %rcx,RIP-ARGOFFSET(%rsp)
5e856224 8905@@ -302,12 +350,19 @@ ENTRY(ia32_cstar_target)
8906 /* no need to do an access_ok check here because r8 has been
8907 32bit zero extended */
8908 /* hardware stack frame is complete now */
8909+
8910+#ifdef CONFIG_PAX_MEMORY_UDEREF
8911+ mov $PAX_USER_SHADOW_BASE,%r11
8912+ add %r11,%r8
8913+#endif
8914+
8915 1: movl (%r8),%r9d
8916 .section __ex_table,"a"
8917 .quad 1b,ia32_badarg
8918 .previous
8919- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8920- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8921+ GET_THREAD_INFO(%r11)
8922+ orl $TS_COMPAT,TI_status(%r11)
8923+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8924 CFI_REMEMBER_STATE
8925 jnz cstar_tracesys
8926 cmpq $IA32_NR_syscalls-1,%rax
5e856224 8927@@ -317,12 +372,15 @@ cstar_do_call:
8928 cstar_dispatch:
8929 call *ia32_sys_call_table(,%rax,8)
8930 movq %rax,RAX-ARGOFFSET(%rsp)
8931+ GET_THREAD_INFO(%r11)
8932 DISABLE_INTERRUPTS(CLBR_NONE)
8933 TRACE_IRQS_OFF
5e856224 8934- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8935+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8936 jnz sysretl_audit
8937 sysretl_from_sys_call:
5e856224 8938- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8939+ pax_exit_kernel_user
8940+ pax_erase_kstack
8941+ andl $~TS_COMPAT,TI_status(%r11)
8942 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
8943 movl RIP-ARGOFFSET(%rsp),%ecx
8944 CFI_REGISTER rip,rcx
5e856224 8945@@ -350,7 +408,7 @@ sysretl_audit:
8946
8947 cstar_tracesys:
8948 #ifdef CONFIG_AUDITSYSCALL
5e856224 8949- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8950+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8951 jz cstar_auditsys
8952 #endif
8953 xchgl %r9d,%ebp
5e856224 8954@@ -359,6 +417,9 @@ cstar_tracesys:
8955 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8956 movq %rsp,%rdi /* &pt_regs -> arg1 */
8957 call syscall_trace_enter
8958+
8959+ pax_erase_kstack
8960+
8961 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8962 RESTORE_REST
8963 xchgl %ebp,%r9d
5e856224 8964@@ -404,19 +465,21 @@ ENTRY(ia32_syscall)
8965 CFI_REL_OFFSET rip,RIP-RIP
8966 PARAVIRT_ADJUST_EXCEPTION_FRAME
8967 SWAPGS
8968- /*
8969- * No need to follow this irqs on/off section: the syscall
8970- * disabled irqs and here we enable it straight after entry:
8971- */
8972- ENABLE_INTERRUPTS(CLBR_NONE)
8973 movl %eax,%eax
8974 pushq_cfi %rax
8975 cld
8976 /* note the registers are not zero extended to the sf.
8977 this could be a problem. */
8978 SAVE_ARGS 0,1,0
8979- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8980- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
8981+ pax_enter_kernel_user
8982+ /*
8983+ * No need to follow this irqs on/off section: the syscall
8984+ * disabled irqs and here we enable it straight after entry:
8985+ */
8986+ ENABLE_INTERRUPTS(CLBR_NONE)
8987+ GET_THREAD_INFO(%r11)
8988+ orl $TS_COMPAT,TI_status(%r11)
8989+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8990 jnz ia32_tracesys
8991 cmpq $(IA32_NR_syscalls-1),%rax
8992 ja ia32_badsys
5e856224 8993@@ -435,6 +498,9 @@ ia32_tracesys:
8994 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8995 movq %rsp,%rdi /* &pt_regs -> arg1 */
8996 call syscall_trace_enter
8997+
8998+ pax_erase_kstack
8999+
9000 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
9001 RESTORE_REST
9002 cmpq $(IA32_NR_syscalls-1),%rax
fe2de317 9003diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
c6e2a6c8 9004index aec2202..f76174e 100644
9005--- a/arch/x86/ia32/sys_ia32.c
9006+++ b/arch/x86/ia32/sys_ia32.c
9007@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
9008 */
9009 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
9010 {
9011- typeof(ubuf->st_uid) uid = 0;
9012- typeof(ubuf->st_gid) gid = 0;
9013+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
9014+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
9015 SET_UID(uid, stat->uid);
9016 SET_GID(gid, stat->gid);
9017 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
c6e2a6c8 9018@@ -292,7 +292,7 @@ asmlinkage long sys32_alarm(unsigned int seconds)
9019 return alarm_setitimer(seconds);
9020 }
9021
9022-asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr,
9023+asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
9024 int options)
9025 {
9026 return compat_sys_wait4(pid, stat_addr, options, NULL);
c6e2a6c8 9027@@ -313,7 +313,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
9028 mm_segment_t old_fs = get_fs();
9029
9030 set_fs(KERNEL_DS);
9031- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
9032+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
9033 set_fs(old_fs);
9034 if (put_compat_timespec(&t, interval))
9035 return -EFAULT;
c6e2a6c8 9036@@ -329,7 +329,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
9037 mm_segment_t old_fs = get_fs();
9038
9039 set_fs(KERNEL_DS);
9040- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
9041+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
9042 set_fs(old_fs);
9043 if (!ret) {
9044 switch (_NSIG_WORDS) {
c6e2a6c8 9045@@ -354,7 +354,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
9046 if (copy_siginfo_from_user32(&info, uinfo))
9047 return -EFAULT;
9048 set_fs(KERNEL_DS);
9049- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
9050+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
9051 set_fs(old_fs);
9052 return ret;
9053 }
c6e2a6c8 9054@@ -399,7 +399,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
9055 return -EFAULT;
9056
9057 set_fs(KERNEL_DS);
9058- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
9059+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
9060 count);
9061 set_fs(old_fs);
9062
fe2de317 9063diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
5e856224 9064index 952bd01..7692c6f 100644
9065--- a/arch/x86/include/asm/alternative-asm.h
9066+++ b/arch/x86/include/asm/alternative-asm.h
9067@@ -15,6 +15,45 @@
9068 .endm
9069 #endif
9070
4c928ab7 9071+#ifdef KERNEXEC_PLUGIN
9072+ .macro pax_force_retaddr_bts rip=0
9073+ btsq $63,\rip(%rsp)
9074+ .endm
9075+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
9076+ .macro pax_force_retaddr rip=0, reload=0
9077+ btsq $63,\rip(%rsp)
9078+ .endm
9079+ .macro pax_force_fptr ptr
9080+ btsq $63,\ptr
9081+ .endm
9082+ .macro pax_set_fptr_mask
9083+ .endm
9084+#endif
9085+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
9086+ .macro pax_force_retaddr rip=0, reload=0
9087+ .if \reload
9088+ pax_set_fptr_mask
9089+ .endif
9090+ orq %r10,\rip(%rsp)
9091+ .endm
9092+ .macro pax_force_fptr ptr
9093+ orq %r10,\ptr
9094+ .endm
9095+ .macro pax_set_fptr_mask
9096+ movabs $0x8000000000000000,%r10
9097+ .endm
9098+#endif
6e9df6a3 9099+#else
fe2de317 9100+ .macro pax_force_retaddr rip=0, reload=0
9101+ .endm
9102+ .macro pax_force_fptr ptr
9103+ .endm
9104+ .macro pax_force_retaddr_bts rip=0
9105+ .endm
9106+ .macro pax_set_fptr_mask
9107+ .endm
9108+#endif
9109+
9110 .macro altinstruction_entry orig alt feature orig_len alt_len
9111 .long \orig - .
9112 .long \alt - .
fe2de317 9113diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
c6e2a6c8 9114index 49331be..9706065 100644
9115--- a/arch/x86/include/asm/alternative.h
9116+++ b/arch/x86/include/asm/alternative.h
9117@@ -89,7 +89,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
6892158b 9118 ".section .discard,\"aw\",@progbits\n" \
ae4e228f 9119 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
9120 ".previous\n" \
9121- ".section .altinstr_replacement, \"ax\"\n" \
9122+ ".section .altinstr_replacement, \"a\"\n" \
9123 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
9124 ".previous"
9125
fe2de317 9126diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
c6e2a6c8 9127index d854101..f6ea947 100644
9128--- a/arch/x86/include/asm/apic.h
9129+++ b/arch/x86/include/asm/apic.h
c6e2a6c8 9130@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
9131
9132 #ifdef CONFIG_X86_LOCAL_APIC
9133
9134-extern unsigned int apic_verbosity;
9135+extern int apic_verbosity;
9136 extern int local_apic_timer_c2_ok;
9137
9138 extern int disable_apic;
9139diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
9140index 20370c6..a2eb9b0 100644
9141--- a/arch/x86/include/asm/apm.h
9142+++ b/arch/x86/include/asm/apm.h
9143@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
9144 __asm__ __volatile__(APM_DO_ZERO_SEGS
9145 "pushl %%edi\n\t"
9146 "pushl %%ebp\n\t"
9147- "lcall *%%cs:apm_bios_entry\n\t"
9148+ "lcall *%%ss:apm_bios_entry\n\t"
9149 "setc %%al\n\t"
9150 "popl %%ebp\n\t"
9151 "popl %%edi\n\t"
fe2de317 9152@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
9153 __asm__ __volatile__(APM_DO_ZERO_SEGS
9154 "pushl %%edi\n\t"
9155 "pushl %%ebp\n\t"
9156- "lcall *%%cs:apm_bios_entry\n\t"
9157+ "lcall *%%ss:apm_bios_entry\n\t"
9158 "setc %%bl\n\t"
9159 "popl %%ebp\n\t"
9160 "popl %%edi\n\t"
fe2de317 9161diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
572b4308 9162index 58cb6d4..a4b806c 100644
9163--- a/arch/x86/include/asm/atomic.h
9164+++ b/arch/x86/include/asm/atomic.h
9165@@ -22,7 +22,18 @@
bc901d79 9166 */
fe2de317 9167 static inline int atomic_read(const atomic_t *v)
bc901d79 9168 {
9169- return (*(volatile int *)&(v)->counter);
9170+ return (*(volatile const int *)&(v)->counter);
9171+}
9172+
9173+/**
9174+ * atomic_read_unchecked - read atomic variable
9175+ * @v: pointer of type atomic_unchecked_t
9176+ *
9177+ * Atomically reads the value of @v.
ae4e228f 9178+ */
fe2de317 9179+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
ae4e228f 9180+{
fe2de317 9181+ return (*(volatile const int *)&(v)->counter);
9182 }
9183
9184 /**
fe2de317 9185@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
9186 }
9187
9188 /**
9189+ * atomic_set_unchecked - set atomic variable
9190+ * @v: pointer of type atomic_unchecked_t
9191+ * @i: required value
9192+ *
9193+ * Atomically sets the value of @v to @i.
9194+ */
fe2de317 9195+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
9196+{
9197+ v->counter = i;
9198+}
9199+
9200+/**
fe2de317 9201 * atomic_add - add integer to atomic variable
ae4e228f 9202 * @i: integer value to add
9203 * @v: pointer of type atomic_t
9204@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
58c5fc13 9205 */
fe2de317 9206 static inline void atomic_add(int i, atomic_t *v)
58c5fc13 9207 {
9208- asm volatile(LOCK_PREFIX "addl %1,%0"
9209+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9210+
9211+#ifdef CONFIG_PAX_REFCOUNT
9212+ "jno 0f\n"
fe2de317 9213+ LOCK_PREFIX "subl %1,%0\n"
58c5fc13
MT
9214+ "int $4\n0:\n"
9215+ _ASM_EXTABLE(0b, 0b)
9216+#endif
9217+
9218+ : "+m" (v->counter)
9219+ : "ir" (i));
9220+}
9221+
9222+/**
fe2de317 9223+ * atomic_add_unchecked - add integer to atomic variable
ae4e228f 9224+ * @i: integer value to add
fe2de317 9225+ * @v: pointer of type atomic_unchecked_t
9226+ *
9227+ * Atomically adds @i to @v.
9228+ */
fe2de317 9229+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
ae4e228f 9230+{
9231+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
9232 : "+m" (v->counter)
9233 : "ir" (i));
9234 }
9235@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
58c5fc13 9236 */
fe2de317 9237 static inline void atomic_sub(int i, atomic_t *v)
58c5fc13 9238 {
9239- asm volatile(LOCK_PREFIX "subl %1,%0"
9240+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9241+
9242+#ifdef CONFIG_PAX_REFCOUNT
9243+ "jno 0f\n"
fe2de317 9244+ LOCK_PREFIX "addl %1,%0\n"
9245+ "int $4\n0:\n"
9246+ _ASM_EXTABLE(0b, 0b)
9247+#endif
9248+
9249+ : "+m" (v->counter)
9250+ : "ir" (i));
9251+}
9252+
9253+/**
fe2de317 9254+ * atomic_sub_unchecked - subtract integer from atomic variable
6892158b 9255+ * @i: integer value to subtract
fe2de317 9256+ * @v: pointer of type atomic_unchecked_t
9257+ *
9258+ * Atomically subtracts @i from @v.
9259+ */
fe2de317 9260+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6892158b 9261+{
9262+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
9263 : "+m" (v->counter)
9264 : "ir" (i));
58c5fc13 9265 }
fe2de317 9266@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
9267 {
9268 unsigned char c;
9269
9270- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
9271+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
9272+
9273+#ifdef CONFIG_PAX_REFCOUNT
9274+ "jno 0f\n"
fe2de317 9275+ LOCK_PREFIX "addl %2,%0\n"
9276+ "int $4\n0:\n"
9277+ _ASM_EXTABLE(0b, 0b)
9278+#endif
9279+
9280+ "sete %1\n"
9281 : "+m" (v->counter), "=qm" (c)
9282 : "ir" (i) : "memory");
58c5fc13 9283 return c;
fe2de317 9284@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
58c5fc13 9285 */
fe2de317 9286 static inline void atomic_inc(atomic_t *v)
58c5fc13 9287 {
9288- asm volatile(LOCK_PREFIX "incl %0"
9289+ asm volatile(LOCK_PREFIX "incl %0\n"
9290+
9291+#ifdef CONFIG_PAX_REFCOUNT
9292+ "jno 0f\n"
fe2de317 9293+ LOCK_PREFIX "decl %0\n"
9294+ "int $4\n0:\n"
9295+ _ASM_EXTABLE(0b, 0b)
9296+#endif
9297+
fe2de317 9298+ : "+m" (v->counter));
9299+}
9300+
9301+/**
9302+ * atomic_inc_unchecked - increment atomic variable
9303+ * @v: pointer of type atomic_unchecked_t
9304+ *
9305+ * Atomically increments @v by 1.
9306+ */
fe2de317 9307+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
ae4e228f 9308+{
9309+ asm volatile(LOCK_PREFIX "incl %0\n"
9310 : "+m" (v->counter));
9311 }
9312
9313@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
58c5fc13 9314 */
fe2de317 9315 static inline void atomic_dec(atomic_t *v)
58c5fc13 9316 {
9317- asm volatile(LOCK_PREFIX "decl %0"
9318+ asm volatile(LOCK_PREFIX "decl %0\n"
9319+
9320+#ifdef CONFIG_PAX_REFCOUNT
9321+ "jno 0f\n"
fe2de317 9322+ LOCK_PREFIX "incl %0\n"
9323+ "int $4\n0:\n"
9324+ _ASM_EXTABLE(0b, 0b)
9325+#endif
9326+
fe2de317 9327+ : "+m" (v->counter));
9328+}
9329+
9330+/**
9331+ * atomic_dec_unchecked - decrement atomic variable
9332+ * @v: pointer of type atomic_unchecked_t
9333+ *
9334+ * Atomically decrements @v by 1.
9335+ */
fe2de317 9336+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
df50ba0c 9337+{
9338+ asm volatile(LOCK_PREFIX "decl %0\n"
9339 : "+m" (v->counter));
58c5fc13 9340 }
9341
9342@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9343 {
9344 unsigned char c;
9345
9346- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9347+ asm volatile(LOCK_PREFIX "decl %0\n"
9348+
9349+#ifdef CONFIG_PAX_REFCOUNT
9350+ "jno 0f\n"
fe2de317 9351+ LOCK_PREFIX "incl %0\n"
9352+ "int $4\n0:\n"
9353+ _ASM_EXTABLE(0b, 0b)
9354+#endif
9355+
9356+ "sete %1\n"
9357 : "+m" (v->counter), "=qm" (c)
9358 : : "memory");
58c5fc13 9359 return c != 0;
fe2de317 9360@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9361 {
9362 unsigned char c;
9363
9364- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9365+ asm volatile(LOCK_PREFIX "incl %0\n"
9366+
9367+#ifdef CONFIG_PAX_REFCOUNT
9368+ "jno 0f\n"
fe2de317 9369+ LOCK_PREFIX "decl %0\n"
9370+ "int $4\n0:\n"
9371+ _ASM_EXTABLE(0b, 0b)
9372+#endif
9373+
9374+ "sete %1\n"
9375+ : "+m" (v->counter), "=qm" (c)
9376+ : : "memory");
9377+ return c != 0;
9378+}
9379+
9380+/**
9381+ * atomic_inc_and_test_unchecked - increment and test
9382+ * @v: pointer of type atomic_unchecked_t
9383+ *
9384+ * Atomically increments @v by 1
9385+ * and returns true if the result is zero, or false for all
9386+ * other cases.
9387+ */
9388+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9389+{
9390+ unsigned char c;
9391+
9392+ asm volatile(LOCK_PREFIX "incl %0\n"
9393+ "sete %1\n"
9394 : "+m" (v->counter), "=qm" (c)
9395 : : "memory");
58c5fc13 9396 return c != 0;
fe2de317 9397@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9398 {
9399 unsigned char c;
9400
9401- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9402+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9403+
9404+#ifdef CONFIG_PAX_REFCOUNT
9405+ "jno 0f\n"
fe2de317 9406+ LOCK_PREFIX "subl %2,%0\n"
9407+ "int $4\n0:\n"
9408+ _ASM_EXTABLE(0b, 0b)
9409+#endif
9410+
9411+ "sets %1\n"
9412 : "+m" (v->counter), "=qm" (c)
9413 : "ir" (i) : "memory");
58c5fc13 9414 return c;
9415@@ -179,7 +341,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
9416 goto no_xadd;
9417 #endif
9418 /* Modern 486+ processor */
9419- return i + xadd(&v->counter, i);
9420+ return i + xadd_check_overflow(&v->counter, i);
9421
9422 #ifdef CONFIG_M386
9423 no_xadd: /* Legacy 386 processor */
9424@@ -192,6 +354,34 @@ no_xadd: /* Legacy 386 processor */
9425 }
9426
9427 /**
fe2de317 9428+ * atomic_add_return_unchecked - add integer and return
ae4e228f 9429+ * @i: integer value to add
4c928ab7 9430+ * @v: pointer of type atomic_unchecked_t
ae4e228f
MT
9431+ *
9432+ * Atomically adds @i to @v and returns @i + @v
9433+ */
fe2de317 9434+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
ae4e228f 9435+{
fe2de317 9436+#ifdef CONFIG_M386
4c928ab7 9437+ int __i;
fe2de317
MT
9438+ unsigned long flags;
9439+ if (unlikely(boot_cpu_data.x86 <= 3))
9440+ goto no_xadd;
9441+#endif
9442+ /* Modern 486+ processor */
9443+ return i + xadd(&v->counter, i);
9444+
9445+#ifdef CONFIG_M386
9446+no_xadd: /* Legacy 386 processor */
9447+ raw_local_irq_save(flags);
9448+ __i = atomic_read_unchecked(v);
9449+ atomic_set_unchecked(v, i + __i);
9450+ raw_local_irq_restore(flags);
9451+ return i + __i;
9452+#endif
9453+}
9454+
9455+/**
9456 * atomic_sub_return - subtract integer and return
9457 * @v: pointer of type atomic_t
9458 * @i: integer value to subtract
9459@@ -204,6 +394,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9460 }
9461
9462 #define atomic_inc_return(v) (atomic_add_return(1, v))
9463+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
57199397 9464+{
fe2de317 9465+ return atomic_add_return_unchecked(1, v);
57199397 9466+}
fe2de317 9467 #define atomic_dec_return(v) (atomic_sub_return(1, v))
ae4e228f 9468
fe2de317 9469 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
4c928ab7 9470@@ -211,11 +405,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
66a7e928
MT
9471 return cmpxchg(&v->counter, old, new);
9472 }
9473
fe2de317 9474+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
66a7e928
MT
9475+{
9476+ return cmpxchg(&v->counter, old, new);
9477+}
9478+
fe2de317 9479 static inline int atomic_xchg(atomic_t *v, int new)
66a7e928
MT
9480 {
9481 return xchg(&v->counter, new);
fe2de317
MT
9482 }
9483
9484+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9485+{
9486+ return xchg(&v->counter, new);
9487+}
9488+
9489 /**
9490 * __atomic_add_unless - add unless the number is already a given value
9491 * @v: pointer of type atomic_t
4c928ab7 9492@@ -227,12 +431,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
58c5fc13 9493 */
fe2de317 9494 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
58c5fc13 9495 {
9496- int c, old;
9497+ int c, old, new;
9498 c = atomic_read(v);
9499 for (;;) {
9500- if (unlikely(c == (u)))
9501+ if (unlikely(c == u))
9502 break;
fe2de317 9503- old = atomic_cmpxchg((v), c, c + (a));
58c5fc13 9504+
fe2de317 9505+ asm volatile("addl %2,%0\n"
9506+
9507+#ifdef CONFIG_PAX_REFCOUNT
9508+ "jno 0f\n"
fe2de317 9509+ "subl %2,%0\n"
9510+ "int $4\n0:\n"
9511+ _ASM_EXTABLE(0b, 0b)
9512+#endif
9513+
9514+ : "=r" (new)
9515+ : "0" (c), "ir" (a));
9516+
fe2de317 9517+ old = atomic_cmpxchg(v, c, new);
9518 if (likely(old == c))
9519 break;
9520 c = old;
4c928ab7 9521@@ -240,6 +457,48 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
fe2de317 9522 return c;
9523 }
9524
9525+/**
9526+ * atomic_inc_not_zero_hint - increment if not null
9527+ * @v: pointer of type atomic_t
9528+ * @hint: probable value of the atomic before the increment
9529+ *
9530+ * This version of atomic_inc_not_zero() gives a hint of probable
9531+ * value of the atomic. This helps processor to not read the memory
9532+ * before doing the atomic read/modify/write cycle, lowering
9533+ * number of bus transactions on some arches.
9534+ *
9535+ * Returns: 0 if increment was not done, 1 otherwise.
9536+ */
9537+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
9538+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
9539+{
9540+ int val, c = hint, new;
9541+
9542+ /* sanity test, should be removed by compiler if hint is a constant */
9543+ if (!hint)
9544+ return __atomic_add_unless(v, 1, 0);
9545+
9546+ do {
9547+ asm volatile("incl %0\n"
9548+
9549+#ifdef CONFIG_PAX_REFCOUNT
9550+ "jno 0f\n"
9551+ "decl %0\n"
9552+ "int $4\n0:\n"
9553+ _ASM_EXTABLE(0b, 0b)
9554+#endif
9555+
9556+ : "=r" (new)
9557+ : "0" (c));
9558+
9559+ val = atomic_cmpxchg(v, c, new);
9560+ if (val == c)
9561+ return 1;
9562+ c = val;
9563+ } while (c);
9564+
9565+ return 0;
9566+}
9567
9568 /*
9569 * atomic_dec_if_positive - decrement by 1 if old value positive
9570@@ -293,14 +552,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
9571 #endif
9572
9573 /* These are x86-specific, used by some header files */
9574-#define atomic_clear_mask(mask, addr) \
9575- asm volatile(LOCK_PREFIX "andl %0,%1" \
9576- : : "r" (~(mask)), "m" (*(addr)) : "memory")
9577+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
9578+{
9579+ asm volatile(LOCK_PREFIX "andl %1,%0"
9580+ : "+m" (v->counter)
9581+ : "r" (~(mask))
9582+ : "memory");
9583+}
9584
9585-#define atomic_set_mask(mask, addr) \
9586- asm volatile(LOCK_PREFIX "orl %0,%1" \
9587- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
9588- : "memory")
9589+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9590+{
9591+ asm volatile(LOCK_PREFIX "andl %1,%0"
9592+ : "+m" (v->counter)
9593+ : "r" (~(mask))
9594+ : "memory");
9595+}
9596+
9597+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
9598+{
9599+ asm volatile(LOCK_PREFIX "orl %1,%0"
9600+ : "+m" (v->counter)
9601+ : "r" (mask)
9602+ : "memory");
9603+}
9604+
9605+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
9606+{
9607+ asm volatile(LOCK_PREFIX "orl %1,%0"
9608+ : "+m" (v->counter)
9609+ : "r" (mask)
9610+ : "memory");
9611+}
9612
9613 /* Atomic operations are already serializing on x86 */
9614 #define smp_mb__before_atomic_dec() barrier()
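Every hunk in this atomic.h section follows the same PAX_REFCOUNT shape: do the arithmetic, then let jno skip a recovery path that undoes the operation and raises int $4, so a signed wrap becomes a reported event instead of a silent corruption of the counter. Below is a minimal userspace sketch of that shape, assuming an x86/x86-64 target and GCC-style inline asm; checked_add and the overflowed flag are invented for the sketch, and the trap is replaced by a flag so the program keeps running.

#include <limits.h>
#include <stdio.h>

/*
 * Overflow-checked add in the spirit of the hunks above: perform "addl",
 * and if the signed add set OF, roll the add back.  The kernel patch
 * raises "int $4" at this point; this sketch only records the event.
 */
static int checked_add(int *counter, int delta, int *overflowed)
{
    unsigned char of;

    asm volatile("addl %2, %0\n\t"
                 "seto %1\n\t"        /* capture OF before it can be clobbered */
                 "jno 1f\n\t"
                 "subl %2, %0\n"      /* overflow: undo the add, as the patch does */
                 "1:"
                 : "+m" (*counter), "=q" (of)
                 : "ir" (delta)
                 : "cc", "memory");

    *overflowed = of;
    return *counter;
}

int main(void)
{
    int counter = INT_MAX, overflowed = 0;

    checked_add(&counter, 1, &overflowed);          /* wraps, so it is rolled back */
    printf("counter=%d overflowed=%d\n", counter, overflowed);
    return 0;
}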
fe2de317 9615diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
c6e2a6c8 9616index 1981199..36b9dfb 100644
fe2de317
MT
9617--- a/arch/x86/include/asm/atomic64_32.h
9618+++ b/arch/x86/include/asm/atomic64_32.h
9619@@ -12,6 +12,14 @@ typedef struct {
9620 u64 __aligned(8) counter;
9621 } atomic64_t;
9622
9623+#ifdef CONFIG_PAX_REFCOUNT
9624+typedef struct {
9625+ u64 __aligned(8) counter;
9626+} atomic64_unchecked_t;
9627+#else
9628+typedef atomic64_t atomic64_unchecked_t;
9629+#endif
9630+
9631 #define ATOMIC64_INIT(val) { (val) }
9632
c6e2a6c8
MT
9633 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
9634@@ -37,21 +45,31 @@ typedef struct {
9635 ATOMIC64_DECL_ONE(sym##_386)
9636
9637 ATOMIC64_DECL_ONE(add_386);
9638+ATOMIC64_DECL_ONE(add_unchecked_386);
9639 ATOMIC64_DECL_ONE(sub_386);
9640+ATOMIC64_DECL_ONE(sub_unchecked_386);
9641 ATOMIC64_DECL_ONE(inc_386);
9642+ATOMIC64_DECL_ONE(inc_unchecked_386);
9643 ATOMIC64_DECL_ONE(dec_386);
9644+ATOMIC64_DECL_ONE(dec_unchecked_386);
9645 #endif
9646
9647 #define alternative_atomic64(f, out, in...) \
9648 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
9649
9650 ATOMIC64_DECL(read);
9651+ATOMIC64_DECL(read_unchecked);
9652 ATOMIC64_DECL(set);
9653+ATOMIC64_DECL(set_unchecked);
9654 ATOMIC64_DECL(xchg);
9655 ATOMIC64_DECL(add_return);
9656+ATOMIC64_DECL(add_return_unchecked);
9657 ATOMIC64_DECL(sub_return);
9658+ATOMIC64_DECL(sub_return_unchecked);
9659 ATOMIC64_DECL(inc_return);
9660+ATOMIC64_DECL(inc_return_unchecked);
9661 ATOMIC64_DECL(dec_return);
9662+ATOMIC64_DECL(dec_return_unchecked);
9663 ATOMIC64_DECL(dec_if_positive);
9664 ATOMIC64_DECL(inc_not_zero);
9665 ATOMIC64_DECL(add_unless);
9666@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
fe2de317
MT
9667 }
9668
9669 /**
9670+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
9671+ * @p: pointer to type atomic64_unchecked_t
9672+ * @o: expected value
9673+ * @n: new value
9674+ *
9675+ * Atomically sets @v to @n if it was equal to @o and returns
9676+ * the old value.
9677+ */
9678+
9679+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
9680+{
9681+ return cmpxchg64(&v->counter, o, n);
9682+}
9683+
9684+/**
9685 * atomic64_xchg - xchg atomic64 variable
9686 * @v: pointer to type atomic64_t
9687 * @n: value to assign
c6e2a6c8 9688@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
fe2de317
MT
9689 }
9690
9691 /**
9692+ * atomic64_set_unchecked - set atomic64 variable
9693+ * @v: pointer to type atomic64_unchecked_t
9694+ * @n: value to assign
9695+ *
9696+ * Atomically sets the value of @v to @n.
9697+ */
9698+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
9699+{
9700+ unsigned high = (unsigned)(i >> 32);
9701+ unsigned low = (unsigned)i;
c6e2a6c8
MT
9702+ alternative_atomic64(set, /* no output */,
9703+ "S" (v), "b" (low), "c" (high)
9704+ : "eax", "edx", "memory");
fe2de317
MT
9705+}
9706+
9707+/**
9708 * atomic64_read - read atomic64 variable
9709 * @v: pointer to type atomic64_t
9710 *
c6e2a6c8 9711@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
fe2de317
MT
9712 }
9713
9714 /**
9715+ * atomic64_read_unchecked - read atomic64 variable
9716+ * @v: pointer to type atomic64_unchecked_t
9717+ *
9718+ * Atomically reads the value of @v and returns it.
9719+ */
9720+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
9721+{
9722+ long long r;
c6e2a6c8 9723+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
fe2de317
MT
9724+ return r;
9725+}
9726+
9727+/**
9728 * atomic64_add_return - add and return
9729 * @i: integer value to add
9730 * @v: pointer to type atomic64_t
c6e2a6c8 9731@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
fe2de317
MT
9732 return i;
9733 }
9734
9735+/**
9736+ * atomic64_add_return_unchecked - add and return
9737+ * @i: integer value to add
9738+ * @v: pointer to type atomic64_unchecked_t
9739+ *
9740+ * Atomically adds @i to @v and returns @i + *@v
9741+ */
9742+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
9743+{
c6e2a6c8
MT
9744+ alternative_atomic64(add_return_unchecked,
9745+ ASM_OUTPUT2("+A" (i), "+c" (v)),
9746+ ASM_NO_INPUT_CLOBBER("memory"));
fe2de317
MT
9747+ return i;
9748+}
9749+
9750 /*
9751 * Other variants with different arithmetic operators:
bc901d79 9752 */
c6e2a6c8 9753@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
fe2de317
MT
9754 return a;
9755 }
9756
9757+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9758+{
9759+ long long a;
c6e2a6c8
MT
9760+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
9761+ "S" (v) : "memory", "ecx");
fe2de317
MT
9762+ return a;
9763+}
9764+
9765 static inline long long atomic64_dec_return(atomic64_t *v)
bc901d79 9766 {
fe2de317 9767 long long a;
c6e2a6c8 9768@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
fe2de317
MT
9769 }
9770
9771 /**
9772+ * atomic64_add_unchecked - add integer to atomic64 variable
9773+ * @i: integer value to add
9774+ * @v: pointer to type atomic64_unchecked_t
9775+ *
9776+ * Atomically adds @i to @v.
9777+ */
9778+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
9779+{
c6e2a6c8
MT
9780+ __alternative_atomic64(add_unchecked, add_return_unchecked,
9781+ ASM_OUTPUT2("+A" (i), "+c" (v)),
9782+ ASM_NO_INPUT_CLOBBER("memory"));
fe2de317 9783+ return i;
bc901d79
MT
9784+}
9785+
9786+/**
fe2de317
MT
9787 * atomic64_sub - subtract the atomic64 variable
9788 * @i: integer value to subtract
9789 * @v: pointer to type atomic64_t
9790diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
4c928ab7 9791index 0e1cbfc..5623683 100644
fe2de317
MT
9792--- a/arch/x86/include/asm/atomic64_64.h
9793+++ b/arch/x86/include/asm/atomic64_64.h
9794@@ -18,7 +18,19 @@
9795 */
9796 static inline long atomic64_read(const atomic64_t *v)
9797 {
9798- return (*(volatile long *)&(v)->counter);
9799+ return (*(volatile const long *)&(v)->counter);
9800+}
9801+
9802+/**
9803+ * atomic64_read_unchecked - read atomic64 variable
9804+ * @v: pointer of type atomic64_unchecked_t
57199397
MT
9805+ *
9806+ * Atomically reads the value of @v.
fe2de317 9807+ * Doesn't imply a read memory barrier.
57199397 9808+ */
fe2de317 9809+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
57199397 9810+{
fe2de317 9811+ return (*(volatile const long *)&(v)->counter);
bc901d79
MT
9812 }
9813
9814 /**
fe2de317 9815@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
57199397
MT
9816 }
9817
9818 /**
fe2de317
MT
9819+ * atomic64_set_unchecked - set atomic64 variable
9820+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
9821+ * @i: required value
9822+ *
9823+ * Atomically sets the value of @v to @i.
9824+ */
fe2de317 9825+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
57199397
MT
9826+{
9827+ v->counter = i;
9828+}
9829+
9830+/**
fe2de317 9831 * atomic64_add - add integer to atomic64 variable
57199397 9832 * @i: integer value to add
fe2de317
MT
9833 * @v: pointer to type atomic64_t
9834@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
57199397 9835 */
fe2de317 9836 static inline void atomic64_add(long i, atomic64_t *v)
57199397 9837 {
fe2de317 9838+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
57199397
MT
9839+
9840+#ifdef CONFIG_PAX_REFCOUNT
9841+ "jno 0f\n"
fe2de317 9842+ LOCK_PREFIX "subq %1,%0\n"
bc901d79 9843+ "int $4\n0:\n"
57199397
MT
9844+ _ASM_EXTABLE(0b, 0b)
9845+#endif
9846+
fe2de317
MT
9847+ : "=m" (v->counter)
9848+ : "er" (i), "m" (v->counter));
57199397
MT
9849+}
9850+
9851+/**
fe2de317 9852+ * atomic64_add_unchecked - add integer to atomic64 variable
57199397 9853+ * @i: integer value to add
fe2de317 9854+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
9855+ *
9856+ * Atomically adds @i to @v.
9857+ */
fe2de317 9858+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
57199397 9859+{
fe2de317
MT
9860 asm volatile(LOCK_PREFIX "addq %1,%0"
9861 : "=m" (v->counter)
9862 : "er" (i), "m" (v->counter));
9863@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
57199397 9864 */
fe2de317 9865 static inline void atomic64_sub(long i, atomic64_t *v)
57199397 9866 {
fe2de317
MT
9867- asm volatile(LOCK_PREFIX "subq %1,%0"
9868+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
57199397
MT
9869+
9870+#ifdef CONFIG_PAX_REFCOUNT
9871+ "jno 0f\n"
fe2de317 9872+ LOCK_PREFIX "addq %1,%0\n"
bc901d79 9873+ "int $4\n0:\n"
57199397
MT
9874+ _ASM_EXTABLE(0b, 0b)
9875+#endif
9876+
fe2de317
MT
9877+ : "=m" (v->counter)
9878+ : "er" (i), "m" (v->counter));
57199397
MT
9879+}
9880+
9881+/**
fe2de317 9882+ * atomic64_sub_unchecked - subtract the atomic64 variable
57199397 9883+ * @i: integer value to subtract
fe2de317 9884+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
9885+ *
9886+ * Atomically subtracts @i from @v.
9887+ */
fe2de317 9888+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
57199397 9889+{
fe2de317
MT
9890+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9891 : "=m" (v->counter)
9892 : "er" (i), "m" (v->counter));
57199397 9893 }
fe2de317 9894@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
57199397
MT
9895 {
9896 unsigned char c;
9897
fe2de317
MT
9898- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9899+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
57199397
MT
9900+
9901+#ifdef CONFIG_PAX_REFCOUNT
9902+ "jno 0f\n"
fe2de317 9903+ LOCK_PREFIX "addq %2,%0\n"
bc901d79 9904+ "int $4\n0:\n"
57199397
MT
9905+ _ASM_EXTABLE(0b, 0b)
9906+#endif
9907+
9908+ "sete %1\n"
fe2de317
MT
9909 : "=m" (v->counter), "=qm" (c)
9910 : "er" (i), "m" (v->counter) : "memory");
57199397 9911 return c;
fe2de317 9912@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
57199397 9913 */
fe2de317 9914 static inline void atomic64_inc(atomic64_t *v)
57199397 9915 {
fe2de317 9916+ asm volatile(LOCK_PREFIX "incq %0\n"
57199397
MT
9917+
9918+#ifdef CONFIG_PAX_REFCOUNT
9919+ "jno 0f\n"
fe2de317 9920+ LOCK_PREFIX "decq %0\n"
bc901d79 9921+ "int $4\n0:\n"
57199397
MT
9922+ _ASM_EXTABLE(0b, 0b)
9923+#endif
9924+
fe2de317
MT
9925+ : "=m" (v->counter)
9926+ : "m" (v->counter));
57199397
MT
9927+}
9928+
9929+/**
fe2de317
MT
9930+ * atomic64_inc_unchecked - increment atomic64 variable
9931+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
9932+ *
9933+ * Atomically increments @v by 1.
9934+ */
fe2de317 9935+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
57199397 9936+{
fe2de317
MT
9937 asm volatile(LOCK_PREFIX "incq %0"
9938 : "=m" (v->counter)
9939 : "m" (v->counter));
9940@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
57199397 9941 */
fe2de317 9942 static inline void atomic64_dec(atomic64_t *v)
57199397 9943 {
fe2de317
MT
9944- asm volatile(LOCK_PREFIX "decq %0"
9945+ asm volatile(LOCK_PREFIX "decq %0\n"
57199397
MT
9946+
9947+#ifdef CONFIG_PAX_REFCOUNT
9948+ "jno 0f\n"
fe2de317 9949+ LOCK_PREFIX "incq %0\n"
bc901d79 9950+ "int $4\n0:\n"
57199397
MT
9951+ _ASM_EXTABLE(0b, 0b)
9952+#endif
9953+
fe2de317
MT
9954+ : "=m" (v->counter)
9955+ : "m" (v->counter));
57199397
MT
9956+}
9957+
9958+/**
fe2de317
MT
9959+ * atomic64_dec_unchecked - decrement atomic64 variable
9960+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
9961+ *
9962+ * Atomically decrements @v by 1.
9963+ */
fe2de317 9964+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
57199397 9965+{
fe2de317
MT
9966+ asm volatile(LOCK_PREFIX "decq %0\n"
9967 : "=m" (v->counter)
9968 : "m" (v->counter));
57199397 9969 }
fe2de317 9970@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
57199397
MT
9971 {
9972 unsigned char c;
9973
fe2de317
MT
9974- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9975+ asm volatile(LOCK_PREFIX "decq %0\n"
57199397
MT
9976+
9977+#ifdef CONFIG_PAX_REFCOUNT
9978+ "jno 0f\n"
fe2de317 9979+ LOCK_PREFIX "incq %0\n"
bc901d79 9980+ "int $4\n0:\n"
57199397
MT
9981+ _ASM_EXTABLE(0b, 0b)
9982+#endif
9983+
9984+ "sete %1\n"
fe2de317
MT
9985 : "=m" (v->counter), "=qm" (c)
9986 : "m" (v->counter) : "memory");
57199397 9987 return c != 0;
fe2de317 9988@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
57199397
MT
9989 {
9990 unsigned char c;
9991
fe2de317
MT
9992- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9993+ asm volatile(LOCK_PREFIX "incq %0\n"
57199397
MT
9994+
9995+#ifdef CONFIG_PAX_REFCOUNT
9996+ "jno 0f\n"
fe2de317 9997+ LOCK_PREFIX "decq %0\n"
bc901d79 9998+ "int $4\n0:\n"
57199397
MT
9999+ _ASM_EXTABLE(0b, 0b)
10000+#endif
10001+
8308f9c9 10002+ "sete %1\n"
fe2de317
MT
10003 : "=m" (v->counter), "=qm" (c)
10004 : "m" (v->counter) : "memory");
57199397 10005 return c != 0;
fe2de317 10006@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
57199397
MT
10007 {
10008 unsigned char c;
10009
fe2de317
MT
10010- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
10011+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
57199397
MT
10012+
10013+#ifdef CONFIG_PAX_REFCOUNT
10014+ "jno 0f\n"
fe2de317 10015+ LOCK_PREFIX "subq %2,%0\n"
bc901d79 10016+ "int $4\n0:\n"
57199397
MT
10017+ _ASM_EXTABLE(0b, 0b)
10018+#endif
10019+
10020+ "sets %1\n"
fe2de317
MT
10021 : "=m" (v->counter), "=qm" (c)
10022 : "er" (i), "m" (v->counter) : "memory");
57199397 10023 return c;
4c928ab7
MT
10024@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
10025 */
fe2de317
MT
10026 static inline long atomic64_add_return(long i, atomic64_t *v)
10027 {
4c928ab7 10028+ return i + xadd_check_overflow(&v->counter, i);
57199397
MT
10029+}
10030+
10031+/**
fe2de317 10032+ * atomic64_add_return_unchecked - add and return
57199397 10033+ * @i: integer value to add
fe2de317 10034+ * @v: pointer to type atomic64_unchecked_t
57199397
MT
10035+ *
10036+ * Atomically adds @i to @v and returns @i + @v
10037+ */
fe2de317 10038+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
57199397 10039+{
4c928ab7
MT
10040 return i + xadd(&v->counter, i);
10041 }
10042
10043@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
57199397
MT
10044 }
10045
fe2de317
MT
10046 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
10047+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
57199397 10048+{
fe2de317 10049+ return atomic64_add_return_unchecked(1, v);
57199397 10050+}
fe2de317 10051 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
57199397 10052
fe2de317 10053 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
4c928ab7 10054@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
8308f9c9
MT
10055 return cmpxchg(&v->counter, old, new);
10056 }
10057
fe2de317 10058+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8308f9c9
MT
10059+{
10060+ return cmpxchg(&v->counter, old, new);
10061+}
10062+
fe2de317 10063 static inline long atomic64_xchg(atomic64_t *v, long new)
8308f9c9
MT
10064 {
10065 return xchg(&v->counter, new);
4c928ab7 10066@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
57199397 10067 */
fe2de317 10068 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
57199397 10069 {
fe2de317
MT
10070- long c, old;
10071+ long c, old, new;
10072 c = atomic64_read(v);
57199397
MT
10073 for (;;) {
10074- if (unlikely(c == (u)))
10075+ if (unlikely(c == u))
10076 break;
fe2de317 10077- old = atomic64_cmpxchg((v), c, c + (a));
57199397 10078+
fe2de317 10079+ asm volatile("add %2,%0\n"
57199397
MT
10080+
10081+#ifdef CONFIG_PAX_REFCOUNT
10082+ "jno 0f\n"
fe2de317 10083+ "sub %2,%0\n"
bc901d79 10084+ "int $4\n0:\n"
57199397
MT
10085+ _ASM_EXTABLE(0b, 0b)
10086+#endif
10087+
10088+ : "=r" (new)
10089+ : "0" (c), "ir" (a));
10090+
fe2de317 10091+ old = atomic64_cmpxchg(v, c, new);
57199397
MT
10092 if (likely(old == c))
10093 break;
10094 c = old;
fe2de317
MT
10095 }
10096- return c != (u);
10097+ return c != u;
57199397
MT
10098 }
10099
fe2de317
MT
10100 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
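atomic64_add_unless() just above is the classic compare-and-swap retry loop: read the counter, bail out if it already equals @u, otherwise try to install the new value and retry if another CPU got there first. A portable sketch of the same loop, assuming GCC/Clang __atomic builtins stand in for the kernel's atomic64_cmpxchg(); add_unless and the sample values are made up here.

#include <stdio.h>

/*
 * add_unless(): add "a" to *v unless *v == u.  Returns the value that
 * was observed before any update, mirroring the loop structure of the
 * kernel helper above.
 */
static long long add_unless(long long *v, long long a, long long u)
{
    long long c = __atomic_load_n(v, __ATOMIC_RELAXED);

    for (;;) {
        if (c == u)
            break;
        /* try c -> c + a; on failure c is refreshed with the current value */
        if (__atomic_compare_exchange_n(v, &c, c + a, 0,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
            break;
    }
    return c;
}

int main(void)
{
    long long counter = 5;

    add_unless(&counter, 1, 0);   /* 5 != 0, so counter becomes 6 */
    add_unless(&counter, 1, 6);   /* counter == 6, so it is left alone */
    printf("counter=%lld\n", counter);
    return 0;
}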
10101diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
5e856224 10102index b97596e..9bd48b06 100644
fe2de317
MT
10103--- a/arch/x86/include/asm/bitops.h
10104+++ b/arch/x86/include/asm/bitops.h
bc901d79
MT
10105@@ -38,7 +38,7 @@
10106 * a mask operation on a byte.
10107 */
10108 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
10109-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
10110+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
10111 #define CONST_MASK(nr) (1 << ((nr) & 7))
10112
10113 /**
fe2de317
MT
10114diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
10115index 5e1a2ee..c9f9533 100644
10116--- a/arch/x86/include/asm/boot.h
10117+++ b/arch/x86/include/asm/boot.h
efbe55a5
MT
10118@@ -11,10 +11,15 @@
10119 #include <asm/pgtable_types.h>
df50ba0c 10120
efbe55a5
MT
10121 /* Physical address where kernel should be loaded. */
10122-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10123+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
10124 + (CONFIG_PHYSICAL_ALIGN - 1)) \
10125 & ~(CONFIG_PHYSICAL_ALIGN - 1))
df50ba0c 10126
efbe55a5
MT
10127+#ifndef __ASSEMBLY__
10128+extern unsigned char __LOAD_PHYSICAL_ADDR[];
10129+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
df50ba0c
MT
10130+#endif
10131+
efbe55a5
MT
10132 /* Minimum kernel alignment, as a power of two */
10133 #ifdef CONFIG_X86_64
10134 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
fe2de317
MT
10135diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
10136index 48f99f1..d78ebf9 100644
10137--- a/arch/x86/include/asm/cache.h
10138+++ b/arch/x86/include/asm/cache.h
8308f9c9
MT
10139@@ -5,12 +5,13 @@
10140
10141 /* L1 cache line size */
10142 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10143-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
15a11c5b 10144+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
efbe55a5 10145
57199397
MT
10146 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
10147+#define __read_only __attribute__((__section__(".data..read_only")))
efbe55a5
MT
10148
10149 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
8308f9c9 10150-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
15a11c5b 10151+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
8308f9c9
MT
10152
10153 #ifdef CONFIG_X86_VSMP
10154 #ifdef CONFIG_SMP
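The cache.h hunk introduces a __read_only attribute that parks a variable in a dedicated .data..read_only section, which the rest of the patch (not shown here) is then able to write-protect after init. The section-placement half of that trick also works in ordinary userspace ELF; a small sketch with an invented section name, and with the actual page protection left out since that part is kernel-side:

#include <stdio.h>

/*
 * Place a variable in a custom ELF section, as __read_only does above.
 * Here the section is only a label for the toolchain (visible with
 * "readelf -S" or "objdump -t"); no protection is applied.
 */
#define __demo_read_only __attribute__((__section__(".data.demo_read_only")))

static int important_setting __demo_read_only = 42;

int main(void)
{
    printf("important_setting=%d\n", important_setting);
    return 0;
}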
fe2de317 10155diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
c6e2a6c8 10156index 9863ee3..4a1f8e1 100644
fe2de317
MT
10157--- a/arch/x86/include/asm/cacheflush.h
10158+++ b/arch/x86/include/asm/cacheflush.h
c6e2a6c8 10159@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
fe2de317
MT
10160 unsigned long pg_flags = pg->flags & _PGMT_MASK;
10161
10162 if (pg_flags == _PGMT_DEFAULT)
10163- return -1;
10164+ return ~0UL;
10165 else if (pg_flags == _PGMT_WC)
10166 return _PAGE_CACHE_WC;
10167 else if (pg_flags == _PGMT_UC_MINUS)
10168diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
10169index 46fc474..b02b0f9 100644
10170--- a/arch/x86/include/asm/checksum_32.h
10171+++ b/arch/x86/include/asm/checksum_32.h
10172@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
efbe55a5
MT
10173 int len, __wsum sum,
10174 int *src_err_ptr, int *dst_err_ptr);
df50ba0c 10175
efbe55a5
MT
10176+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
10177+ int len, __wsum sum,
10178+ int *src_err_ptr, int *dst_err_ptr);
df50ba0c 10179+
efbe55a5
MT
10180+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
10181+ int len, __wsum sum,
10182+ int *src_err_ptr, int *dst_err_ptr);
df50ba0c 10183+
efbe55a5
MT
10184 /*
10185 * Note: when you get a NULL pointer exception here this means someone
10186 * passed in an incorrect kernel address to one of these functions.
fe2de317 10187@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
efbe55a5 10188 int *err_ptr)
df50ba0c 10189 {
efbe55a5
MT
10190 might_sleep();
10191- return csum_partial_copy_generic((__force void *)src, dst,
10192+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
10193 len, sum, err_ptr, NULL);
58c5fc13
MT
10194 }
10195
fe2de317 10196@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
58c5fc13
MT
10197 {
10198 might_sleep();
10199 if (access_ok(VERIFY_WRITE, dst, len))
10200- return csum_partial_copy_generic(src, (__force void *)dst,
10201+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
10202 len, sum, NULL, err_ptr);
10203
10204 if (len)
4c928ab7 10205diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
5e856224 10206index 99480e5..d81165b 100644
4c928ab7
MT
10207--- a/arch/x86/include/asm/cmpxchg.h
10208+++ b/arch/x86/include/asm/cmpxchg.h
5e856224 10209@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
4c928ab7
MT
10210 __compiletime_error("Bad argument size for cmpxchg");
10211 extern void __xadd_wrong_size(void)
10212 __compiletime_error("Bad argument size for xadd");
10213+extern void __xadd_check_overflow_wrong_size(void)
10214+ __compiletime_error("Bad argument size for xadd_check_overflow");
5e856224
MT
10215 extern void __add_wrong_size(void)
10216 __compiletime_error("Bad argument size for add");
10217+extern void __add_check_overflow_wrong_size(void)
10218+ __compiletime_error("Bad argument size for add_check_overflow");
4c928ab7
MT
10219
10220 /*
10221 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
5e856224 10222@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
4c928ab7
MT
10223 __ret; \
10224 })
10225
5e856224 10226+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
4c928ab7 10227+ ({ \
5e856224 10228+ __typeof__ (*(ptr)) __ret = (arg); \
4c928ab7
MT
10229+ switch (sizeof(*(ptr))) { \
10230+ case __X86_CASE_L: \
5e856224 10231+ asm volatile (lock #op "l %0, %1\n" \
4c928ab7
MT
10232+ "jno 0f\n" \
10233+ "mov %0,%1\n" \
10234+ "int $4\n0:\n" \
10235+ _ASM_EXTABLE(0b, 0b) \
10236+ : "+r" (__ret), "+m" (*(ptr)) \
10237+ : : "memory", "cc"); \
10238+ break; \
10239+ case __X86_CASE_Q: \
5e856224 10240+ asm volatile (lock #op "q %q0, %1\n" \
4c928ab7
MT
10241+ "jno 0f\n" \
10242+ "mov %0,%1\n" \
10243+ "int $4\n0:\n" \
10244+ _ASM_EXTABLE(0b, 0b) \
10245+ : "+r" (__ret), "+m" (*(ptr)) \
10246+ : : "memory", "cc"); \
10247+ break; \
10248+ default: \
5e856224 10249+ __ ## op ## _check_overflow_wrong_size(); \
4c928ab7
MT
10250+ } \
10251+ __ret; \
10252+ })
10253+
10254 /*
5e856224
MT
10255 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
10256 * Since this is generally used to protect other memory information, we
10257@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
4c928ab7
MT
10258 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
10259 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
10260
5e856224
MT
10261+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
10262+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
4c928ab7 10263+
5e856224
MT
10264 #define __add(ptr, inc, lock) \
10265 ({ \
10266 __typeof__ (*(ptr)) __ret = (inc); \
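xadd_check_overflow() in the cmpxchg.h hunk above applies the same jno/rollback scheme to fetch-and-add: xadd leaves the old value in the register operand, so the recovery path can simply store it back when OF was set. A userspace sketch of that primitive, again assuming x86 GCC inline asm; xadd_checked and the reported flag are invented, and the int $4 trap is replaced by a flag. As in the hunk, the rollback is a plain store rather than another locked operation.

#include <limits.h>
#include <stdio.h>

/* Fetch-and-add with overflow rollback, modelled on __xchg_op_check_overflow. */
static int xadd_checked(int *counter, int delta, int *overflowed)
{
    int old = delta;
    unsigned char of;

    asm volatile("lock xaddl %0, %1\n\t"
                 "seto %2\n\t"
                 "jno 1f\n\t"
                 "movl %0, %1\n"      /* overflow: put the old value back */
                 "1:"
                 : "+r" (old), "+m" (*counter), "=q" (of)
                 : : "cc", "memory");

    *overflowed = of;
    return old;                       /* value seen before the add */
}

int main(void)
{
    int counter = INT_MAX, overflowed = 0;
    int old = xadd_checked(&counter, 1, &overflowed);

    printf("old=%d counter=%d overflowed=%d\n", old, counter, overflowed);
    return 0;
}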
fe2de317 10267diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
572b4308 10268index f91e80f..7f9bd27 100644
fe2de317
MT
10269--- a/arch/x86/include/asm/cpufeature.h
10270+++ b/arch/x86/include/asm/cpufeature.h
c6e2a6c8 10271@@ -371,7 +371,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
6892158b
MT
10272 ".section .discard,\"aw\",@progbits\n"
10273 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
57199397
MT
10274 ".previous\n"
10275- ".section .altinstr_replacement,\"ax\"\n"
10276+ ".section .altinstr_replacement,\"a\"\n"
10277 "3: movb $1,%0\n"
10278 "4:\n"
10279 ".previous\n"
fe2de317 10280diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
5e856224 10281index e95822d..a90010e 100644
fe2de317
MT
10282--- a/arch/x86/include/asm/desc.h
10283+++ b/arch/x86/include/asm/desc.h
ae4e228f
MT
10284@@ -4,6 +4,7 @@
10285 #include <asm/desc_defs.h>
10286 #include <asm/ldt.h>
10287 #include <asm/mmu.h>
10288+#include <asm/pgtable.h>
15a11c5b 10289
ae4e228f
MT
10290 #include <linux/smp.h>
10291
fe2de317 10292@@ -16,6 +17,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15a11c5b
MT
10293
10294 desc->type = (info->read_exec_only ^ 1) << 1;
10295 desc->type |= info->contents << 2;
10296+ desc->type |= info->seg_not_present ^ 1;
10297
10298 desc->s = 1;
10299 desc->dpl = 0x3;
5e856224 10300@@ -34,19 +36,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
58c5fc13
MT
10301 }
10302
10303 extern struct desc_ptr idt_descr;
10304-extern gate_desc idt_table[];
5e856224
MT
10305 extern struct desc_ptr nmi_idt_descr;
10306-extern gate_desc nmi_idt_table[];
58c5fc13
MT
10307-
10308-struct gdt_page {
10309- struct desc_struct gdt[GDT_ENTRIES];
10310-} __attribute__((aligned(PAGE_SIZE)));
15a11c5b 10311-
58c5fc13
MT
10312-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
10313+extern gate_desc idt_table[256];
5e856224 10314+extern gate_desc nmi_idt_table[256];
58c5fc13
MT
10315
10316+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
10317 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
10318 {
10319- return per_cpu(gdt_page, cpu).gdt;
10320+ return cpu_gdt_table[cpu];
10321 }
10322
10323 #ifdef CONFIG_X86_64
5e856224 10324@@ -71,8 +68,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
317566c1
MT
10325 unsigned long base, unsigned dpl, unsigned flags,
10326 unsigned short seg)
10327 {
10328- gate->a = (seg << 16) | (base & 0xffff);
15a11c5b
MT
10329- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
10330+ gate->gate.offset_low = base;
10331+ gate->gate.seg = seg;
10332+ gate->gate.reserved = 0;
10333+ gate->gate.type = type;
10334+ gate->gate.s = 0;
10335+ gate->gate.dpl = dpl;
10336+ gate->gate.p = 1;
10337+ gate->gate.offset_high = base >> 16;
317566c1
MT
10338 }
10339
10340 #endif
5e856224 10341@@ -117,12 +120,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
15a11c5b
MT
10342
10343 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
58c5fc13 10344 {
ae4e228f 10345+ pax_open_kernel();
58c5fc13 10346 memcpy(&idt[entry], gate, sizeof(*gate));
ae4e228f 10347+ pax_close_kernel();
58c5fc13
MT
10348 }
10349
15a11c5b 10350 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
58c5fc13 10351 {
ae4e228f 10352+ pax_open_kernel();
58c5fc13 10353 memcpy(&ldt[entry], desc, 8);
ae4e228f 10354+ pax_close_kernel();
58c5fc13
MT
10355 }
10356
15a11c5b 10357 static inline void
5e856224 10358@@ -136,7 +143,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
15a11c5b 10359 default: size = sizeof(*gdt); break;
58c5fc13 10360 }
15a11c5b 10361
ae4e228f 10362+ pax_open_kernel();
58c5fc13 10363 memcpy(&gdt[entry], desc, size);
ae4e228f 10364+ pax_close_kernel();
58c5fc13
MT
10365 }
10366
10367 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
5e856224 10368@@ -209,7 +218,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
58c5fc13
MT
10369
10370 static inline void native_load_tr_desc(void)
10371 {
ae4e228f 10372+ pax_open_kernel();
58c5fc13 10373 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
ae4e228f 10374+ pax_close_kernel();
58c5fc13
MT
10375 }
10376
10377 static inline void native_load_gdt(const struct desc_ptr *dtr)
5e856224 10378@@ -246,8 +257,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
58c5fc13 10379 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
15a11c5b 10380 unsigned int i;
58c5fc13 10381
ae4e228f 10382+ pax_open_kernel();
58c5fc13
MT
10383 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
10384 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
ae4e228f 10385+ pax_close_kernel();
58c5fc13
MT
10386 }
10387
10388 #define _LDT_empty(info) \
5e856224 10389@@ -310,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
df50ba0c
MT
10390 }
10391
5e856224
MT
10392 #ifdef CONFIG_X86_64
10393-static inline void set_nmi_gate(int gate, void *addr)
10394+static inline void set_nmi_gate(int gate, const void *addr)
10395 {
10396 gate_desc s;
10397
10398@@ -319,7 +332,7 @@ static inline void set_nmi_gate(int gate, void *addr)
10399 }
10400 #endif
10401
df50ba0c
MT
10402-static inline void _set_gate(int gate, unsigned type, void *addr,
10403+static inline void _set_gate(int gate, unsigned type, const void *addr,
10404 unsigned dpl, unsigned ist, unsigned seg)
10405 {
10406 gate_desc s;
5e856224 10407@@ -338,7 +351,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
df50ba0c
MT
10408 * Pentium F0 0F bugfix can have resulted in the mapped
10409 * IDT being write-protected.
10410 */
10411-static inline void set_intr_gate(unsigned int n, void *addr)
10412+static inline void set_intr_gate(unsigned int n, const void *addr)
10413 {
10414 BUG_ON((unsigned)n > 0xFF);
10415 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
5e856224 10416@@ -368,19 +381,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
df50ba0c
MT
10417 /*
10418 * This routine sets up an interrupt gate at directory privilege level 3.
10419 */
10420-static inline void set_system_intr_gate(unsigned int n, void *addr)
10421+static inline void set_system_intr_gate(unsigned int n, const void *addr)
10422 {
10423 BUG_ON((unsigned)n > 0xFF);
10424 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
10425 }
10426
10427-static inline void set_system_trap_gate(unsigned int n, void *addr)
10428+static inline void set_system_trap_gate(unsigned int n, const void *addr)
10429 {
10430 BUG_ON((unsigned)n > 0xFF);
10431 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
10432 }
10433
10434-static inline void set_trap_gate(unsigned int n, void *addr)
10435+static inline void set_trap_gate(unsigned int n, const void *addr)
10436 {
10437 BUG_ON((unsigned)n > 0xFF);
10438 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
5e856224 10439@@ -389,19 +402,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
df50ba0c
MT
10440 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
10441 {
10442 BUG_ON((unsigned)n > 0xFF);
10443- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
10444+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
10445 }
10446
10447-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
10448+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
10449 {
10450 BUG_ON((unsigned)n > 0xFF);
10451 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
10452 }
10453
10454-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
10455+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
10456 {
10457 BUG_ON((unsigned)n > 0xFF);
58c5fc13
MT
10458 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
10459 }
10460
10461+#ifdef CONFIG_X86_32
10462+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
10463+{
10464+ struct desc_struct d;
10465+
10466+ if (likely(limit))
10467+ limit = (limit - 1UL) >> PAGE_SHIFT;
10468+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
10469+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
10470+}
10471+#endif
10472+
10473 #endif /* _ASM_X86_DESC_H */
fe2de317
MT
10474diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
10475index 278441f..b95a174 100644
10476--- a/arch/x86/include/asm/desc_defs.h
10477+++ b/arch/x86/include/asm/desc_defs.h
10478@@ -31,6 +31,12 @@ struct desc_struct {
10479 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
10480 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
10481 };
10482+ struct {
10483+ u16 offset_low;
10484+ u16 seg;
10485+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
10486+ unsigned offset_high: 16;
10487+ } gate;
10488 };
10489 } __attribute__((packed));
10490
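The desc_defs.h hunk gives desc_struct a second, gate-shaped view so that pack_gate() in the desc.h hunk earlier can assign named bit-fields instead of shifting raw 32-bit words. A quick standalone check of that layout, assuming the same field widths; gate_view and the sample values are invented, and the struct should come out at the architectural 8 bytes of a 32-bit gate descriptor.

#include <stdint.h>
#include <stdio.h>

/* The gate view added to desc_struct above, reproduced on its own. */
struct gate_view {
    uint16_t offset_low;
    uint16_t seg;
    unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
    unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
    struct gate_view g = {
        .offset_low  = 0x1234,
        .seg         = 0x0008,      /* kernel-code-style selector */
        .type        = 0xe,         /* 32-bit interrupt gate */
        .dpl         = 0,
        .p           = 1,
        .offset_high = 0xdead,
    };

    printf("sizeof(struct gate_view) = %zu\n", sizeof(g));   /* expect 8 */
    printf("handler offset = 0x%04x%04x\n", g.offset_high, g.offset_low);
    return 0;
}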
10491diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
5e856224 10492index 3778256..c5d4fce 100644
fe2de317
MT
10493--- a/arch/x86/include/asm/e820.h
10494+++ b/arch/x86/include/asm/e820.h
57199397 10495@@ -69,7 +69,7 @@ struct e820map {
ae4e228f 10496 #define ISA_START_ADDRESS 0xa0000
58c5fc13 10497 #define ISA_END_ADDRESS 0x100000
58c5fc13
MT
10498
10499-#define BIOS_BEGIN 0x000a0000
10500+#define BIOS_BEGIN 0x000c0000
10501 #define BIOS_END 0x00100000
10502
bc901d79 10503 #define BIOS_ROM_BASE 0xffe00000
fe2de317 10504diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
c6e2a6c8 10505index 5939f44..f8845f6 100644
fe2de317
MT
10506--- a/arch/x86/include/asm/elf.h
10507+++ b/arch/x86/include/asm/elf.h
c6e2a6c8 10508@@ -243,7 +243,25 @@ extern int force_personality32;
58c5fc13
MT
10509 the loader. We need to make sure that it is out of the way of the program
10510 that it will "exec", and that there is sufficient room for the brk. */
10511
10512+#ifdef CONFIG_PAX_SEGMEXEC
10513+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
10514+#else
10515 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
10516+#endif
10517+
10518+#ifdef CONFIG_PAX_ASLR
10519+#ifdef CONFIG_X86_32
10520+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
10521+
10522+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10523+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
10524+#else
10525+#define PAX_ELF_ET_DYN_BASE 0x400000UL
10526+
c6e2a6c8
MT
10527+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
10528+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
58c5fc13
MT
10529+#endif
10530+#endif
10531
10532 /* This yields a mask that user programs can use to figure out what
10533 instruction set this CPU supports. This could be done in user space,
c6e2a6c8 10534@@ -296,16 +314,12 @@ do { \
15a11c5b 10535
58c5fc13
MT
10536 #define ARCH_DLINFO \
10537 do { \
15a11c5b 10538- if (vdso_enabled) \
58c5fc13
MT
10539- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10540- (unsigned long)current->mm->context.vdso); \
15a11c5b 10541+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
58c5fc13
MT
10542 } while (0)
10543
c6e2a6c8
MT
10544 #define ARCH_DLINFO_X32 \
10545 do { \
10546- if (vdso_enabled) \
10547- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10548- (unsigned long)current->mm->context.vdso); \
10549+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10550 } while (0)
10551
58c5fc13 10552 #define AT_SYSINFO 32
c6e2a6c8 10553@@ -320,7 +334,7 @@ else \
58c5fc13
MT
10554
10555 #endif /* !CONFIG_X86_32 */
10556
10557-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10558+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10559
10560 #define VDSO_ENTRY \
10561 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
c6e2a6c8 10562@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
58c5fc13
MT
10563 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10564 #define compat_arch_setup_additional_pages syscall32_setup_pages
10565
10566-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10567-#define arch_randomize_brk arch_randomize_brk
10568-
4c928ab7
MT
10569 /*
10570 * True on X86_32 or when emulating IA32 on X86_64
10571 */
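The elf.h changes pin the PaX ET_DYN load bases (PAX_ELF_ET_DYN_BASE) and size the randomisation windows in bits of page-granular entropy (PAX_DELTA_MMAP_LEN / PAX_DELTA_STACK_LEN). The arithmetic amounts to "base plus a random multiple of PAGE_SIZE inside a 2^bits-page window"; a toy illustration with made-up values follows, using rand() only as a stand-in for the kernel's entropy source (which may give fewer than 16 useful bits here) and none of the kernel's real layout code.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT          12
#define DEMO_ET_DYN_BASE    0x400000UL   /* the 64-bit base used above */
#define DEMO_DELTA_MMAP_LEN 16           /* 16 bits of page-granular entropy */

int main(void)
{
    unsigned long delta;

    srand((unsigned)time(NULL));
    /* pick a random page count inside the 2^DEMO_DELTA_MMAP_LEN window */
    delta = ((unsigned long)rand() & ((1UL << DEMO_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;

    printf("randomised ET_DYN base: %#lx (delta %#lx, %d bits)\n",
           DEMO_ET_DYN_BASE + delta, delta, DEMO_DELTA_MMAP_LEN);
    return 0;
}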
fe2de317
MT
10572diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10573index cc70c1c..d96d011 100644
10574--- a/arch/x86/include/asm/emergency-restart.h
10575+++ b/arch/x86/include/asm/emergency-restart.h
66a7e928
MT
10576@@ -15,6 +15,6 @@ enum reboot_type {
10577
10578 extern enum reboot_type reboot_type;
10579
10580-extern void machine_emergency_restart(void);
10581+extern void machine_emergency_restart(void) __noreturn;
10582
10583 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
c6e2a6c8
MT
10584diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
10585index 4fa8815..71b121a 100644
10586--- a/arch/x86/include/asm/fpu-internal.h
10587+++ b/arch/x86/include/asm/fpu-internal.h
10588@@ -86,6 +86,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10589 {
10590 int err;
4c928ab7 10591
c6e2a6c8
MT
10592+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10593+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10594+ fx = (struct i387_fxsave_struct __user *)((void *)fx + PAX_USER_SHADOW_BASE);
10595+#endif
10596+
10597 /* See comment in fxsave() below. */
10598 #ifdef CONFIG_AS_FXSAVEQ
10599 asm volatile("1: fxrstorq %[fx]\n\t"
10600@@ -115,6 +120,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
4c928ab7 10601 {
c6e2a6c8
MT
10602 int err;
10603
10604+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10605+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10606+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10607+#endif
10608+
10609 /*
10610 * Clear the bytes not touched by the fxsave and reserved
10611 * for the SW usage.
10612@@ -271,7 +281,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
10613 "emms\n\t" /* clear stack tags */
10614 "fildl %P[addr]", /* set F?P to defined value */
10615 X86_FEATURE_FXSAVE_LEAK,
10616- [addr] "m" (tsk->thread.fpu.has_fpu));
10617+ [addr] "m" (init_tss[smp_processor_id()].x86_tss.sp0));
10618
10619 return fpu_restore_checking(&tsk->thread.fpu);
10620 }
fe2de317 10621diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
c6e2a6c8 10622index 71ecbcb..bac10b7 100644
fe2de317
MT
10623--- a/arch/x86/include/asm/futex.h
10624+++ b/arch/x86/include/asm/futex.h
c6e2a6c8
MT
10625@@ -11,16 +11,18 @@
10626 #include <asm/processor.h>
58c5fc13 10627
df50ba0c 10628 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
6e9df6a3 10629+ typecheck(u32 __user *, uaddr); \
58c5fc13
MT
10630 asm volatile("1:\t" insn "\n" \
10631 "2:\t.section .fixup,\"ax\"\n" \
df50ba0c
MT
10632 "3:\tmov\t%3, %1\n" \
10633 "\tjmp\t2b\n" \
10634 "\t.previous\n" \
10635 _ASM_EXTABLE(1b, 3b) \
10636- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
6e9df6a3 10637+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
df50ba0c
MT
10638 : "i" (-EFAULT), "0" (oparg), "1" (0))
10639
10640 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
6e9df6a3 10641+ typecheck(u32 __user *, uaddr); \
df50ba0c
MT
10642 asm volatile("1:\tmovl %2, %0\n" \
10643 "\tmovl\t%0, %3\n" \
10644 "\t" insn "\n" \
c6e2a6c8 10645@@ -33,7 +35,7 @@
df50ba0c
MT
10646 _ASM_EXTABLE(1b, 4b) \
10647 _ASM_EXTABLE(2b, 4b) \
58c5fc13 10648 : "=&a" (oldval), "=&r" (ret), \
df50ba0c 10649- "+m" (*uaddr), "=&r" (tem) \
6e9df6a3 10650+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
58c5fc13 10651 : "r" (oparg), "i" (-EFAULT), "1" (0))
58c5fc13 10652
66a7e928 10653 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
c6e2a6c8 10654@@ -60,10 +62,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
58c5fc13
MT
10655
10656 switch (op) {
10657 case FUTEX_OP_SET:
bc901d79 10658- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16454cff 10659+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
58c5fc13
MT
10660 break;
10661 case FUTEX_OP_ADD:
bc901d79 10662- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16454cff 10663+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
58c5fc13 10664 uaddr, oparg);
58c5fc13
MT
10665 break;
10666 case FUTEX_OP_OR:
c6e2a6c8 10667@@ -122,13 +124,13 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
66a7e928 10668 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
58c5fc13
MT
10669 return -EFAULT;
10670
66a7e928
MT
10671- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
10672+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
bc901d79 10673 "2:\t.section .fixup, \"ax\"\n"
66a7e928 10674 "3:\tmov %3, %0\n"
58c5fc13
MT
10675 "\tjmp 2b\n"
10676 "\t.previous\n"
10677 _ASM_EXTABLE(1b, 3b)
66a7e928 10678- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
6e9df6a3 10679+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
66a7e928 10680 : "i" (-EFAULT), "r" (newval), "1" (oldval)
58c5fc13
MT
10681 : "memory"
10682 );
fe2de317 10683diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
4c928ab7 10684index eb92a6e..b98b2f4 100644
fe2de317
MT
10685--- a/arch/x86/include/asm/hw_irq.h
10686+++ b/arch/x86/include/asm/hw_irq.h
6e9df6a3 10687@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
8308f9c9
MT
10688 extern void enable_IO_APIC(void);
10689
10690 /* Statistics */
10691-extern atomic_t irq_err_count;
10692-extern atomic_t irq_mis_count;
10693+extern atomic_unchecked_t irq_err_count;
10694+extern atomic_unchecked_t irq_mis_count;
10695
10696 /* EISA */
10697 extern void eisa_set_level_irq(unsigned int irq);
fe2de317
MT
10698diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
10699index d8e8eef..99f81ae 100644
10700--- a/arch/x86/include/asm/io.h
10701+++ b/arch/x86/include/asm/io.h
6e9df6a3 10702@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
58c5fc13
MT
10703
10704 #include <linux/vmalloc.h>
10705
10706+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
ae4e228f 10707+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
58c5fc13 10708+{
c52201e0 10709+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
58c5fc13
MT
10710+}
10711+
ae4e228f 10712+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
58c5fc13 10713+{
c52201e0 10714+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
58c5fc13
MT
10715+}
10716+
df50ba0c
MT
10717 /*
10718 * Convert a virtual cached pointer to an uncached pointer
10719 */
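valid_phys_addr_range() above rounds the end of the request up to a page boundary and compares page frame numbers against the CPU's physical address width, so a /dev/mem access cannot reach past addressable physical memory. The check is plain arithmetic and easy to exercise on its own; range_ok and the test values below are invented, and a 64-bit host is assumed for the out-of-range case.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Same shape as the check added to asm/io.h above, with the physical
 * address width passed in instead of read from boot_cpu_data. */
static int range_ok(unsigned long addr, unsigned long count, unsigned int phys_bits)
{
    return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
           (1ULL << (phys_bits - PAGE_SHIFT)) ? 1 : 0;
}

int main(void)
{
    /* 4 KiB at 1 MiB on a CPU with 36 physical address bits: accepted */
    printf("%d\n", range_ok(0x100000UL, 4096, 36));
    /* 4 KiB starting beyond the 36-bit limit: rejected */
    printf("%d\n", range_ok(1UL << 36, 4096, 36));
    return 0;
}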
fe2de317
MT
10720diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10721index bba3cf8..06bc8da 100644
10722--- a/arch/x86/include/asm/irqflags.h
10723+++ b/arch/x86/include/asm/irqflags.h
10724@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
ae4e228f
MT
10725 sti; \
10726 sysexit
10727
df50ba0c
MT
10728+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10729+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10730+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10731+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
ae4e228f
MT
10732+
10733 #else
58c5fc13
MT
10734 #define INTERRUPT_RETURN iret
10735 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
fe2de317
MT
10736diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10737index 5478825..839e88c 100644
10738--- a/arch/x86/include/asm/kprobes.h
10739+++ b/arch/x86/include/asm/kprobes.h
71d190be
MT
10740@@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
10741 #define RELATIVEJUMP_SIZE 5
10742 #define RELATIVECALL_OPCODE 0xe8
10743 #define RELATIVE_ADDR_SIZE 4
10744-#define MAX_STACK_SIZE 64
10745-#define MIN_STACK_SIZE(ADDR) \
10746- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10747- THREAD_SIZE - (unsigned long)(ADDR))) \
10748- ? (MAX_STACK_SIZE) \
10749- : (((unsigned long)current_thread_info()) + \
10750- THREAD_SIZE - (unsigned long)(ADDR)))
10751+#define MAX_STACK_SIZE 64UL
10752+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10753
10754 #define flush_insn_slot(p) do { } while (0)
10755
fe2de317 10756diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
c6e2a6c8 10757index e216ba0..453f6ec 100644
fe2de317
MT
10758--- a/arch/x86/include/asm/kvm_host.h
10759+++ b/arch/x86/include/asm/kvm_host.h
c6e2a6c8 10760@@ -679,7 +679,7 @@ struct kvm_x86_ops {
4c928ab7
MT
10761 int (*check_intercept)(struct kvm_vcpu *vcpu,
10762 struct x86_instruction_info *info,
15a11c5b 10763 enum x86_intercept_stage stage);
15a11c5b
MT
10764-};
10765+} __do_const;
10766
10767 struct kvm_arch_async_pf {
10768 u32 token;
fe2de317 10769diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
c6e2a6c8 10770index c8bed0d..e5721fa 100644
fe2de317
MT
10771--- a/arch/x86/include/asm/local.h
10772+++ b/arch/x86/include/asm/local.h
c6e2a6c8 10773@@ -17,26 +17,58 @@ typedef struct {
58c5fc13
MT
10774
10775 static inline void local_inc(local_t *l)
10776 {
10777- asm volatile(_ASM_INC "%0"
10778+ asm volatile(_ASM_INC "%0\n"
10779+
10780+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10781+ "jno 0f\n"
58c5fc13 10782+ _ASM_DEC "%0\n"
bc901d79
MT
10783+ "int $4\n0:\n"
10784+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10785+#endif
10786+
10787 : "+m" (l->a.counter));
10788 }
10789
10790 static inline void local_dec(local_t *l)
10791 {
10792- asm volatile(_ASM_DEC "%0"
10793+ asm volatile(_ASM_DEC "%0\n"
10794+
10795+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10796+ "jno 0f\n"
58c5fc13 10797+ _ASM_INC "%0\n"
bc901d79
MT
10798+ "int $4\n0:\n"
10799+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10800+#endif
10801+
10802 : "+m" (l->a.counter));
10803 }
10804
10805 static inline void local_add(long i, local_t *l)
10806 {
10807- asm volatile(_ASM_ADD "%1,%0"
10808+ asm volatile(_ASM_ADD "%1,%0\n"
10809+
10810+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10811+ "jno 0f\n"
58c5fc13 10812+ _ASM_SUB "%1,%0\n"
bc901d79
MT
10813+ "int $4\n0:\n"
10814+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10815+#endif
10816+
10817 : "+m" (l->a.counter)
10818 : "ir" (i));
10819 }
10820
10821 static inline void local_sub(long i, local_t *l)
10822 {
10823- asm volatile(_ASM_SUB "%1,%0"
10824+ asm volatile(_ASM_SUB "%1,%0\n"
10825+
10826+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10827+ "jno 0f\n"
58c5fc13 10828+ _ASM_ADD "%1,%0\n"
bc901d79
MT
10829+ "int $4\n0:\n"
10830+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10831+#endif
10832+
10833 : "+m" (l->a.counter)
10834 : "ir" (i));
10835 }
c6e2a6c8 10836@@ -54,7 +86,16 @@ static inline int local_sub_and_test(long i, local_t *l)
58c5fc13
MT
10837 {
10838 unsigned char c;
10839
10840- asm volatile(_ASM_SUB "%2,%0; sete %1"
10841+ asm volatile(_ASM_SUB "%2,%0\n"
10842+
10843+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10844+ "jno 0f\n"
58c5fc13 10845+ _ASM_ADD "%2,%0\n"
bc901d79
MT
10846+ "int $4\n0:\n"
10847+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10848+#endif
10849+
10850+ "sete %1\n"
10851 : "+m" (l->a.counter), "=qm" (c)
10852 : "ir" (i) : "memory");
10853 return c;
c6e2a6c8 10854@@ -72,7 +113,16 @@ static inline int local_dec_and_test(local_t *l)
58c5fc13
MT
10855 {
10856 unsigned char c;
10857
10858- asm volatile(_ASM_DEC "%0; sete %1"
10859+ asm volatile(_ASM_DEC "%0\n"
10860+
10861+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10862+ "jno 0f\n"
58c5fc13 10863+ _ASM_INC "%0\n"
bc901d79
MT
10864+ "int $4\n0:\n"
10865+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10866+#endif
10867+
10868+ "sete %1\n"
10869 : "+m" (l->a.counter), "=qm" (c)
10870 : : "memory");
10871 return c != 0;
c6e2a6c8 10872@@ -90,7 +140,16 @@ static inline int local_inc_and_test(local_t *l)
58c5fc13
MT
10873 {
10874 unsigned char c;
10875
10876- asm volatile(_ASM_INC "%0; sete %1"
10877+ asm volatile(_ASM_INC "%0\n"
10878+
10879+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10880+ "jno 0f\n"
58c5fc13 10881+ _ASM_DEC "%0\n"
bc901d79
MT
10882+ "int $4\n0:\n"
10883+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10884+#endif
10885+
10886+ "sete %1\n"
10887 : "+m" (l->a.counter), "=qm" (c)
10888 : : "memory");
10889 return c != 0;
c6e2a6c8 10890@@ -109,7 +168,16 @@ static inline int local_add_negative(long i, local_t *l)
58c5fc13
MT
10891 {
10892 unsigned char c;
10893
10894- asm volatile(_ASM_ADD "%2,%0; sets %1"
10895+ asm volatile(_ASM_ADD "%2,%0\n"
10896+
10897+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10898+ "jno 0f\n"
58c5fc13 10899+ _ASM_SUB "%2,%0\n"
bc901d79
MT
10900+ "int $4\n0:\n"
10901+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10902+#endif
10903+
10904+ "sets %1\n"
10905 : "+m" (l->a.counter), "=qm" (c)
10906 : "ir" (i) : "memory");
10907 return c;
c6e2a6c8 10908@@ -132,7 +200,15 @@ static inline long local_add_return(long i, local_t *l)
58c5fc13
MT
10909 #endif
10910 /* Modern 486+ processor */
10911 __i = i;
10912- asm volatile(_ASM_XADD "%0, %1;"
10913+ asm volatile(_ASM_XADD "%0, %1\n"
10914+
10915+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 10916+ "jno 0f\n"
58c5fc13 10917+ _ASM_MOV "%0,%1\n"
bc901d79
MT
10918+ "int $4\n0:\n"
10919+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
10920+#endif
10921+
10922 : "+r" (i), "+m" (l->a.counter)
10923 : : "memory");
10924 return i + __i;
fe2de317
MT
10925diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10926index 593e51d..fa69c9a 100644
10927--- a/arch/x86/include/asm/mman.h
10928+++ b/arch/x86/include/asm/mman.h
ae4e228f
MT
10929@@ -5,4 +5,14 @@
10930
10931 #include <asm-generic/mman.h>
58c5fc13
MT
10932
10933+#ifdef __KERNEL__
10934+#ifndef __ASSEMBLY__
10935+#ifdef CONFIG_X86_32
10936+#define arch_mmap_check i386_mmap_check
10937+int i386_mmap_check(unsigned long addr, unsigned long len,
10938+ unsigned long flags);
10939+#endif
10940+#endif
10941+#endif
10942+
10943 #endif /* _ASM_X86_MMAN_H */
fe2de317
MT
10944diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10945index 5f55e69..e20bfb1 100644
10946--- a/arch/x86/include/asm/mmu.h
10947+++ b/arch/x86/include/asm/mmu.h
10948@@ -9,7 +9,7 @@
10949 * we put the segment information here.
10950 */
10951 typedef struct {
10952- void *ldt;
10953+ struct desc_struct *ldt;
10954 int size;
10955
10956 #ifdef CONFIG_X86_64
10957@@ -18,7 +18,19 @@ typedef struct {
10958 #endif
10959
10960 struct mutex lock;
10961- void *vdso;
10962+ unsigned long vdso;
10963+
10964+#ifdef CONFIG_X86_32
10965+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10966+ unsigned long user_cs_base;
10967+ unsigned long user_cs_limit;
10968+
10969+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10970+ cpumask_t cpu_user_cs_mask;
10971+#endif
10972+
10973+#endif
10974+#endif
10975 } mm_context_t;
10976
10977 #ifdef CONFIG_SMP
10978diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
c6e2a6c8 10979index 6902152..da4283a 100644
fe2de317
MT
10980--- a/arch/x86/include/asm/mmu_context.h
10981+++ b/arch/x86/include/asm/mmu_context.h
10982@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
df50ba0c
MT
10983
10984 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10985 {
10986+
10987+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10988+ unsigned int i;
10989+ pgd_t *pgd;
10990+
10991+ pax_open_kernel();
10992+ pgd = get_cpu_pgd(smp_processor_id());
10993+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
15a11c5b 10994+ set_pgd_batched(pgd+i, native_make_pgd(0));
df50ba0c
MT
10995+ pax_close_kernel();
10996+#endif
10997+
10998 #ifdef CONFIG_SMP
10999 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
11000 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
fe2de317 11001@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
58c5fc13
MT
11002 struct task_struct *tsk)
11003 {
11004 unsigned cpu = smp_processor_id();
15a11c5b 11005+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
58c5fc13
MT
11006+ int tlbstate = TLBSTATE_OK;
11007+#endif
11008
11009 if (likely(prev != next)) {
58c5fc13 11010 #ifdef CONFIG_SMP
15a11c5b 11011+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
58c5fc13
MT
11012+ tlbstate = percpu_read(cpu_tlbstate.state);
11013+#endif
11014 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11015 percpu_write(cpu_tlbstate.active_mm, next);
11016 #endif
df50ba0c
MT
11017 cpumask_set_cpu(cpu, mm_cpumask(next));
11018
11019 /* Re-load page tables */
11020+#ifdef CONFIG_PAX_PER_CPU_PGD
11021+ pax_open_kernel();
c6e2a6c8
MT
11022+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11023+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
df50ba0c
MT
11024+ pax_close_kernel();
11025+ load_cr3(get_cpu_pgd(cpu));
11026+#else
11027 load_cr3(next->pgd);
11028+#endif
ea610fa8 11029
c52201e0
MT
11030 /* stop flush ipis for the previous mm */
11031 cpumask_clear_cpu(cpu, mm_cpumask(prev));
fe2de317 11032@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
58c5fc13
MT
11033 */
11034 if (unlikely(prev->context.ldt != next->context.ldt))
11035 load_LDT_nolock(&next->context);
df50ba0c 11036- }
58c5fc13
MT
11037+
11038+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
ae4e228f 11039+ if (!(__supported_pte_mask & _PAGE_NX)) {
58c5fc13
MT
11040+ smp_mb__before_clear_bit();
11041+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
11042+ smp_mb__after_clear_bit();
11043+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11044+ }
11045+#endif
11046+
11047+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11048+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
ae4e228f
MT
11049+ prev->context.user_cs_limit != next->context.user_cs_limit))
11050+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
df50ba0c 11051 #ifdef CONFIG_SMP
ae4e228f 11052+ else if (unlikely(tlbstate != TLBSTATE_OK))
58c5fc13
MT
11053+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11054+#endif
ae4e228f 11055+#endif
58c5fc13 11056+
df50ba0c 11057+ }
58c5fc13 11058 else {
df50ba0c
MT
11059+
11060+#ifdef CONFIG_PAX_PER_CPU_PGD
11061+ pax_open_kernel();
c6e2a6c8
MT
11062+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
11063+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
df50ba0c
MT
11064+ pax_close_kernel();
11065+ load_cr3(get_cpu_pgd(cpu));
11066+#endif
11067+
11068+#ifdef CONFIG_SMP
11069 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
11070 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
11071
fe2de317 11072@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
df50ba0c
MT
11073 * tlb flush IPI delivery. We must reload CR3
11074 * to make sure to use no freed page tables.
58c5fc13 11075 */
df50ba0c
MT
11076+
11077+#ifndef CONFIG_PAX_PER_CPU_PGD
58c5fc13 11078 load_cr3(next->pgd);
df50ba0c
MT
11079+#endif
11080+
58c5fc13
MT
11081 load_LDT_nolock(&next->context);
11082+
11083+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
ae4e228f 11084+ if (!(__supported_pte_mask & _PAGE_NX))
58c5fc13
MT
11085+ cpu_set(cpu, next->context.cpu_user_cs_mask);
11086+#endif
11087+
11088+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
11089+#ifdef CONFIG_PAX_PAGEEXEC
ae4e228f 11090+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
58c5fc13
MT
11091+#endif
11092+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
11093+#endif
11094+
11095 }
fe2de317
MT
11096+#endif
11097 }
11098-#endif
df50ba0c
MT
11099 }
11100
11101 #define activate_mm(prev, next) \
fe2de317
MT
11102diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
11103index 9eae775..c914fea 100644
11104--- a/arch/x86/include/asm/module.h
11105+++ b/arch/x86/include/asm/module.h
71d190be
MT
11106@@ -5,6 +5,7 @@
11107
11108 #ifdef CONFIG_X86_64
11109 /* X86_64 does not define MODULE_PROC_FAMILY */
11110+#define MODULE_PROC_FAMILY ""
11111 #elif defined CONFIG_M386
11112 #define MODULE_PROC_FAMILY "386 "
11113 #elif defined CONFIG_M486
fe2de317 11114@@ -59,8 +60,20 @@
df50ba0c
MT
11115 #error unknown processor family
11116 #endif
11117
71d190be
MT
11118-#ifdef CONFIG_X86_32
11119-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
fe2de317
MT
11120+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11121+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
11122+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
11123+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
df50ba0c 11124+#else
71d190be 11125+#define MODULE_PAX_KERNEXEC ""
58c5fc13
MT
11126 #endif
11127
6e9df6a3
MT
11128+#ifdef CONFIG_PAX_MEMORY_UDEREF
11129+#define MODULE_PAX_UDEREF "UDEREF "
71d190be 11130+#else
6e9df6a3 11131+#define MODULE_PAX_UDEREF ""
71d190be
MT
11132+#endif
11133+
6e9df6a3 11134+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
71d190be 11135+
58c5fc13 11136 #endif /* _ASM_X86_MODULE_H */
fe2de317
MT
11137diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
11138index 7639dbf..e08a58c 100644
11139--- a/arch/x86/include/asm/page_64_types.h
11140+++ b/arch/x86/include/asm/page_64_types.h
bc901d79
MT
11141@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
11142
11143 /* duplicated to the one in bootmem.h */
11144 extern unsigned long max_pfn;
11145-extern unsigned long phys_base;
11146+extern const unsigned long phys_base;
11147
11148 extern unsigned long __phys_addr(unsigned long);
11149 #define __phys_reloc_hide(x) (x)
fe2de317 11150diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
c6e2a6c8 11151index aa0f913..0c5bc6a 100644
fe2de317
MT
11152--- a/arch/x86/include/asm/paravirt.h
11153+++ b/arch/x86/include/asm/paravirt.h
c6e2a6c8 11154@@ -668,6 +668,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
15a11c5b
MT
11155 val);
11156 }
11157
11158+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11159+{
11160+ pgdval_t val = native_pgd_val(pgd);
11161+
11162+ if (sizeof(pgdval_t) > sizeof(long))
11163+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
11164+ val, (u64)val >> 32);
11165+ else
11166+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
11167+ val);
11168+}
11169+
11170 static inline void pgd_clear(pgd_t *pgdp)
11171 {
11172 set_pgd(pgdp, __pgd(0));
c6e2a6c8 11173@@ -749,6 +761,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
ae4e228f
MT
11174 pv_mmu_ops.set_fixmap(idx, phys, flags);
11175 }
11176
11177+#ifdef CONFIG_PAX_KERNEXEC
11178+static inline unsigned long pax_open_kernel(void)
11179+{
efbe55a5 11180+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
ae4e228f
MT
11181+}
11182+
11183+static inline unsigned long pax_close_kernel(void)
11184+{
efbe55a5 11185+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
ae4e228f
MT
11186+}
11187+#else
11188+static inline unsigned long pax_open_kernel(void) { return 0; }
11189+static inline unsigned long pax_close_kernel(void) { return 0; }
11190+#endif
11191+
11192 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
11193
11194 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
c6e2a6c8 11195@@ -965,7 +992,7 @@ extern void default_banner(void);
58c5fc13
MT
11196
11197 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
11198 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
11199-#define PARA_INDIRECT(addr) *%cs:addr
11200+#define PARA_INDIRECT(addr) *%ss:addr
11201 #endif
11202
11203 #define INTERRUPT_RETURN \
c6e2a6c8 11204@@ -1042,6 +1069,21 @@ extern void default_banner(void);
df50ba0c 11205 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
ae4e228f
MT
11206 CLBR_NONE, \
11207 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
58c5fc13 11208+
df50ba0c 11209+#define GET_CR0_INTO_RDI \
ae4e228f 11210+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
df50ba0c 11211+ mov %rax,%rdi
ae4e228f 11212+
df50ba0c
MT
11213+#define SET_RDI_INTO_CR0 \
11214+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
ae4e228f 11215+
df50ba0c
MT
11216+#define GET_CR3_INTO_RDI \
11217+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
11218+ mov %rax,%rdi
11219+
11220+#define SET_RDI_INTO_CR3 \
11221+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
ae4e228f
MT
11222+
11223 #endif /* CONFIG_X86_32 */
11224
11225 #endif /* __ASSEMBLY__ */
fe2de317
MT
11226diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
11227index 8e8b9a4..f07d725 100644
11228--- a/arch/x86/include/asm/paravirt_types.h
11229+++ b/arch/x86/include/asm/paravirt_types.h
6e9df6a3 11230@@ -84,20 +84,20 @@ struct pv_init_ops {
15a11c5b
MT
11231 */
11232 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
11233 unsigned long addr, unsigned len);
11234-};
11235+} __no_const;
11236
11237
11238 struct pv_lazy_ops {
11239 /* Set deferred update mode, used for batching operations. */
11240 void (*enter)(void);
11241 void (*leave)(void);
11242-};
11243+} __no_const;
11244
11245 struct pv_time_ops {
11246 unsigned long long (*sched_clock)(void);
6e9df6a3 11247 unsigned long long (*steal_clock)(int cpu);
15a11c5b
MT
11248 unsigned long (*get_tsc_khz)(void);
11249-};
11250+} __no_const;
11251
11252 struct pv_cpu_ops {
11253 /* hooks for various privileged instructions */
6e9df6a3 11254@@ -193,7 +193,7 @@ struct pv_cpu_ops {
15a11c5b
MT
11255
11256 void (*start_context_switch)(struct task_struct *prev);
11257 void (*end_context_switch)(struct task_struct *next);
11258-};
11259+} __no_const;
11260
11261 struct pv_irq_ops {
11262 /*
6e9df6a3 11263@@ -224,7 +224,7 @@ struct pv_apic_ops {
15a11c5b
MT
11264 unsigned long start_eip,
11265 unsigned long start_esp);
11266 #endif
11267-};
11268+} __no_const;
11269
11270 struct pv_mmu_ops {
11271 unsigned long (*read_cr2)(void);
6e9df6a3 11272@@ -313,6 +313,7 @@ struct pv_mmu_ops {
15a11c5b
MT
11273 struct paravirt_callee_save make_pud;
11274
11275 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
11276+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
11277 #endif /* PAGETABLE_LEVELS == 4 */
11278 #endif /* PAGETABLE_LEVELS >= 3 */
11279
6e9df6a3 11280@@ -324,6 +325,12 @@ struct pv_mmu_ops {
ae4e228f
MT
11281 an mfn. We can tell which is which from the index. */
11282 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
11283 phys_addr_t phys, pgprot_t flags);
11284+
11285+#ifdef CONFIG_PAX_KERNEXEC
11286+ unsigned long (*pax_open_kernel)(void);
11287+ unsigned long (*pax_close_kernel)(void);
11288+#endif
11289+
11290 };
11291
11292 struct arch_spinlock;
6e9df6a3 11293@@ -334,7 +341,7 @@ struct pv_lock_ops {
15a11c5b
MT
11294 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
11295 int (*spin_trylock)(struct arch_spinlock *lock);
11296 void (*spin_unlock)(struct arch_spinlock *lock);
11297-};
11298+} __no_const;
11299
11300 /* This contains all the paravirt structures: we get a convenient
11301 * number for each function using the offset which we use to indicate
fe2de317 11302diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
5e856224 11303index b4389a4..7024269 100644
fe2de317
MT
11304--- a/arch/x86/include/asm/pgalloc.h
11305+++ b/arch/x86/include/asm/pgalloc.h
11306@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
58c5fc13
MT
11307 pmd_t *pmd, pte_t *pte)
11308 {
11309 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11310+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
11311+}
11312+
11313+static inline void pmd_populate_user(struct mm_struct *mm,
11314+ pmd_t *pmd, pte_t *pte)
11315+{
11316+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
11317 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
11318 }
11319
5e856224
MT
11320@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
11321
11322 #ifdef CONFIG_X86_PAE
11323 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
11324+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
11325+{
11326+ pud_populate(mm, pudp, pmd);
11327+}
11328 #else /* !CONFIG_X86_PAE */
11329 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11330 {
11331 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11332 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
11333 }
11334+
11335+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
11336+{
11337+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
11338+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
11339+}
11340 #endif /* CONFIG_X86_PAE */
11341
11342 #if PAGETABLE_LEVELS > 3
11343@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11344 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
11345 }
11346
11347+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
11348+{
11349+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
11350+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
11351+}
11352+
11353 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
11354 {
11355 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
fe2de317
MT
11356diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
11357index 98391db..8f6984e 100644
11358--- a/arch/x86/include/asm/pgtable-2level.h
11359+++ b/arch/x86/include/asm/pgtable-2level.h
11360@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
58c5fc13
MT
11361
11362 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11363 {
ae4e228f 11364+ pax_open_kernel();
58c5fc13 11365 *pmdp = pmd;
ae4e228f 11366+ pax_close_kernel();
58c5fc13
MT
11367 }
11368
11369 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
fe2de317 11370diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
572b4308 11371index cb00ccc..17e9054 100644
fe2de317
MT
11372--- a/arch/x86/include/asm/pgtable-3level.h
11373+++ b/arch/x86/include/asm/pgtable-3level.h
572b4308 11374@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
58c5fc13
MT
11375
11376 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11377 {
ae4e228f 11378+ pax_open_kernel();
58c5fc13 11379 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
ae4e228f 11380+ pax_close_kernel();
58c5fc13
MT
11381 }
11382
11383 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11384 {
ae4e228f 11385+ pax_open_kernel();
58c5fc13 11386 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
ae4e228f 11387+ pax_close_kernel();
58c5fc13
MT
11388 }
11389
11390 /*
fe2de317 11391diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
c6e2a6c8 11392index 49afb3f..91a8c63 100644
fe2de317
MT
11393--- a/arch/x86/include/asm/pgtable.h
11394+++ b/arch/x86/include/asm/pgtable.h
11395@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
15a11c5b
MT
11396
11397 #ifndef __PAGETABLE_PUD_FOLDED
11398 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
11399+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
11400 #define pgd_clear(pgd) native_pgd_clear(pgd)
11401 #endif
11402
fe2de317 11403@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
ae4e228f
MT
11404
11405 #define arch_end_context_switch(prev) do {} while(0)
11406
11407+#define pax_open_kernel() native_pax_open_kernel()
11408+#define pax_close_kernel() native_pax_close_kernel()
11409 #endif /* CONFIG_PARAVIRT */
11410
11411+#define __HAVE_ARCH_PAX_OPEN_KERNEL
11412+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
58c5fc13
MT
11413+
11414+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f
MT
11415+static inline unsigned long native_pax_open_kernel(void)
11416+{
58c5fc13
MT
11417+ unsigned long cr0;
11418+
ae4e228f
MT
11419+ preempt_disable();
11420+ barrier();
11421+ cr0 = read_cr0() ^ X86_CR0_WP;
11422+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11423+ write_cr0(cr0);
11424+ return cr0 ^ X86_CR0_WP;
11425+}
58c5fc13 11426+
ae4e228f
MT
11427+static inline unsigned long native_pax_close_kernel(void)
11428+{
11429+ unsigned long cr0;
58c5fc13 11430+
ae4e228f
MT
11431+ cr0 = read_cr0() ^ X86_CR0_WP;
11432+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11433+ write_cr0(cr0);
11434+ barrier();
11435+ preempt_enable_no_resched();
11436+ return cr0 ^ X86_CR0_WP;
11437+}
11438+#else
11439+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11440+static inline unsigned long native_pax_close_kernel(void) { return 0; }
58c5fc13
MT
11441+#endif
11442+
ae4e228f 11443 /*
58c5fc13
MT
11444 * The following only work if pte_present() is true.
11445 * Undefined behaviour if not..
11446 */
11447+static inline int pte_user(pte_t pte)
11448+{
11449+ return pte_val(pte) & _PAGE_USER;
11450+}
11451+
11452 static inline int pte_dirty(pte_t pte)
11453 {
11454 return pte_flags(pte) & _PAGE_DIRTY;
fe2de317 11455@@ -196,9 +236,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
58c5fc13
MT
11456 return pte_clear_flags(pte, _PAGE_RW);
11457 }
11458
11459+static inline pte_t pte_mkread(pte_t pte)
11460+{
11461+ return __pte(pte_val(pte) | _PAGE_USER);
11462+}
11463+
11464 static inline pte_t pte_mkexec(pte_t pte)
11465 {
11466- return pte_clear_flags(pte, _PAGE_NX);
11467+#ifdef CONFIG_X86_PAE
11468+ if (__supported_pte_mask & _PAGE_NX)
11469+ return pte_clear_flags(pte, _PAGE_NX);
11470+ else
11471+#endif
11472+ return pte_set_flags(pte, _PAGE_USER);
11473+}
11474+
11475+static inline pte_t pte_exprotect(pte_t pte)
11476+{
11477+#ifdef CONFIG_X86_PAE
11478+ if (__supported_pte_mask & _PAGE_NX)
11479+ return pte_set_flags(pte, _PAGE_NX);
11480+ else
11481+#endif
11482+ return pte_clear_flags(pte, _PAGE_USER);
11483 }
11484
11485 static inline pte_t pte_mkdirty(pte_t pte)
fe2de317 11486@@ -390,6 +450,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
df50ba0c
MT
11487 #endif
11488
11489 #ifndef __ASSEMBLY__
11490+
11491+#ifdef CONFIG_PAX_PER_CPU_PGD
11492+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11493+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11494+{
11495+ return cpu_pgd[cpu];
11496+}
11497+#endif
11498+
11499 #include <linux/mm_types.h>
11500
11501 static inline int pte_none(pte_t pte)
fe2de317 11502@@ -560,7 +629,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
58c5fc13
MT
11503
11504 static inline int pgd_bad(pgd_t pgd)
11505 {
11506- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11507+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11508 }
11509
11510 static inline int pgd_none(pgd_t pgd)
15a11c5b 11511@@ -583,7 +652,12 @@ static inline int pgd_none(pgd_t pgd)
df50ba0c
MT
11512 * pgd_offset() returns a (pgd_t *)
11513 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11514 */
11515-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11516+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11517+
11518+#ifdef CONFIG_PAX_PER_CPU_PGD
11519+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11520+#endif
11521+
11522 /*
11523 * a shortcut which implies the use of the kernel's pgd, instead
11524 * of a process's
15a11c5b 11525@@ -594,6 +668,20 @@ static inline int pgd_none(pgd_t pgd)
df50ba0c
MT
11526 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11527 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11528
11529+#ifdef CONFIG_X86_32
11530+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11531+#else
11532+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11533+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11534+
11535+#ifdef CONFIG_PAX_MEMORY_UDEREF
11536+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11537+#else
11538+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11539+#endif
11540+
11541+#endif
11542+
11543 #ifndef __ASSEMBLY__
11544
11545 extern int direct_gbpages;
fe2de317 11546@@ -758,11 +846,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
ae4e228f
MT
11547 * dst and src can be on the same page, but the range must not overlap,
11548 * and must not cross a page boundary.
58c5fc13 11549 */
ae4e228f
MT
11550-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11551+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
58c5fc13
MT
11552 {
11553- memcpy(dst, src, count * sizeof(pgd_t));
ae4e228f
MT
11554+ pax_open_kernel();
11555+ while (count--)
11556+ *dst++ = *src++;
11557+ pax_close_kernel();
58c5fc13
MT
11558 }
11559
df50ba0c 11560+#ifdef CONFIG_PAX_PER_CPU_PGD
c6e2a6c8 11561+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
df50ba0c
MT
11562+#endif
11563+
11564+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
c6e2a6c8 11565+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
df50ba0c 11566+#else
c6e2a6c8 11567+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
df50ba0c 11568+#endif
58c5fc13 11569
df50ba0c
MT
11570 #include <asm-generic/pgtable.h>
11571 #endif /* __ASSEMBLY__ */
fe2de317
MT
11572diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11573index 0c92113..34a77c6 100644
11574--- a/arch/x86/include/asm/pgtable_32.h
11575+++ b/arch/x86/include/asm/pgtable_32.h
11576@@ -25,9 +25,6 @@
11577 struct mm_struct;
11578 struct vm_area_struct;
11579
11580-extern pgd_t swapper_pg_dir[1024];
11581-extern pgd_t initial_page_table[1024];
11582-
11583 static inline void pgtable_cache_init(void) { }
11584 static inline void check_pgt_cache(void) { }
11585 void paging_init(void);
11586@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11587 # include <asm/pgtable-2level.h>
11588 #endif
11589
11590+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11591+extern pgd_t initial_page_table[PTRS_PER_PGD];
11592+#ifdef CONFIG_X86_PAE
11593+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11594+#endif
11595+
11596 #if defined(CONFIG_HIGHPTE)
11597 #define pte_offset_map(dir, address) \
11598 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
11599@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11600 /* Clear a kernel PTE and flush it from the TLB */
11601 #define kpte_clear_flush(ptep, vaddr) \
11602 do { \
11603+ pax_open_kernel(); \
11604 pte_clear(&init_mm, (vaddr), (ptep)); \
11605+ pax_close_kernel(); \
11606 __flush_tlb_one((vaddr)); \
11607 } while (0)
11608
11609@@ -74,6 +79,9 @@ do { \
11610
11611 #endif /* !__ASSEMBLY__ */
11612
11613+#define HAVE_ARCH_UNMAPPED_AREA
11614+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11615+
11616 /*
11617 * kern_addr_valid() is (1) for FLATMEM and (0) for
11618 * SPARSEMEM and DISCONTIGMEM
11619diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11620index ed5903b..c7fe163 100644
11621--- a/arch/x86/include/asm/pgtable_32_types.h
11622+++ b/arch/x86/include/asm/pgtable_32_types.h
11623@@ -8,7 +8,7 @@
11624 */
11625 #ifdef CONFIG_X86_PAE
11626 # include <asm/pgtable-3level_types.h>
11627-# define PMD_SIZE (1UL << PMD_SHIFT)
11628+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11629 # define PMD_MASK (~(PMD_SIZE - 1))
11630 #else
11631 # include <asm/pgtable-2level_types.h>
11632@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11633 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11634 #endif
11635
11636+#ifdef CONFIG_PAX_KERNEXEC
11637+#ifndef __ASSEMBLY__
11638+extern unsigned char MODULES_EXEC_VADDR[];
11639+extern unsigned char MODULES_EXEC_END[];
11640+#endif
11641+#include <asm/boot.h>
11642+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11643+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11644+#else
11645+#define ktla_ktva(addr) (addr)
11646+#define ktva_ktla(addr) (addr)
11647+#endif
11648+
11649 #define MODULES_VADDR VMALLOC_START
11650 #define MODULES_END VMALLOC_END
11651 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11652diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
5e856224 11653index 975f709..9f779c9 100644
fe2de317
MT
11654--- a/arch/x86/include/asm/pgtable_64.h
11655+++ b/arch/x86/include/asm/pgtable_64.h
11656@@ -16,10 +16,14 @@
11657
11658 extern pud_t level3_kernel_pgt[512];
11659 extern pud_t level3_ident_pgt[512];
11660+extern pud_t level3_vmalloc_start_pgt[512];
11661+extern pud_t level3_vmalloc_end_pgt[512];
11662+extern pud_t level3_vmemmap_pgt[512];
11663+extern pud_t level2_vmemmap_pgt[512];
11664 extern pmd_t level2_kernel_pgt[512];
11665 extern pmd_t level2_fixmap_pgt[512];
11666-extern pmd_t level2_ident_pgt[512];
11667-extern pgd_t init_level4_pgt[];
11668+extern pmd_t level2_ident_pgt[512*2];
11669+extern pgd_t init_level4_pgt[512];
11670
11671 #define swapper_pg_dir init_level4_pgt
11672
11673@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
11674
11675 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11676 {
11677+ pax_open_kernel();
11678 *pmdp = pmd;
11679+ pax_close_kernel();
11680 }
11681
11682 static inline void native_pmd_clear(pmd_t *pmd)
5e856224
MT
11683@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
11684
11685 static inline void native_set_pud(pud_t *pudp, pud_t pud)
11686 {
11687+ pax_open_kernel();
11688 *pudp = pud;
11689+ pax_close_kernel();
11690 }
11691
11692 static inline void native_pud_clear(pud_t *pud)
11693@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
fe2de317
MT
11694
11695 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11696 {
11697+ pax_open_kernel();
11698+ *pgdp = pgd;
11699+ pax_close_kernel();
11700+}
11701+
11702+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11703+{
11704 *pgdp = pgd;
11705 }
11706
11707diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11708index 766ea16..5b96cb3 100644
11709--- a/arch/x86/include/asm/pgtable_64_types.h
11710+++ b/arch/x86/include/asm/pgtable_64_types.h
11711@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11712 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11713 #define MODULES_END _AC(0xffffffffff000000, UL)
11714 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11715+#define MODULES_EXEC_VADDR MODULES_VADDR
11716+#define MODULES_EXEC_END MODULES_END
11717+
11718+#define ktla_ktva(addr) (addr)
11719+#define ktva_ktla(addr) (addr)
11720
11721 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11722diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11723index 013286a..8b42f4f 100644
11724--- a/arch/x86/include/asm/pgtable_types.h
11725+++ b/arch/x86/include/asm/pgtable_types.h
16454cff 11726@@ -16,13 +16,12 @@
58c5fc13
MT
11727 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11728 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11729 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11730-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11731+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11732 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11733 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11734 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11735-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11736-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
16454cff 11737-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
58c5fc13 11738+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
16454cff 11739+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
58c5fc13
MT
11740 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11741
11742 /* If _PAGE_BIT_PRESENT is clear, we use these: */
16454cff 11743@@ -40,7 +39,6 @@
58c5fc13
MT
11744 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11745 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11746 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11747-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11748 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11749 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11750 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
16454cff 11751@@ -57,8 +55,10 @@
58c5fc13
MT
11752
11753 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11754 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11755-#else
11756+#elif defined(CONFIG_KMEMCHECK)
11757 #define _PAGE_NX (_AT(pteval_t, 0))
11758+#else
11759+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11760 #endif
11761
11762 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
16454cff 11763@@ -96,6 +96,9 @@
58c5fc13
MT
11764 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11765 _PAGE_ACCESSED)
11766
11767+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11768+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11769+
11770 #define __PAGE_KERNEL_EXEC \
11771 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11772 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
6e9df6a3 11773@@ -106,7 +109,7 @@
58c5fc13
MT
11774 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11775 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11776 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11777-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
58c5fc13 11778+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
6e9df6a3
MT
11779 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
11780 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
58c5fc13 11781 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
6e9df6a3 11782@@ -168,8 +171,8 @@
58c5fc13
MT
11783 * bits are combined, this will alow user to access the high address mapped
11784 * VDSO in the presence of CONFIG_COMPAT_VDSO
11785 */
11786-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11787-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11788+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11789+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11790 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11791 #endif
11792
fe2de317 11793@@ -207,7 +210,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
57199397
MT
11794 {
11795 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11796 }
11797+#endif
11798
11799+#if PAGETABLE_LEVELS == 3
11800+#include <asm-generic/pgtable-nopud.h>
11801+#endif
11802+
11803+#if PAGETABLE_LEVELS == 2
11804+#include <asm-generic/pgtable-nopmd.h>
11805+#endif
11806+
11807+#ifndef __ASSEMBLY__
11808 #if PAGETABLE_LEVELS > 3
11809 typedef struct { pudval_t pud; } pud_t;
11810
fe2de317 11811@@ -221,8 +234,6 @@ static inline pudval_t native_pud_val(pud_t pud)
57199397
MT
11812 return pud.pud;
11813 }
11814 #else
11815-#include <asm-generic/pgtable-nopud.h>
11816-
11817 static inline pudval_t native_pud_val(pud_t pud)
11818 {
11819 return native_pgd_val(pud.pgd);
fe2de317 11820@@ -242,8 +253,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
57199397
MT
11821 return pmd.pmd;
11822 }
11823 #else
11824-#include <asm-generic/pgtable-nopmd.h>
11825-
11826 static inline pmdval_t native_pmd_val(pmd_t pmd)
11827 {
11828 return native_pgd_val(pmd.pud.pgd);
6e9df6a3 11829@@ -283,7 +292,6 @@ typedef struct page *pgtable_t;
58c5fc13
MT
11830
11831 extern pteval_t __supported_pte_mask;
ae4e228f
MT
11832 extern void set_nx(void);
11833-extern int nx_enabled;
58c5fc13
MT
11834
11835 #define pgprot_writecombine pgprot_writecombine
11836 extern pgprot_t pgprot_writecombine(pgprot_t prot);
fe2de317 11837diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
c6e2a6c8 11838index 4fa7dcc..764e33a 100644
fe2de317
MT
11839--- a/arch/x86/include/asm/processor.h
11840+++ b/arch/x86/include/asm/processor.h
c6e2a6c8 11841@@ -276,7 +276,7 @@ struct tss_struct {
58c5fc13
MT
11842
11843 } ____cacheline_aligned;
11844
11845-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11846+extern struct tss_struct init_tss[NR_CPUS];
11847
11848 /*
11849 * Save the original ist values for checking stack pointers during debugging
c6e2a6c8 11850@@ -807,11 +807,18 @@ static inline void spin_lock_prefetch(const void *x)
58c5fc13
MT
11851 */
11852 #define TASK_SIZE PAGE_OFFSET
11853 #define TASK_SIZE_MAX TASK_SIZE
11854+
11855+#ifdef CONFIG_PAX_SEGMEXEC
11856+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
58c5fc13
MT
11857+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11858+#else
11859 #define STACK_TOP TASK_SIZE
11860-#define STACK_TOP_MAX STACK_TOP
11861+#endif
ae4e228f 11862+
58c5fc13
MT
11863+#define STACK_TOP_MAX TASK_SIZE
11864
11865 #define INIT_THREAD { \
66a7e928
MT
11866- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11867+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11868 .vm86_info = NULL, \
11869 .sysenter_cs = __KERNEL_CS, \
11870 .io_bitmap_ptr = NULL, \
c6e2a6c8 11871@@ -825,7 +832,7 @@ static inline void spin_lock_prefetch(const void *x)
58c5fc13
MT
11872 */
11873 #define INIT_TSS { \
11874 .x86_tss = { \
11875- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11876+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11877 .ss0 = __KERNEL_DS, \
11878 .ss1 = __KERNEL_CS, \
11879 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
c6e2a6c8 11880@@ -836,11 +843,7 @@ static inline void spin_lock_prefetch(const void *x)
58c5fc13
MT
11881 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11882
11883 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11884-#define KSTK_TOP(info) \
11885-({ \
11886- unsigned long *__ptr = (unsigned long *)(info); \
11887- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11888-})
71d190be 11889+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
58c5fc13
MT
11890
11891 /*
11892 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
c6e2a6c8 11893@@ -855,7 +858,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
58c5fc13
MT
11894 #define task_pt_regs(task) \
11895 ({ \
11896 struct pt_regs *__regs__; \
11897- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11898+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11899 __regs__ - 1; \
11900 })
11901
c6e2a6c8 11902@@ -865,13 +868,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
df50ba0c
MT
11903 /*
11904 * User space process size. 47bits minus one guard page.
11905 */
11906-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11907+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11908
11909 /* This decides where the kernel will search for a free chunk of vm
58c5fc13
MT
11910 * space during mmap's.
11911 */
11912 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11913- 0xc0000000 : 0xFFFFe000)
11914+ 0xc0000000 : 0xFFFFf000)
11915
c6e2a6c8 11916 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
58c5fc13 11917 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
c6e2a6c8 11918@@ -882,11 +885,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
66a7e928
MT
11919 #define STACK_TOP_MAX TASK_SIZE_MAX
11920
11921 #define INIT_THREAD { \
11922- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11923+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11924 }
11925
11926 #define INIT_TSS { \
11927- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11928+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11929 }
11930
11931 /*
c6e2a6c8 11932@@ -914,6 +917,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
58c5fc13
MT
11933 */
11934 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11935
11936+#ifdef CONFIG_PAX_SEGMEXEC
11937+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11938+#endif
11939+
11940 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11941
11942 /* Get/set a process' ability to use the timestamp counter instruction */
c6e2a6c8
MT
11943@@ -976,12 +983,12 @@ extern bool cpu_has_amd_erratum(const int *);
11944
11945 void cpu_idle_wait(void);
11946
11947-extern unsigned long arch_align_stack(unsigned long sp);
11948+#define arch_align_stack(x) ((x) & ~0xfUL)
11949 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11950
11951 void default_idle(void);
11952 bool set_pm_idle_to_default(void);
11953
11954-void stop_this_cpu(void *dummy);
11955+void stop_this_cpu(void *dummy) __noreturn;
11956
11957 #endif /* _ASM_X86_PROCESSOR_H */
fe2de317 11958diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
c6e2a6c8 11959index dcfde52..dbfea06 100644
fe2de317
MT
11960--- a/arch/x86/include/asm/ptrace.h
11961+++ b/arch/x86/include/asm/ptrace.h
c6e2a6c8 11962@@ -155,28 +155,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
58c5fc13
MT
11963 }
11964
11965 /*
11966- * user_mode_vm(regs) determines whether a register set came from user mode.
11967+ * user_mode(regs) determines whether a register set came from user mode.
11968 * This is true if V8086 mode was enabled OR if the register set was from
11969 * protected mode with RPL-3 CS value. This tricky test checks that with
11970 * one comparison. Many places in the kernel can bypass this full check
11971- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11972+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11973+ * be used.
11974 */
11975-static inline int user_mode(struct pt_regs *regs)
11976+static inline int user_mode_novm(struct pt_regs *regs)
11977 {
11978 #ifdef CONFIG_X86_32
11979 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11980 #else
11981- return !!(regs->cs & 3);
11982+ return !!(regs->cs & SEGMENT_RPL_MASK);
11983 #endif
11984 }
11985
11986-static inline int user_mode_vm(struct pt_regs *regs)
11987+static inline int user_mode(struct pt_regs *regs)
11988 {
11989 #ifdef CONFIG_X86_32
11990 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11991 USER_RPL;
11992 #else
11993- return user_mode(regs);
11994+ return user_mode_novm(regs);
11995 #endif
11996 }
11997
c6e2a6c8 11998@@ -192,15 +193,16 @@ static inline int v8086_mode(struct pt_regs *regs)
6e9df6a3
MT
11999 #ifdef CONFIG_X86_64
12000 static inline bool user_64bit_mode(struct pt_regs *regs)
12001 {
12002+ unsigned long cs = regs->cs & 0xffff;
12003 #ifndef CONFIG_PARAVIRT
12004 /*
12005 * On non-paravirt systems, this is the only long mode CPL 3
12006 * selector. We do not allow long mode selectors in the LDT.
12007 */
12008- return regs->cs == __USER_CS;
12009+ return cs == __USER_CS;
12010 #else
12011 /* Headers are too twisted for this to go in paravirt.h. */
12012- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
12013+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
12014 #endif
12015 }
12016 #endif
fe2de317 12017diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
4c928ab7 12018index 92f29706..a79cbbb 100644
fe2de317
MT
12019--- a/arch/x86/include/asm/reboot.h
12020+++ b/arch/x86/include/asm/reboot.h
66a7e928
MT
12021@@ -6,19 +6,19 @@
12022 struct pt_regs;
12023
12024 struct machine_ops {
12025- void (*restart)(char *cmd);
12026- void (*halt)(void);
12027- void (*power_off)(void);
12028+ void (* __noreturn restart)(char *cmd);
12029+ void (* __noreturn halt)(void);
12030+ void (* __noreturn power_off)(void);
12031 void (*shutdown)(void);
12032 void (*crash_shutdown)(struct pt_regs *);
12033- void (*emergency_restart)(void);
15a11c5b 12034-};
66a7e928 12035+ void (* __noreturn emergency_restart)(void);
15a11c5b 12036+} __no_const;
66a7e928
MT
12037
12038 extern struct machine_ops machine_ops;
58c5fc13
MT
12039
12040 void native_machine_crash_shutdown(struct pt_regs *regs);
12041 void native_machine_shutdown(void);
66a7e928
MT
12042-void machine_real_restart(unsigned int type);
12043+void machine_real_restart(unsigned int type) __noreturn;
12044 /* These must match dispatch_table in reboot_32.S */
12045 #define MRR_BIOS 0
12046 #define MRR_APM 1
fe2de317 12047diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
4c928ab7 12048index 2dbe4a7..ce1db00 100644
fe2de317
MT
12049--- a/arch/x86/include/asm/rwsem.h
12050+++ b/arch/x86/include/asm/rwsem.h
12051@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
58c5fc13
MT
12052 {
12053 asm volatile("# beginning down_read\n\t"
df50ba0c 12054 LOCK_PREFIX _ASM_INC "(%1)\n\t"
58c5fc13
MT
12055+
12056+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12057+ "jno 0f\n"
df50ba0c 12058+ LOCK_PREFIX _ASM_DEC "(%1)\n"
bc901d79
MT
12059+ "int $4\n0:\n"
12060+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12061+#endif
12062+
6892158b 12063 /* adds 0x00000001 */
bc901d79 12064 " jns 1f\n"
58c5fc13 12065 " call call_rwsem_down_read_failed\n"
fe2de317 12066@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
bc901d79 12067 "1:\n\t"
df50ba0c
MT
12068 " mov %1,%2\n\t"
12069 " add %3,%2\n\t"
58c5fc13
MT
12070+
12071+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12072+ "jno 0f\n"
df50ba0c 12073+ "sub %3,%2\n"
bc901d79
MT
12074+ "int $4\n0:\n"
12075+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12076+#endif
12077+
bc901d79 12078 " jle 2f\n\t"
df50ba0c 12079 LOCK_PREFIX " cmpxchg %2,%0\n\t"
bc901d79 12080 " jnz 1b\n\t"
fe2de317 12081@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
66a7e928 12082 long tmp;
58c5fc13 12083 asm volatile("# beginning down_write\n\t"
df50ba0c 12084 LOCK_PREFIX " xadd %1,(%2)\n\t"
58c5fc13
MT
12085+
12086+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12087+ "jno 0f\n"
df50ba0c 12088+ "mov %1,(%2)\n"
bc901d79
MT
12089+ "int $4\n0:\n"
12090+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12091+#endif
12092+
6892158b 12093 /* adds 0xffff0001, returns the old value */
df50ba0c 12094 " test %1,%1\n\t"
58c5fc13 12095 /* was the count 0 before? */
fe2de317 12096@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
66a7e928 12097 long tmp;
58c5fc13 12098 asm volatile("# beginning __up_read\n\t"
df50ba0c 12099 LOCK_PREFIX " xadd %1,(%2)\n\t"
58c5fc13
MT
12100+
12101+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12102+ "jno 0f\n"
df50ba0c 12103+ "mov %1,(%2)\n"
bc901d79
MT
12104+ "int $4\n0:\n"
12105+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12106+#endif
12107+
12108 /* subtracts 1, returns the old value */
bc901d79 12109 " jns 1f\n\t"
6892158b 12110 " call call_rwsem_wake\n" /* expects old value in %edx */
fe2de317 12111@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
66a7e928 12112 long tmp;
58c5fc13 12113 asm volatile("# beginning __up_write\n\t"
df50ba0c 12114 LOCK_PREFIX " xadd %1,(%2)\n\t"
58c5fc13
MT
12115+
12116+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12117+ "jno 0f\n"
df50ba0c 12118+ "mov %1,(%2)\n"
bc901d79
MT
12119+ "int $4\n0:\n"
12120+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12121+#endif
12122+
6892158b 12123 /* subtracts 0xffff0001, returns the old value */
bc901d79 12124 " jns 1f\n\t"
6892158b 12125 " call call_rwsem_wake\n" /* expects old value in %edx */
fe2de317 12126@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
58c5fc13
MT
12127 {
12128 asm volatile("# beginning __downgrade_write\n\t"
df50ba0c 12129 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
58c5fc13
MT
12130+
12131+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12132+ "jno 0f\n"
df50ba0c 12133+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
bc901d79
MT
12134+ "int $4\n0:\n"
12135+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12136+#endif
12137+
df50ba0c
MT
12138 /*
12139 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
12140 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
fe2de317 12141@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
66a7e928
MT
12142 */
12143 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
58c5fc13 12144 {
df50ba0c
MT
12145- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
12146+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
58c5fc13
MT
12147+
12148+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12149+ "jno 0f\n"
df50ba0c 12150+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
bc901d79
MT
12151+ "int $4\n0:\n"
12152+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12153+#endif
12154+
12155 : "+m" (sem->count)
df50ba0c 12156 : "er" (delta));
58c5fc13 12157 }
4c928ab7
MT
12158@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
12159 */
12160 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
58c5fc13 12161 {
4c928ab7
MT
12162- return delta + xadd(&sem->count, delta);
12163+ return delta + xadd_check_overflow(&sem->count, delta);
12164 }
58c5fc13 12165
4c928ab7 12166 #endif /* __KERNEL__ */
fe2de317 12167diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
c6e2a6c8 12168index 1654662..5af4157 100644
fe2de317
MT
12169--- a/arch/x86/include/asm/segment.h
12170+++ b/arch/x86/include/asm/segment.h
15a11c5b 12171@@ -64,10 +64,15 @@
ae4e228f
MT
12172 * 26 - ESPFIX small SS
12173 * 27 - per-cpu [ offset to per-cpu data area ]
12174 * 28 - stack_canary-20 [ for stack protector ]
12175- * 29 - unused
12176- * 30 - unused
12177+ * 29 - PCI BIOS CS
12178+ * 30 - PCI BIOS DS
12179 * 31 - TSS for double fault handler
12180 */
15a11c5b
MT
12181+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
12182+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
12183+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
12184+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
12185+
ae4e228f 12186 #define GDT_ENTRY_TLS_MIN 6
15a11c5b
MT
12187 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
12188
12189@@ -79,6 +84,8 @@
ae4e228f 12190
bc901d79 12191 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
ae4e228f
MT
12192
12193+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
12194+
bc901d79 12195 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
58c5fc13 12196
bc901d79 12197 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15a11c5b 12198@@ -104,6 +111,12 @@
58c5fc13
MT
12199 #define __KERNEL_STACK_CANARY 0
12200 #endif
12201
bc901d79 12202+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
58c5fc13
MT
12203+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
12204+
bc901d79 12205+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
58c5fc13
MT
12206+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
12207+
12208 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
12209
12210 /*
15a11c5b 12211@@ -141,7 +154,7 @@
58c5fc13
MT
12212 */
12213
12214 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
12215-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
12216+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
12217
12218
12219 #else
15a11c5b 12220@@ -165,6 +178,8 @@
6e9df6a3 12221 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
ae4e228f
MT
12222 #define __USER32_DS __USER_DS
12223
12224+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
12225+
12226 #define GDT_ENTRY_TSS 8 /* needs two entries */
12227 #define GDT_ENTRY_LDT 10 /* needs two entries */
12228 #define GDT_ENTRY_TLS_MIN 12
15a11c5b 12229@@ -185,6 +200,7 @@
ae4e228f
MT
12230 #endif
12231
bc901d79
MT
12232 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
12233+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
12234 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
12235 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
12236 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
c6e2a6c8
MT
12237@@ -263,7 +279,7 @@ static inline unsigned long get_limit(unsigned long segment)
12238 {
12239 unsigned long __limit;
12240 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
12241- return __limit + 1;
12242+ return __limit;
12243 }
12244
12245 #endif /* !__ASSEMBLY__ */
fe2de317 12246diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
5e856224 12247index 0434c40..1714bf0 100644
fe2de317
MT
12248--- a/arch/x86/include/asm/smp.h
12249+++ b/arch/x86/include/asm/smp.h
12250@@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
66a7e928
MT
12251 /* cpus sharing the last level cache: */
12252 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
6892158b
MT
12253 DECLARE_PER_CPU(u16, cpu_llc_id);
12254-DECLARE_PER_CPU(int, cpu_number);
12255+DECLARE_PER_CPU(unsigned int, cpu_number);
12256
12257 static inline struct cpumask *cpu_sibling_mask(int cpu)
12258 {
15a11c5b
MT
12259@@ -77,7 +77,7 @@ struct smp_ops {
12260
12261 void (*send_call_func_ipi)(const struct cpumask *mask);
12262 void (*send_call_func_single_ipi)(int cpu);
12263-};
12264+} __no_const;
12265
12266 /* Globals due to paravirt */
12267 extern void set_cpu_sibling_map(int cpu);
fe2de317 12268@@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitdata;
71d190be
MT
12269 extern int safe_smp_processor_id(void);
12270
12271 #elif defined(CONFIG_X86_64_SMP)
12272-#define raw_smp_processor_id() (percpu_read(cpu_number))
12273-
12274-#define stack_smp_processor_id() \
12275-({ \
12276- struct thread_info *ti; \
12277- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
12278- ti->cpu; \
12279-})
12280+#define raw_smp_processor_id() (percpu_read(cpu_number))
12281+#define stack_smp_processor_id() raw_smp_processor_id()
12282 #define safe_smp_processor_id() smp_processor_id()
12283
12284 #endif
fe2de317 12285diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
c6e2a6c8 12286index 76bfa2c..12d3fe7 100644
fe2de317
MT
12287--- a/arch/x86/include/asm/spinlock.h
12288+++ b/arch/x86/include/asm/spinlock.h
5e856224 12289@@ -175,6 +175,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
ae4e228f 12290 static inline void arch_read_lock(arch_rwlock_t *rw)
58c5fc13 12291 {
6e9df6a3 12292 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
58c5fc13
MT
12293+
12294+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12295+ "jno 0f\n"
6e9df6a3 12296+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
bc901d79
MT
12297+ "int $4\n0:\n"
12298+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12299+#endif
12300+
bc901d79
MT
12301 "jns 1f\n"
12302 "call __read_lock_failed\n\t"
12303 "1:\n"
5e856224 12304@@ -184,6 +192,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
ae4e228f 12305 static inline void arch_write_lock(arch_rwlock_t *rw)
58c5fc13 12306 {
6e9df6a3 12307 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
58c5fc13
MT
12308+
12309+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12310+ "jno 0f\n"
6e9df6a3 12311+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
bc901d79
MT
12312+ "int $4\n0:\n"
12313+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12314+#endif
12315+
bc901d79
MT
12316 "jz 1f\n"
12317 "call __write_lock_failed\n\t"
12318 "1:\n"
5e856224 12319@@ -213,13 +229,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
58c5fc13 12320
ae4e228f 12321 static inline void arch_read_unlock(arch_rwlock_t *rw)
58c5fc13 12322 {
6e9df6a3
MT
12323- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
12324+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
58c5fc13
MT
12325+
12326+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12327+ "jno 0f\n"
6e9df6a3 12328+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
bc901d79
MT
12329+ "int $4\n0:\n"
12330+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12331+#endif
12332+
6e9df6a3 12333 :"+m" (rw->lock) : : "memory");
58c5fc13
MT
12334 }
12335
ae4e228f 12336 static inline void arch_write_unlock(arch_rwlock_t *rw)
58c5fc13 12337 {
6e9df6a3
MT
12338- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
12339+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
58c5fc13
MT
12340+
12341+#ifdef CONFIG_PAX_REFCOUNT
58c5fc13 12342+ "jno 0f\n"
6e9df6a3 12343+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
58c5fc13 12344+ "int $4\n0:\n"
bc901d79 12345+ _ASM_EXTABLE(0b, 0b)
58c5fc13
MT
12346+#endif
12347+
6e9df6a3 12348 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
58c5fc13
MT
12349 }
12350
fe2de317 12351diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
c6e2a6c8 12352index b5d9533..41655fa 100644
fe2de317
MT
12353--- a/arch/x86/include/asm/stackprotector.h
12354+++ b/arch/x86/include/asm/stackprotector.h
c6e2a6c8 12355@@ -47,7 +47,7 @@
15a11c5b
MT
12356 * head_32 for boot CPU and setup_per_cpu_areas() for others.
12357 */
12358 #define GDT_STACK_CANARY_INIT \
12359- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
12360+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
12361
12362 /*
12363 * Initialize the stackprotector canary value.
c6e2a6c8 12364@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
bc901d79
MT
12365
12366 static inline void load_stack_canary_segment(void)
12367 {
12368-#ifdef CONFIG_X86_32
12369+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
12370 asm volatile ("mov %0, %%gs" : : "r" (0));
12371 #endif
12372 }
fe2de317
MT
12373diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
12374index 70bbe39..4ae2bd4 100644
12375--- a/arch/x86/include/asm/stacktrace.h
12376+++ b/arch/x86/include/asm/stacktrace.h
71d190be
MT
12377@@ -11,28 +11,20 @@
12378
12379 extern int kstack_depth_to_print;
12380
12381-struct thread_info;
12382+struct task_struct;
12383 struct stacktrace_ops;
12384
12385-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
12386- unsigned long *stack,
12387- unsigned long bp,
12388- const struct stacktrace_ops *ops,
12389- void *data,
12390- unsigned long *end,
12391- int *graph);
fe2de317
MT
12392+typedef unsigned long walk_stack_t(struct task_struct *task,
12393+ void *stack_start,
12394+ unsigned long *stack,
12395+ unsigned long bp,
12396+ const struct stacktrace_ops *ops,
12397+ void *data,
12398+ unsigned long *end,
12399+ int *graph);
12400
71d190be
MT
12401-extern unsigned long
12402-print_context_stack(struct thread_info *tinfo,
12403- unsigned long *stack, unsigned long bp,
12404- const struct stacktrace_ops *ops, void *data,
12405- unsigned long *end, int *graph);
12406-
12407-extern unsigned long
12408-print_context_stack_bp(struct thread_info *tinfo,
12409- unsigned long *stack, unsigned long bp,
12410- const struct stacktrace_ops *ops, void *data,
12411- unsigned long *end, int *graph);
71d190be
MT
12412+extern walk_stack_t print_context_stack;
12413+extern walk_stack_t print_context_stack_bp;
12414
12415 /* Generic stack tracer with callbacks */
12416
15a11c5b 12417@@ -40,7 +32,7 @@ struct stacktrace_ops {
71d190be
MT
12418 void (*address)(void *data, unsigned long address, int reliable);
12419 /* On negative return stop dumping */
12420 int (*stack)(void *data, char *name);
12421- walk_stack_t walk_stack;
12422+ walk_stack_t *walk_stack;
12423 };
12424
12425 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
c6e2a6c8
MT
12426diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
12427index 4ec45b3..a4f0a8a 100644
12428--- a/arch/x86/include/asm/switch_to.h
12429+++ b/arch/x86/include/asm/switch_to.h
12430@@ -108,7 +108,7 @@ do { \
71d190be
MT
12431 "call __switch_to\n\t" \
12432 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
12433 __switch_canary \
12434- "movq %P[thread_info](%%rsi),%%r8\n\t" \
12435+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
12436 "movq %%rax,%%rdi\n\t" \
12437 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
12438 "jnz ret_from_fork\n\t" \
c6e2a6c8 12439@@ -119,7 +119,7 @@ do { \
71d190be
MT
12440 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
12441 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
12442 [_tif_fork] "i" (_TIF_FORK), \
12443- [thread_info] "i" (offsetof(struct task_struct, stack)), \
12444+ [thread_info] "m" (current_tinfo), \
12445 [current_task] "m" (current_task) \
12446 __switch_canary_iparam \
12447 : "memory", "cc" __EXTRA_CLOBBER)
c6e2a6c8
MT
12448diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
12449index 3fda9db4..4ca1c61 100644
12450--- a/arch/x86/include/asm/sys_ia32.h
12451+++ b/arch/x86/include/asm/sys_ia32.h
12452@@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *,
12453 struct old_sigaction32 __user *);
12454 asmlinkage long sys32_alarm(unsigned int);
66a7e928 12455
c6e2a6c8
MT
12456-asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int);
12457+asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int);
12458 asmlinkage long sys32_sysfs(int, u32, u32);
66a7e928 12459
c6e2a6c8 12460 asmlinkage long sys32_sched_rr_get_interval(compat_pid_t,
fe2de317 12461diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
c6e2a6c8 12462index ad6df8c..5e0cf6e 100644
fe2de317
MT
12463--- a/arch/x86/include/asm/thread_info.h
12464+++ b/arch/x86/include/asm/thread_info.h
71d190be
MT
12465@@ -10,6 +10,7 @@
12466 #include <linux/compiler.h>
12467 #include <asm/page.h>
12468 #include <asm/types.h>
12469+#include <asm/percpu.h>
12470
12471 /*
12472 * low level task data that entry.S needs immediate access to
12473@@ -24,7 +25,6 @@ struct exec_domain;
6e9df6a3 12474 #include <linux/atomic.h>
71d190be
MT
12475
12476 struct thread_info {
12477- struct task_struct *task; /* main task structure */
12478 struct exec_domain *exec_domain; /* execution domain */
12479 __u32 flags; /* low level flags */
12480 __u32 status; /* thread synchronous flags */
5e856224 12481@@ -34,19 +34,13 @@ struct thread_info {
71d190be
MT
12482 mm_segment_t addr_limit;
12483 struct restart_block restart_block;
12484 void __user *sysenter_return;
12485-#ifdef CONFIG_X86_32
12486- unsigned long previous_esp; /* ESP of the previous stack in
12487- case of nested (IRQ) stacks
12488- */
12489- __u8 supervisor_stack[0];
12490-#endif
66a7e928 12491+ unsigned long lowest_stack;
5e856224
MT
12492 unsigned int sig_on_uaccess_error:1;
12493 unsigned int uaccess_err:1; /* uaccess failed */
71d190be
MT
12494 };
12495
12496-#define INIT_THREAD_INFO(tsk) \
12497+#define INIT_THREAD_INFO \
12498 { \
12499- .task = &tsk, \
12500 .exec_domain = &default_exec_domain, \
12501 .flags = 0, \
12502 .cpu = 0, \
5e856224 12503@@ -57,7 +51,7 @@ struct thread_info {
71d190be
MT
12504 }, \
12505 }
12506
12507-#define init_thread_info (init_thread_union.thread_info)
66a7e928 12508+#define init_thread_info (init_thread_union.stack)
71d190be
MT
12509 #define init_stack (init_thread_union.stack)
12510
12511 #else /* !__ASSEMBLY__ */
c6e2a6c8 12512@@ -97,6 +91,7 @@ struct thread_info {
5e856224 12513 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
c6e2a6c8
MT
12514 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
12515 #define TIF_X32 30 /* 32-bit native x86-64 binary */
12516+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
5e856224
MT
12517
12518 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
12519 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
c6e2a6c8 12520@@ -120,16 +115,18 @@ struct thread_info {
5e856224 12521 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
c6e2a6c8
MT
12522 #define _TIF_ADDR32 (1 << TIF_ADDR32)
12523 #define _TIF_X32 (1 << TIF_X32)
5e856224
MT
12524+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
12525
12526 /* work to do in syscall_trace_enter() */
12527 #define _TIF_WORK_SYSCALL_ENTRY \
12528 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
12529- _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)
c6e2a6c8
MT
12530+ _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
12531+ _TIF_GRSEC_SETXID)
5e856224
MT
12532
12533 /* work to do in syscall_trace_leave() */
12534 #define _TIF_WORK_SYSCALL_EXIT \
12535 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
12536- _TIF_SYSCALL_TRACEPOINT)
12537+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
12538
12539 /* work to do on interrupt/exception return */
12540 #define _TIF_WORK_MASK \
c6e2a6c8 12541@@ -139,7 +136,8 @@ struct thread_info {
5e856224
MT
12542
12543 /* work to do on any return to user space */
12544 #define _TIF_ALLWORK_MASK \
12545- ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT)
12546+ ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
12547+ _TIF_GRSEC_SETXID)
12548
12549 /* Only used for 64 bit */
12550 #define _TIF_DO_NOTIFY_MASK \
c6e2a6c8 12551@@ -173,45 +171,40 @@ struct thread_info {
66a7e928
MT
12552 ret; \
12553 })
71d190be 12554
fe2de317
MT
12555-#ifdef CONFIG_X86_32
12556-
12557-#define STACK_WARN (THREAD_SIZE/8)
12558-/*
12559- * macros/functions for gaining access to the thread information structure
12560- *
12561- * preempt_count needs to be 1 initially, until the scheduler is functional.
12562- */
12563-#ifndef __ASSEMBLY__
12564-
12565-
12566-/* how to get the current stack pointer from C */
12567-register unsigned long current_stack_pointer asm("esp") __used;
71d190be 12568-
71d190be
MT
12569-/* how to get the thread information struct from C */
12570-static inline struct thread_info *current_thread_info(void)
12571-{
12572- return (struct thread_info *)
12573- (current_stack_pointer & ~(THREAD_SIZE - 1));
12574-}
12575-
12576-#else /* !__ASSEMBLY__ */
12577-
fe2de317
MT
12578+#ifdef __ASSEMBLY__
12579 /* how to get the thread information struct from ASM */
12580 #define GET_THREAD_INFO(reg) \
71d190be
MT
12581- movl $-THREAD_SIZE, reg; \
12582- andl %esp, reg
fe2de317
MT
12583+ mov PER_CPU_VAR(current_tinfo), reg
12584
12585 /* use this one if reg already contains %esp */
71d190be
MT
12586-#define GET_THREAD_INFO_WITH_ESP(reg) \
12587- andl $-THREAD_SIZE, reg
fe2de317
MT
12588+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12589+#else
12590+/* how to get the thread information struct from C */
12591+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12592+
12593+static __always_inline struct thread_info *current_thread_info(void)
12594+{
12595+ return percpu_read_stable(current_tinfo);
12596+}
12597+#endif
12598+
12599+#ifdef CONFIG_X86_32
12600+
12601+#define STACK_WARN (THREAD_SIZE/8)
12602+/*
12603+ * macros/functions for gaining access to the thread information structure
12604+ *
12605+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12606+ */
12607+#ifndef __ASSEMBLY__
12608+
12609+/* how to get the current stack pointer from C */
12610+register unsigned long current_stack_pointer asm("esp") __used;
12611
71d190be
MT
12612 #endif
12613
12614 #else /* X86_32 */
12615
12616-#include <asm/percpu.h>
12617-#define KERNEL_STACK_OFFSET (5*8)
12618-
12619 /*
12620 * macros/functions for gaining access to the thread information structure
12621 * preempt_count needs to be 1 initially, until the scheduler is functional.
c6e2a6c8 12622@@ -219,27 +212,8 @@ static inline struct thread_info *current_thread_info(void)
71d190be
MT
12623 #ifndef __ASSEMBLY__
12624 DECLARE_PER_CPU(unsigned long, kernel_stack);
12625
12626-static inline struct thread_info *current_thread_info(void)
12627-{
12628- struct thread_info *ti;
12629- ti = (void *)(percpu_read_stable(kernel_stack) +
12630- KERNEL_STACK_OFFSET - THREAD_SIZE);
12631- return ti;
12632-}
12633-
12634-#else /* !__ASSEMBLY__ */
12635-
12636-/* how to get the thread information struct from ASM */
12637-#define GET_THREAD_INFO(reg) \
12638- movq PER_CPU_VAR(kernel_stack),reg ; \
12639- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12640-
5e856224
MT
12641-/*
12642- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
12643- * a certain register (to be used in assembler memory operands).
12644- */
12645-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
12646-
66a7e928
MT
12647+/* how to get the current stack pointer from C */
12648+register unsigned long current_stack_pointer asm("rsp") __used;
71d190be
MT
12649 #endif
12650
12651 #endif /* !X86_32 */
c6e2a6c8 12652@@ -285,5 +259,16 @@ extern void arch_task_cache_init(void);
71d190be
MT
12653 extern void free_thread_info(struct thread_info *ti);
12654 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12655 #define arch_task_cache_init arch_task_cache_init
12656+
12657+#define __HAVE_THREAD_FUNCTIONS
12658+#define task_thread_info(task) (&(task)->tinfo)
12659+#define task_stack_page(task) ((task)->stack)
12660+#define setup_thread_stack(p, org) do {} while (0)
12661+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12662+
12663+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
66a7e928 12664+extern struct task_struct *alloc_task_struct_node(int node);
71d190be
MT
12665+extern void free_task_struct(struct task_struct *);
12666+
12667 #endif
12668 #endif /* _ASM_X86_THREAD_INFO_H */
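A minimal userspace sketch of the idea behind the thread_info.h hunks above, assuming nothing beyond standard GCC C: the stock 32-bit kernel derives current_thread_info() by masking the stack pointer with ~(THREAD_SIZE - 1), while the patched kernel keeps a dedicated per-CPU pointer (current_tinfo) and reads that instead. All names below (THREAD_SIZE_MODEL, fake_thread_info, current_tinfo_model) are stand-ins for illustration, not kernel symbols.

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE_MODEL 8192UL                /* stand-in for THREAD_SIZE */

struct fake_thread_info { int cpu; };

/* Old scheme: thread_info sits at the base of the stack, found by masking. */
static struct fake_thread_info *ti_from_stack(uintptr_t sp)
{
	return (struct fake_thread_info *)(sp & ~(THREAD_SIZE_MODEL - 1));
}

/* Patched scheme, modelled with TLS: a dedicated pointer is kept per CPU. */
static __thread struct fake_thread_info *current_tinfo_model;

static struct fake_thread_info *ti_from_percpu(void)
{
	return current_tinfo_model;
}

int main(void)
{
	static unsigned char stack[THREAD_SIZE_MODEL] __attribute__((aligned(8192)));
	struct fake_thread_info *ti = (struct fake_thread_info *)stack;
	uintptr_t sp = (uintptr_t)&stack[THREAD_SIZE_MODEL - 64]; /* somewhere on the stack */

	current_tinfo_model = ti;
	printf("mask-based lookup:    %p\n", (void *)ti_from_stack(sp));
	printf("per-CPU-style lookup: %p\n", (void *)ti_from_percpu());
	return 0;
}

Both lookups return the same address here; the point of the patch is that the second one keeps working even when thread_info is no longer stored at the base of the kernel stack.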
fe2de317 12669diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
c6e2a6c8 12670index e054459..14bc8a7 100644
fe2de317
MT
12671--- a/arch/x86/include/asm/uaccess.h
12672+++ b/arch/x86/include/asm/uaccess.h
12673@@ -7,12 +7,15 @@
12674 #include <linux/compiler.h>
12675 #include <linux/thread_info.h>
12676 #include <linux/string.h>
12677+#include <linux/sched.h>
12678 #include <asm/asm.h>
12679 #include <asm/page.h>
12680
12681 #define VERIFY_READ 0
12682 #define VERIFY_WRITE 1
12683
12684+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12685+
12686 /*
12687 * The fs value determines whether argument validity checking should be
12688 * performed or not. If get_fs() == USER_DS, checking is performed, with
12689@@ -28,7 +31,12 @@
12690
12691 #define get_ds() (KERNEL_DS)
12692 #define get_fs() (current_thread_info()->addr_limit)
12693+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12694+void __set_fs(mm_segment_t x);
12695+void set_fs(mm_segment_t x);
12696+#else
12697 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12698+#endif
12699
12700 #define segment_eq(a, b) ((a).seg == (b).seg)
12701
12702@@ -76,7 +84,33 @@
12703 * checks that the pointer is in the user space range - after calling
12704 * this function, memory access functions may still return -EFAULT.
12705 */
12706-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12707+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12708+#define access_ok(type, addr, size) \
12709+({ \
12710+ long __size = size; \
12711+ unsigned long __addr = (unsigned long)addr; \
12712+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12713+ unsigned long __end_ao = __addr + __size - 1; \
12714+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12715+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12716+ while(__addr_ao <= __end_ao) { \
12717+ char __c_ao; \
12718+ __addr_ao += PAGE_SIZE; \
12719+ if (__size > PAGE_SIZE) \
12720+ cond_resched(); \
12721+ if (__get_user(__c_ao, (char __user *)__addr)) \
12722+ break; \
12723+ if (type != VERIFY_WRITE) { \
12724+ __addr = __addr_ao; \
12725+ continue; \
12726+ } \
12727+ if (__put_user(__c_ao, (char __user *)__addr)) \
12728+ break; \
12729+ __addr = __addr_ao; \
12730+ } \
12731+ } \
12732+ __ret_ao; \
12733+})
12734
12735 /*
12736 * The exception table consists of pairs of addresses: the first is the
12737@@ -182,12 +216,20 @@ extern int __get_user_bad(void);
12738 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12739 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12740
12741-
12742+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12743+#define __copyuser_seg "gs;"
12744+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12745+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12746+#else
12747+#define __copyuser_seg
12748+#define __COPYUSER_SET_ES
12749+#define __COPYUSER_RESTORE_ES
12750+#endif
12751
12752 #ifdef CONFIG_X86_32
12753 #define __put_user_asm_u64(x, addr, err, errret) \
12754- asm volatile("1: movl %%eax,0(%2)\n" \
12755- "2: movl %%edx,4(%2)\n" \
12756+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12757+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12758 "3:\n" \
12759 ".section .fixup,\"ax\"\n" \
12760 "4: movl %3,%0\n" \
12761@@ -199,8 +241,8 @@ extern int __get_user_bad(void);
12762 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12763
12764 #define __put_user_asm_ex_u64(x, addr) \
12765- asm volatile("1: movl %%eax,0(%1)\n" \
12766- "2: movl %%edx,4(%1)\n" \
12767+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12768+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12769 "3:\n" \
12770 _ASM_EXTABLE(1b, 2b - 1b) \
12771 _ASM_EXTABLE(2b, 3b - 2b) \
12772@@ -252,7 +294,7 @@ extern void __put_user_8(void);
12773 __typeof__(*(ptr)) __pu_val; \
12774 __chk_user_ptr(ptr); \
12775 might_fault(); \
12776- __pu_val = x; \
12777+ __pu_val = (x); \
12778 switch (sizeof(*(ptr))) { \
12779 case 1: \
12780 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12781@@ -373,7 +415,7 @@ do { \
12782 } while (0)
12783
12784 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12785- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12786+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12787 "2:\n" \
12788 ".section .fixup,\"ax\"\n" \
12789 "3: mov %3,%0\n" \
12790@@ -381,7 +423,7 @@ do { \
12791 " jmp 2b\n" \
12792 ".previous\n" \
12793 _ASM_EXTABLE(1b, 3b) \
12794- : "=r" (err), ltype(x) \
12795+ : "=r" (err), ltype (x) \
12796 : "m" (__m(addr)), "i" (errret), "0" (err))
12797
12798 #define __get_user_size_ex(x, ptr, size) \
12799@@ -406,7 +448,7 @@ do { \
12800 } while (0)
12801
12802 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12803- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12804+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12805 "2:\n" \
12806 _ASM_EXTABLE(1b, 2b - 1b) \
12807 : ltype(x) : "m" (__m(addr)))
12808@@ -423,13 +465,24 @@ do { \
12809 int __gu_err; \
12810 unsigned long __gu_val; \
12811 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12812- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12813+ (x) = (__typeof__(*(ptr)))__gu_val; \
12814 __gu_err; \
12815 })
12816
12817 /* FIXME: this hack is definitely wrong -AK */
12818 struct __large_struct { unsigned long buf[100]; };
12819-#define __m(x) (*(struct __large_struct __user *)(x))
12820+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12821+#define ____m(x) \
12822+({ \
12823+ unsigned long ____x = (unsigned long)(x); \
12824+ if (____x < PAX_USER_SHADOW_BASE) \
12825+ ____x += PAX_USER_SHADOW_BASE; \
12826+ (void __user *)____x; \
12827+})
12828+#else
12829+#define ____m(x) (x)
12830+#endif
12831+#define __m(x) (*(struct __large_struct __user *)____m(x))
12832
12833 /*
12834 * Tell gcc we read from memory instead of writing: this is because
12835@@ -437,7 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
12836 * aliasing issues.
12837 */
12838 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12839- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12840+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12841 "2:\n" \
12842 ".section .fixup,\"ax\"\n" \
12843 "3: mov %3,%0\n" \
12844@@ -445,10 +498,10 @@ struct __large_struct { unsigned long buf[100]; };
12845 ".previous\n" \
12846 _ASM_EXTABLE(1b, 3b) \
12847 : "=r"(err) \
12848- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12849+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12850
12851 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12852- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12853+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12854 "2:\n" \
12855 _ASM_EXTABLE(1b, 2b - 1b) \
12856 : : ltype(x), "m" (__m(addr)))
12857@@ -487,8 +540,12 @@ struct __large_struct { unsigned long buf[100]; };
12858 * On error, the variable @x is set to zero.
12859 */
12860
12861+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12862+#define __get_user(x, ptr) get_user((x), (ptr))
12863+#else
12864 #define __get_user(x, ptr) \
12865 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12866+#endif
12867
12868 /**
12869 * __put_user: - Write a simple value into user space, with less checking.
12870@@ -510,8 +567,12 @@ struct __large_struct { unsigned long buf[100]; };
12871 * Returns zero on success, or -EFAULT on error.
12872 */
12873
12874+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12875+#define __put_user(x, ptr) put_user((x), (ptr))
12876+#else
12877 #define __put_user(x, ptr) \
12878 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12879+#endif
12880
12881 #define __get_user_unaligned __get_user
12882 #define __put_user_unaligned __put_user
12883@@ -529,7 +590,7 @@ struct __large_struct { unsigned long buf[100]; };
12884 #define get_user_ex(x, ptr) do { \
12885 unsigned long __gue_val; \
12886 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12887- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12888+ (x) = (__typeof__(*(ptr)))__gue_val; \
12889 } while (0)
12890
12891 #ifdef CONFIG_X86_WP_WORKS_OK
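A hedged userspace model of what the reworked access_ok() above does when a range crosses a page boundary: it walks the range one page at a time and touches a byte per page (read-only for VERIFY_READ, read-and-write-back otherwise) so the pages are faulted in before the real copy. PAGE_SIZE_MODEL, the VERIFY_*_MODEL constants and probe_range() are local stand-ins rather than kernel API, and the cond_resched()/__get_user() details are left out.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_MODEL    4096UL
#define VERIFY_READ_MODEL  0
#define VERIFY_WRITE_MODEL 1

static int probe_range(int type, volatile char *addr, size_t size)
{
	volatile char *p = addr;
	volatile char *end = addr + size - 1;

	while (p <= end) {
		char c = *p;                    /* read probe: faults the page in   */
		if (type == VERIFY_WRITE_MODEL)
			*p = c;                 /* write probe: write the byte back */
		(void)c;
		/* jump to the first byte of the next page */
		p = (volatile char *)(((uintptr_t)p | (PAGE_SIZE_MODEL - 1)) + 1);
	}
	return 1;                               /* every page was reachable */
}

int main(void)
{
	static char buf[3 * PAGE_SIZE_MODEL];

	printf("probe result: %d\n", probe_range(VERIFY_WRITE_MODEL, buf, sizeof(buf)));
	return 0;
}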
12892diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
c1e3898a 12893index 8084bc7..3d6ec37 100644
fe2de317
MT
12894--- a/arch/x86/include/asm/uaccess_32.h
12895+++ b/arch/x86/include/asm/uaccess_32.h
4c928ab7
MT
12896@@ -11,15 +11,15 @@
12897 #include <asm/page.h>
12898
12899 unsigned long __must_check __copy_to_user_ll
12900- (void __user *to, const void *from, unsigned long n);
12901+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
12902 unsigned long __must_check __copy_from_user_ll
12903- (void *to, const void __user *from, unsigned long n);
12904+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12905 unsigned long __must_check __copy_from_user_ll_nozero
12906- (void *to, const void __user *from, unsigned long n);
12907+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12908 unsigned long __must_check __copy_from_user_ll_nocache
12909- (void *to, const void __user *from, unsigned long n);
12910+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12911 unsigned long __must_check __copy_from_user_ll_nocache_nozero
12912- (void *to, const void __user *from, unsigned long n);
12913+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
12914
12915 /**
12916 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
c6e2a6c8 12917@@ -43,6 +43,9 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
58c5fc13
MT
12918 static __always_inline unsigned long __must_check
12919 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12920 {
12921+ if ((long)n < 0)
12922+ return n;
12923+
12924 if (__builtin_constant_p(n)) {
12925 unsigned long ret;
12926
c6e2a6c8 12927@@ -61,6 +64,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
58c5fc13
MT
12928 return ret;
12929 }
12930 }
12931+ if (!__builtin_constant_p(n))
12932+ check_object_size(from, n, true);
12933 return __copy_to_user_ll(to, from, n);
12934 }
12935
c6e2a6c8 12936@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
66a7e928
MT
12937 __copy_to_user(void __user *to, const void *from, unsigned long n)
12938 {
12939 might_fault();
12940+
12941 return __copy_to_user_inatomic(to, from, n);
12942 }
12943
58c5fc13
MT
12944 static __always_inline unsigned long
12945 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12946 {
12947+ if ((long)n < 0)
12948+ return n;
12949+
12950 /* Avoid zeroing the tail if the copy fails..
12951 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12952 * but as the zeroing behaviour is only significant when n is not
c6e2a6c8 12953@@ -137,6 +146,10 @@ static __always_inline unsigned long
58c5fc13
MT
12954 __copy_from_user(void *to, const void __user *from, unsigned long n)
12955 {
12956 might_fault();
12957+
12958+ if ((long)n < 0)
12959+ return n;
12960+
12961 if (__builtin_constant_p(n)) {
12962 unsigned long ret;
12963
c6e2a6c8 12964@@ -152,6 +165,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
58c5fc13
MT
12965 return ret;
12966 }
12967 }
12968+ if (!__builtin_constant_p(n))
12969+ check_object_size(to, n, false);
12970 return __copy_from_user_ll(to, from, n);
12971 }
12972
c6e2a6c8 12973@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
58c5fc13
MT
12974 const void __user *from, unsigned long n)
12975 {
12976 might_fault();
12977+
12978+ if ((long)n < 0)
12979+ return n;
12980+
12981 if (__builtin_constant_p(n)) {
12982 unsigned long ret;
12983
c6e2a6c8 12984@@ -181,15 +200,19 @@ static __always_inline unsigned long
58c5fc13
MT
12985 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12986 unsigned long n)
12987 {
12988- return __copy_from_user_ll_nocache_nozero(to, from, n);
12989+ if ((long)n < 0)
12990+ return n;
fe2de317
MT
12991+
12992+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12993 }
ae4e228f
MT
12994
12995-unsigned long __must_check copy_to_user(void __user *to,
12996- const void *from, unsigned long n);
12997-unsigned long __must_check _copy_from_user(void *to,
12998- const void __user *from,
12999- unsigned long n);
fe2de317 13000-
ae4e228f
MT
13001+extern void copy_to_user_overflow(void)
13002+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13003+ __compiletime_error("copy_to_user() buffer size is not provably correct")
13004+#else
13005+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
13006+#endif
13007+;
13008
13009 extern void copy_from_user_overflow(void)
13010 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
c6e2a6c8 13011@@ -199,17 +222,61 @@ extern void copy_from_user_overflow(void)
ae4e228f
MT
13012 #endif
13013 ;
13014
13015-static inline unsigned long __must_check copy_from_user(void *to,
13016- const void __user *from,
13017- unsigned long n)
58c5fc13
MT
13018+/**
13019+ * copy_to_user: - Copy a block of data into user space.
13020+ * @to: Destination address, in user space.
13021+ * @from: Source address, in kernel space.
13022+ * @n: Number of bytes to copy.
13023+ *
13024+ * Context: User context only. This function may sleep.
13025+ *
13026+ * Copy data from kernel space to user space.
13027+ *
13028+ * Returns number of bytes that could not be copied.
13029+ * On success, this will be zero.
13030+ */
ae4e228f 13031+static inline unsigned long __must_check
58c5fc13 13032+copy_to_user(void __user *to, const void *from, unsigned long n)
c1e3898a
MT
13033 {
13034- int sz = __compiletime_object_size(to);
13035+ size_t sz = __compiletime_object_size(from);
13036
13037- if (likely(sz == -1 || sz >= n))
13038- n = _copy_from_user(to, from, n);
13039- else
13040+ if (unlikely(sz != (size_t)-1 && sz < n))
ae4e228f
MT
13041+ copy_to_user_overflow();
13042+ else if (access_ok(VERIFY_WRITE, to, n))
58c5fc13
MT
13043+ n = __copy_to_user(to, from, n);
13044+ return n;
13045+}
13046+
13047+/**
13048+ * copy_from_user: - Copy a block of data from user space.
13049+ * @to: Destination address, in kernel space.
13050+ * @from: Source address, in user space.
13051+ * @n: Number of bytes to copy.
13052+ *
13053+ * Context: User context only. This function may sleep.
13054+ *
13055+ * Copy data from user space to kernel space.
13056+ *
13057+ * Returns number of bytes that could not be copied.
13058+ * On success, this will be zero.
13059+ *
13060+ * If some data could not be copied, this function will pad the copied
13061+ * data to the requested size using zero bytes.
13062+ */
ae4e228f 13063+static inline unsigned long __must_check
58c5fc13 13064+copy_from_user(void *to, const void __user *from, unsigned long n)
c1e3898a
MT
13065+{
13066+ size_t sz = __compiletime_object_size(to);
13067+
13068+ if (unlikely(sz != (size_t)-1 && sz < n))
ae4e228f
MT
13069 copy_from_user_overflow();
13070-
13071+ else if (access_ok(VERIFY_READ, from, n))
58c5fc13
MT
13072+ n = __copy_from_user(to, from, n);
13073+ else if ((long)n > 0) {
13074+ if (!__builtin_constant_p(n))
13075+ check_object_size(to, n, false);
13076+ memset(to, 0, n);
13077+ }
ae4e228f 13078 return n;
58c5fc13
MT
13079 }
13080
c6e2a6c8 13081@@ -230,7 +297,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
4c928ab7
MT
13082 #define strlen_user(str) strnlen_user(str, LONG_MAX)
13083
13084 long strnlen_user(const char __user *str, long n);
13085-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
13086-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
13087+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13088+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13089
13090 #endif /* _ASM_X86_UACCESS_32_H */
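The copy_to_user()/copy_from_user() replacements above gate the copy on __compiletime_object_size(), which is GCC's __builtin_object_size() under the hood: when the compiler can prove the kernel-side buffer is smaller than the requested length, the copy is refused and copy_*_user_overflow() fires. Below is a minimal sketch of that check, assuming GCC or Clang with optimisation enabled (the builtin returns (size_t)-1 at -O0); copy_size_ok() is an illustrative name, not the kernel helper.

#include <stddef.h>
#include <stdio.h>

/* Inlined so the builtin sees the real object at the call site. */
static inline __attribute__((always_inline))
int copy_size_ok(const void *to, size_t n)
{
	size_t sz = __builtin_object_size(to, 0);   /* (size_t)-1 when unknown */

	return sz == (size_t)-1 || sz >= n;
}

int main(void)
{
	char small[8];

	/* Build with -O1 or higher; at -O0 the object size is unknown and
	 * both checks pass, mirroring the sz == (size_t)-1 case above. */
	printf("copy 8 bytes into small[8]:  %s\n",
	       copy_size_ok(small, 8)  ? "allowed" : "rejected");
	printf("copy 32 bytes into small[8]: %s\n",
	       copy_size_ok(small, 32) ? "allowed" : "rejected");
	return 0;
}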
fe2de317 13091diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
c1e3898a 13092index fcd4b6f..ef04f8f 100644
fe2de317
MT
13093--- a/arch/x86/include/asm/uaccess_64.h
13094+++ b/arch/x86/include/asm/uaccess_64.h
15a11c5b 13095@@ -10,6 +10,9 @@
df50ba0c
MT
13096 #include <asm/alternative.h>
13097 #include <asm/cpufeature.h>
58c5fc13 13098 #include <asm/page.h>
df50ba0c
MT
13099+#include <asm/pgtable.h>
13100+
58c5fc13 13101+#define set_fs(x) (current_thread_info()->addr_limit = (x))
df50ba0c 13102
58c5fc13
MT
13103 /*
13104 * Copy To/From Userspace
4c928ab7 13105@@ -17,12 +20,14 @@
fe2de317
MT
13106
13107 /* Handles exceptions in both to and from, but doesn't do access_ok */
13108 __must_check unsigned long
13109-copy_user_generic_string(void *to, const void *from, unsigned len);
4c928ab7 13110+copy_user_generic_string(void *to, const void *from, unsigned long len) __size_overflow(3);
fe2de317
MT
13111 __must_check unsigned long
13112-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
4c928ab7 13113+copy_user_generic_unrolled(void *to, const void *from, unsigned long len) __size_overflow(3);
fe2de317
MT
13114
13115 static __always_inline __must_check unsigned long
13116-copy_user_generic(void *to, const void *from, unsigned len)
4c928ab7
MT
13117+copy_user_generic(void *to, const void *from, unsigned long len) __size_overflow(3);
13118+static __always_inline __must_check unsigned long
fe2de317
MT
13119+copy_user_generic(void *to, const void *from, unsigned long len)
13120 {
13121 unsigned ret;
13122
c6e2a6c8 13123@@ -32,142 +37,238 @@ copy_user_generic(void *to, const void *from, unsigned len)
4c928ab7
MT
13124 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
13125 "=d" (len)),
13126 "1" (to), "2" (from), "3" (len)
13127- : "memory", "rcx", "r8", "r9", "r10", "r11");
13128+ : "memory", "rcx", "r8", "r9", "r11");
df50ba0c
MT
13129 return ret;
13130 }
13131
ae4e228f 13132+static __always_inline __must_check unsigned long
4c928ab7 13133+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
ae4e228f 13134+static __always_inline __must_check unsigned long
4c928ab7 13135+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
ae4e228f 13136 __must_check unsigned long
fe2de317
MT
13137-_copy_to_user(void __user *to, const void *from, unsigned len);
13138-__must_check unsigned long
13139-_copy_from_user(void *to, const void __user *from, unsigned len);
13140-__must_check unsigned long
13141-copy_in_user(void __user *to, const void __user *from, unsigned len);
4c928ab7 13142+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
c6e2a6c8
MT
13143+
13144+extern void copy_to_user_overflow(void)
13145+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13146+ __compiletime_error("copy_to_user() buffer size is not provably correct")
13147+#else
13148+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
13149+#endif
13150+;
13151+
13152+extern void copy_from_user_overflow(void)
13153+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
13154+ __compiletime_error("copy_from_user() buffer size is not provably correct")
13155+#else
13156+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
13157+#endif
13158+;
58c5fc13 13159
ae4e228f
MT
13160 static inline unsigned long __must_check copy_from_user(void *to,
13161 const void __user *from,
4c928ab7 13162 unsigned long n)
ae4e228f
MT
13163 {
13164- int sz = __compiletime_object_size(to);
13165-
13166 might_fault();
13167- if (likely(sz == -1 || sz >= n))
13168- n = _copy_from_user(to, from, n);
13169-#ifdef CONFIG_DEBUG_VM
13170- else
13171- WARN(1, "Buffer overflow detected!\n");
13172-#endif
13173+
13174+ if (access_ok(VERIFY_READ, from, n))
13175+ n = __copy_from_user(to, from, n);
fe2de317 13176+ else if (n < INT_MAX) {
ae4e228f
MT
13177+ if (!__builtin_constant_p(n))
13178+ check_object_size(to, n, false);
13179+ memset(to, 0, n);
13180+ }
13181 return n;
13182 }
13183
fe2de317
MT
13184 static __always_inline __must_check
13185-int copy_to_user(void __user *dst, const void *src, unsigned size)
13186+int copy_to_user(void __user *dst, const void *src, unsigned long size)
ae4e228f
MT
13187 {
13188 might_fault();
13189
13190- return _copy_to_user(dst, src, size);
13191+ if (access_ok(VERIFY_WRITE, dst, size))
13192+ size = __copy_to_user(dst, src, size);
13193+ return size;
13194 }
13195
58c5fc13
MT
13196 static __always_inline __must_check
13197-int __copy_from_user(void *dst, const void __user *src, unsigned size)
fe2de317 13198+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
58c5fc13
MT
13199 {
13200- int ret = 0;
c1e3898a 13201+ size_t sz = __compiletime_object_size(dst);
58c5fc13
MT
13202+ unsigned ret = 0;
13203
13204 might_fault();
13205- if (!__builtin_constant_p(size))
bc901d79 13206- return copy_user_generic(dst, (__force void *)src, size);
58c5fc13 13207+
fe2de317 13208+ if (size > INT_MAX)
58c5fc13
MT
13209+ return size;
13210+
bc901d79
MT
13211+#ifdef CONFIG_PAX_MEMORY_UDEREF
13212+ if (!__access_ok(VERIFY_READ, src, size))
13213+ return size;
13214+#endif
13215+
c1e3898a 13216+ if (unlikely(sz != (size_t)-1 && sz < size)) {
c6e2a6c8 13217+ copy_from_user_overflow();
ae4e228f
MT
13218+ return size;
13219+ }
13220+
58c5fc13
MT
13221+ if (!__builtin_constant_p(size)) {
13222+ check_object_size(dst, size, false);
8308f9c9
MT
13223+
13224+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
13225+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13226+ src += PAX_USER_SHADOW_BASE;
8308f9c9
MT
13227+#endif
13228+
6e9df6a3 13229+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
58c5fc13
MT
13230+ }
13231 switch (size) {
bc901d79
MT
13232- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
13233+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
58c5fc13 13234 ret, "b", "b", "=q", 1);
bc901d79
MT
13235 return ret;
13236- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
13237+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
13238 ret, "w", "w", "=r", 2);
13239 return ret;
13240- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
13241+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
13242 ret, "l", "k", "=r", 4);
13243 return ret;
13244- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
13245+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13246 ret, "q", "", "=r", 8);
13247 return ret;
13248 case 10:
13249- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13250+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13251 ret, "q", "", "=r", 10);
13252 if (unlikely(ret))
13253 return ret;
13254 __get_user_asm(*(u16 *)(8 + (char *)dst),
13255- (u16 __user *)(8 + (char __user *)src),
13256+ (const u16 __user *)(8 + (const char __user *)src),
13257 ret, "w", "w", "=r", 2);
13258 return ret;
13259 case 16:
13260- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
13261+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
13262 ret, "q", "", "=r", 16);
13263 if (unlikely(ret))
13264 return ret;
13265 __get_user_asm(*(u64 *)(8 + (char *)dst),
13266- (u64 __user *)(8 + (char __user *)src),
13267+ (const u64 __user *)(8 + (const char __user *)src),
df50ba0c
MT
13268 ret, "q", "", "=r", 8);
13269 return ret;
13270 default:
bc901d79 13271- return copy_user_generic(dst, (__force void *)src, size);
8308f9c9
MT
13272+
13273+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
13274+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13275+ src += PAX_USER_SHADOW_BASE;
8308f9c9
MT
13276+#endif
13277+
6e9df6a3 13278+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
df50ba0c 13279 }
58c5fc13
MT
13280 }
13281
13282 static __always_inline __must_check
13283-int __copy_to_user(void __user *dst, const void *src, unsigned size)
fe2de317 13284+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
58c5fc13
MT
13285 {
13286- int ret = 0;
c1e3898a 13287+ size_t sz = __compiletime_object_size(src);
58c5fc13
MT
13288+ unsigned ret = 0;
13289
13290 might_fault();
13291- if (!__builtin_constant_p(size))
6e9df6a3 13292- return copy_user_generic((__force void *)dst, src, size);
58c5fc13 13293+
fe2de317 13294+ if (size > INT_MAX)
58c5fc13
MT
13295+ return size;
13296+
bc901d79
MT
13297+#ifdef CONFIG_PAX_MEMORY_UDEREF
13298+ if (!__access_ok(VERIFY_WRITE, dst, size))
13299+ return size;
13300+#endif
13301+
c1e3898a 13302+ if (unlikely(sz != (size_t)-1 && sz < size)) {
c6e2a6c8 13303+ copy_to_user_overflow();
ae4e228f
MT
13304+ return size;
13305+ }
13306+
58c5fc13
MT
13307+ if (!__builtin_constant_p(size)) {
13308+ check_object_size(src, size, true);
8308f9c9
MT
13309+
13310+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
13311+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13312+ dst += PAX_USER_SHADOW_BASE;
8308f9c9
MT
13313+#endif
13314+
6e9df6a3 13315+ return copy_user_generic((__force_kernel void *)dst, src, size);
58c5fc13
MT
13316+ }
13317 switch (size) {
bc901d79
MT
13318- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
13319+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
58c5fc13 13320 ret, "b", "b", "iq", 1);
bc901d79
MT
13321 return ret;
13322- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
13323+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
13324 ret, "w", "w", "ir", 2);
13325 return ret;
13326- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
13327+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
13328 ret, "l", "k", "ir", 4);
13329 return ret;
13330- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
13331+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13332 ret, "q", "", "er", 8);
13333 return ret;
13334 case 10:
13335- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13336+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13337 ret, "q", "", "er", 10);
13338 if (unlikely(ret))
13339 return ret;
13340 asm("":::"memory");
13341- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
13342+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
13343 ret, "w", "w", "ir", 2);
13344 return ret;
13345 case 16:
13346- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
13347+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
13348 ret, "q", "", "er", 16);
13349 if (unlikely(ret))
13350 return ret;
13351 asm("":::"memory");
13352- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
13353+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
df50ba0c
MT
13354 ret, "q", "", "er", 8);
13355 return ret;
13356 default:
6e9df6a3 13357- return copy_user_generic((__force void *)dst, src, size);
8308f9c9
MT
13358+
13359+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
13360+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13361+ dst += PAX_USER_SHADOW_BASE;
8308f9c9
MT
13362+#endif
13363+
6e9df6a3 13364+ return copy_user_generic((__force_kernel void *)dst, src, size);
df50ba0c 13365 }
58c5fc13
MT
13366 }
13367
13368 static __always_inline __must_check
13369-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
fe2de317 13370+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
58c5fc13
MT
13371 {
13372- int ret = 0;
58c5fc13
MT
13373+ unsigned ret = 0;
13374
13375 might_fault();
df50ba0c 13376- if (!__builtin_constant_p(size))
6e9df6a3
MT
13377- return copy_user_generic((__force void *)dst,
13378- (__force void *)src, size);
58c5fc13 13379+
fe2de317 13380+ if (size > INT_MAX)
58c5fc13
MT
13381+ return size;
13382+
bc901d79
MT
13383+#ifdef CONFIG_PAX_MEMORY_UDEREF
13384+ if (!__access_ok(VERIFY_READ, src, size))
13385+ return size;
13386+ if (!__access_ok(VERIFY_WRITE, dst, size))
13387+ return size;
13388+#endif
13389+
df50ba0c 13390+ if (!__builtin_constant_p(size)) {
8308f9c9
MT
13391+
13392+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
13393+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13394+ src += PAX_USER_SHADOW_BASE;
13395+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13396+ dst += PAX_USER_SHADOW_BASE;
8308f9c9
MT
13397+#endif
13398+
6e9df6a3
MT
13399+ return copy_user_generic((__force_kernel void *)dst,
13400+ (__force_kernel const void *)src, size);
df50ba0c
MT
13401+ }
13402 switch (size) {
13403 case 1: {
13404 u8 tmp;
bc901d79
MT
13405- __get_user_asm(tmp, (u8 __user *)src,
13406+ __get_user_asm(tmp, (const u8 __user *)src,
13407 ret, "b", "b", "=q", 1);
13408 if (likely(!ret))
13409 __put_user_asm(tmp, (u8 __user *)dst,
c6e2a6c8 13410@@ -176,7 +277,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
bc901d79
MT
13411 }
13412 case 2: {
13413 u16 tmp;
13414- __get_user_asm(tmp, (u16 __user *)src,
13415+ __get_user_asm(tmp, (const u16 __user *)src,
13416 ret, "w", "w", "=r", 2);
13417 if (likely(!ret))
13418 __put_user_asm(tmp, (u16 __user *)dst,
c6e2a6c8 13419@@ -186,7 +287,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
bc901d79
MT
13420
13421 case 4: {
13422 u32 tmp;
13423- __get_user_asm(tmp, (u32 __user *)src,
13424+ __get_user_asm(tmp, (const u32 __user *)src,
13425 ret, "l", "k", "=r", 4);
13426 if (likely(!ret))
13427 __put_user_asm(tmp, (u32 __user *)dst,
c6e2a6c8 13428@@ -195,7 +296,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
bc901d79
MT
13429 }
13430 case 8: {
13431 u64 tmp;
13432- __get_user_asm(tmp, (u64 __user *)src,
13433+ __get_user_asm(tmp, (const u64 __user *)src,
13434 ret, "q", "", "=r", 8);
13435 if (likely(!ret))
13436 __put_user_asm(tmp, (u64 __user *)dst,
c6e2a6c8 13437@@ -203,47 +304,92 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
df50ba0c
MT
13438 return ret;
13439 }
13440 default:
6e9df6a3
MT
13441- return copy_user_generic((__force void *)dst,
13442- (__force void *)src, size);
8308f9c9
MT
13443+
13444+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
13445+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13446+ src += PAX_USER_SHADOW_BASE;
13447+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13448+ dst += PAX_USER_SHADOW_BASE;
8308f9c9
MT
13449+#endif
13450+
6e9df6a3
MT
13451+ return copy_user_generic((__force_kernel void *)dst,
13452+ (__force_kernel const void *)src, size);
df50ba0c 13453 }
bc901d79
MT
13454 }
13455
4c928ab7
MT
13456 __must_check long strnlen_user(const char __user *str, long n);
13457 __must_check long __strnlen_user(const char __user *str, long n);
13458 __must_check long strlen_user(const char __user *str);
13459-__must_check unsigned long clear_user(void __user *mem, unsigned long len);
13460-__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
13461+__must_check unsigned long clear_user(void __user *mem, unsigned long len) __size_overflow(2);
13462+__must_check unsigned long __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
fe2de317 13463
df50ba0c 13464 static __must_check __always_inline int
fe2de317
MT
13465-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
13466+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
df50ba0c 13467 {
6e9df6a3 13468- return copy_user_generic(dst, (__force const void *)src, size);
fe2de317 13469+ if (size > INT_MAX)
bc901d79
MT
13470+ return size;
13471+
13472+#ifdef CONFIG_PAX_MEMORY_UDEREF
13473+ if (!__access_ok(VERIFY_READ, src, size))
13474+ return size;
bc901d79 13475+
df50ba0c
MT
13476+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
13477+ src += PAX_USER_SHADOW_BASE;
8308f9c9
MT
13478+#endif
13479+
6e9df6a3 13480+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
ae4e228f 13481 }
58c5fc13
MT
13482
13483-static __must_check __always_inline int
fe2de317 13484-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
58c5fc13 13485+static __must_check __always_inline unsigned long
fe2de317 13486+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
58c5fc13 13487 {
6e9df6a3 13488- return copy_user_generic((__force void *)dst, src, size);
fe2de317 13489+ if (size > INT_MAX)
58c5fc13
MT
13490+ return size;
13491+
bc901d79
MT
13492+#ifdef CONFIG_PAX_MEMORY_UDEREF
13493+ if (!__access_ok(VERIFY_WRITE, dst, size))
13494+ return size;
bc901d79 13495+
df50ba0c
MT
13496+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
13497+ dst += PAX_USER_SHADOW_BASE;
8308f9c9
MT
13498+#endif
13499+
6e9df6a3 13500+ return copy_user_generic((__force_kernel void *)dst, src, size);
58c5fc13
MT
13501 }
13502
13503-extern long __copy_user_nocache(void *dst, const void __user *src,
fe2de317 13504- unsigned size, int zerorest);
58c5fc13 13505+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
4c928ab7 13506+ unsigned long size, int zerorest) __size_overflow(3);
58c5fc13
MT
13507
13508-static inline int
13509-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
fe2de317 13510+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
58c5fc13
MT
13511 {
13512 might_sleep();
13513+
fe2de317 13514+ if (size > INT_MAX)
58c5fc13 13515+ return size;
bc901d79
MT
13516+
13517+#ifdef CONFIG_PAX_MEMORY_UDEREF
13518+ if (!__access_ok(VERIFY_READ, src, size))
13519+ return size;
13520+#endif
58c5fc13
MT
13521+
13522 return __copy_user_nocache(dst, src, size, 1);
13523 }
13524
13525-static inline int
13526-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
fe2de317 13527- unsigned size)
58c5fc13 13528+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
fe2de317 13529+ unsigned long size)
58c5fc13 13530 {
fe2de317 13531+ if (size > INT_MAX)
58c5fc13 13532+ return size;
bc901d79
MT
13533+
13534+#ifdef CONFIG_PAX_MEMORY_UDEREF
13535+ if (!__access_ok(VERIFY_READ, src, size))
13536+ return size;
13537+#endif
58c5fc13
MT
13538+
13539 return __copy_user_nocache(dst, src, size, 0);
13540 }
13541
13542-unsigned long
6e9df6a3 13543-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
58c5fc13 13544+extern unsigned long
4c928ab7 13545+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
58c5fc13
MT
13546
13547 #endif /* _ASM_X86_UACCESS_64_H */
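A small model of the PAX_USER_SHADOW_BASE fixups that recur in the 64-bit hunks above, assuming only standard C: under PAX_MEMORY_UDEREF the kernel reaches userland through a shifted alias mapping, so any user address below the shadow base is rebased before the low-level copy runs. SHADOW_BASE_MODEL and fixup_user_addr() are illustrative stand-ins; the real constant and mapping live in the PaX code, not here.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SHADOW_BASE_MODEL UINT64_C(0x100000000000)  /* stand-in for PAX_USER_SHADOW_BASE */

static uint64_t fixup_user_addr(uint64_t addr)
{
	if (addr < SHADOW_BASE_MODEL)
		addr += SHADOW_BASE_MODEL;          /* rebase into the shadowed alias */
	return addr;
}

int main(void)
{
	uint64_t user = UINT64_C(0x00007f0012345678);

	printf("raw user address:     0x%" PRIx64 "\n", user);
	printf("rebased for the copy: 0x%" PRIx64 "\n", fixup_user_addr(user));
	return 0;
}

The same rebase appears before copy_user_generic(), __copy_user_nocache() and the xsave/xrstor wrappers; the ____m() macro added to uaccess.h earlier in this patch centralises the identical adjustment for the single-value accessors.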
fe2de317
MT
13548diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
13549index bb05228..d763d5b 100644
13550--- a/arch/x86/include/asm/vdso.h
13551+++ b/arch/x86/include/asm/vdso.h
6e9df6a3
MT
13552@@ -11,7 +11,7 @@ extern const char VDSO32_PRELINK[];
13553 #define VDSO32_SYMBOL(base, name) \
13554 ({ \
13555 extern const char VDSO32_##name[]; \
13556- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13557+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
13558 })
13559 #endif
13560
fe2de317 13561diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
c6e2a6c8 13562index 764b66a..ad3cfc8 100644
fe2de317
MT
13563--- a/arch/x86/include/asm/x86_init.h
13564+++ b/arch/x86/include/asm/x86_init.h
5e856224 13565@@ -29,7 +29,7 @@ struct x86_init_mpparse {
15a11c5b
MT
13566 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13567 void (*find_smp_config)(void);
13568 void (*get_smp_config)(unsigned int early);
13569-};
13570+} __no_const;
57199397 13571
15a11c5b
MT
13572 /**
13573 * struct x86_init_resources - platform specific resource related ops
5e856224 13574@@ -43,7 +43,7 @@ struct x86_init_resources {
15a11c5b
MT
13575 void (*probe_roms)(void);
13576 void (*reserve_resources)(void);
13577 char *(*memory_setup)(void);
13578-};
13579+} __no_const;
58c5fc13 13580
15a11c5b
MT
13581 /**
13582 * struct x86_init_irqs - platform specific interrupt setup
5e856224 13583@@ -56,7 +56,7 @@ struct x86_init_irqs {
15a11c5b
MT
13584 void (*pre_vector_init)(void);
13585 void (*intr_init)(void);
13586 void (*trap_init)(void);
13587-};
13588+} __no_const;
58c5fc13 13589
15a11c5b
MT
13590 /**
13591 * struct x86_init_oem - oem platform specific customizing functions
5e856224 13592@@ -66,7 +66,7 @@ struct x86_init_irqs {
15a11c5b
MT
13593 struct x86_init_oem {
13594 void (*arch_setup)(void);
13595 void (*banner)(void);
13596-};
13597+} __no_const;
58c5fc13 13598
15a11c5b
MT
13599 /**
13600 * struct x86_init_mapping - platform specific initial kernel pagetable setup
5e856224 13601@@ -77,7 +77,7 @@ struct x86_init_oem {
15a11c5b
MT
13602 */
13603 struct x86_init_mapping {
13604 void (*pagetable_reserve)(u64 start, u64 end);
13605-};
13606+} __no_const;
58c5fc13 13607
15a11c5b
MT
13608 /**
13609 * struct x86_init_paging - platform specific paging functions
5e856224 13610@@ -87,7 +87,7 @@ struct x86_init_mapping {
15a11c5b
MT
13611 struct x86_init_paging {
13612 void (*pagetable_setup_start)(pgd_t *base);
13613 void (*pagetable_setup_done)(pgd_t *base);
13614-};
13615+} __no_const;
58c5fc13 13616
15a11c5b
MT
13617 /**
13618 * struct x86_init_timers - platform specific timer setup
5e856224 13619@@ -102,7 +102,7 @@ struct x86_init_timers {
15a11c5b
MT
13620 void (*tsc_pre_init)(void);
13621 void (*timer_init)(void);
13622 void (*wallclock_init)(void);
13623-};
13624+} __no_const;
58c5fc13 13625
15a11c5b
MT
13626 /**
13627 * struct x86_init_iommu - platform specific iommu setup
5e856224 13628@@ -110,7 +110,7 @@ struct x86_init_timers {
15a11c5b
MT
13629 */
13630 struct x86_init_iommu {
13631 int (*iommu_init)(void);
13632-};
13633+} __no_const;
58c5fc13 13634
15a11c5b
MT
13635 /**
13636 * struct x86_init_pci - platform specific pci init functions
5e856224 13637@@ -124,7 +124,7 @@ struct x86_init_pci {
15a11c5b
MT
13638 int (*init)(void);
13639 void (*init_irq)(void);
13640 void (*fixup_irqs)(void);
13641-};
13642+} __no_const;
58c5fc13 13643
15a11c5b
MT
13644 /**
13645 * struct x86_init_ops - functions for platform specific setup
5e856224 13646@@ -140,7 +140,7 @@ struct x86_init_ops {
15a11c5b
MT
13647 struct x86_init_timers timers;
13648 struct x86_init_iommu iommu;
13649 struct x86_init_pci pci;
13650-};
13651+} __no_const;
66a7e928 13652
15a11c5b
MT
13653 /**
13654 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
c6e2a6c8 13655@@ -151,7 +151,7 @@ struct x86_cpuinit_ops {
15a11c5b 13656 void (*setup_percpu_clockev)(void);
c6e2a6c8 13657 void (*early_percpu_clock_init)(void);
5e856224 13658 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
15a11c5b
MT
13659-};
13660+} __no_const;
66a7e928 13661
15a11c5b
MT
13662 /**
13663 * struct x86_platform_ops - platform specific runtime functions
c6e2a6c8 13664@@ -177,7 +177,7 @@ struct x86_platform_ops {
15a11c5b 13665 int (*i8042_detect)(void);
c6e2a6c8
MT
13666 void (*save_sched_clock_state)(void);
13667 void (*restore_sched_clock_state)(void);
15a11c5b
MT
13668-};
13669+} __no_const;
13670
13671 struct pci_dev;
13672
c6e2a6c8 13673@@ -186,7 +186,7 @@ struct x86_msi_ops {
15a11c5b
MT
13674 void (*teardown_msi_irq)(unsigned int irq);
13675 void (*teardown_msi_irqs)(struct pci_dev *dev);
5e856224 13676 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
15a11c5b
MT
13677-};
13678+} __no_const;
13679
13680 extern struct x86_init_ops x86_init;
13681 extern struct x86_cpuinit_ops x86_cpuinit;
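A hedged userspace sketch of why these hunks add __no_const, assuming the constify plugin behaves as the patch implies: structures that contain only function pointers are normally turned into const, read-only objects, so the few ops tables that genuinely are rewritten at run time (x86_init, the io_apic_ops replaced via set_io_apic_ops() earlier in this patch) must opt out. pax_open_kernel()/pax_close_kernel() are modelled as empty stubs; ops_model and the function names are invented for the example.

#include <stdio.h>
#include <string.h>

struct ops_model {
	void (*init)(void);
	void (*teardown)(void);
};

static void init_a(void)     { puts("init A"); }
static void init_b(void)     { puts("init B"); }
static void teardown_x(void) { puts("teardown"); }

/* Constified case: fixed for the lifetime of the program, like most ops tables. */
static const struct ops_model fixed_ops = { init_a, teardown_x };

/* "__no_const" case: legitimately replaced at run time, so it stays writable. */
static struct ops_model switchable_ops = { init_a, teardown_x };

static void model_open_kernel(void)  { /* pax_open_kernel() stand-in  */ }
static void model_close_kernel(void) { /* pax_close_kernel() stand-in */ }

static void set_ops_model(const struct ops_model *ops)
{
	model_open_kernel();
	memcpy(&switchable_ops, ops, sizeof(switchable_ops));
	model_close_kernel();
}

int main(void)
{
	const struct ops_model alt = { init_b, teardown_x };

	fixed_ops.init();
	set_ops_model(&alt);
	switchable_ops.init();
	return 0;
}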
fe2de317
MT
13682diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13683index c6ce245..ffbdab7 100644
13684--- a/arch/x86/include/asm/xsave.h
13685+++ b/arch/x86/include/asm/xsave.h
13686@@ -65,6 +65,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
df50ba0c
MT
13687 {
13688 int err;
6892158b 13689
df50ba0c
MT
13690+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13691+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13692+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13693+#endif
13694+
6892158b
MT
13695 /*
13696 * Clear the xsave header first, so that reserved fields are
13697 * initialized to zero.
fe2de317 13698@@ -96,10 +101,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
6e9df6a3
MT
13699 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13700 {
13701 int err;
13702- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13703+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
df50ba0c
MT
13704 u32 lmask = mask;
13705 u32 hmask = mask >> 32;
13706
13707+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13708+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13709+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13710+#endif
13711+
13712 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13713 "2:\n"
13714 ".section .fixup,\"ax\"\n"
fe2de317
MT
13715diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13716index 6a564ac..9b1340c 100644
13717--- a/arch/x86/kernel/acpi/realmode/Makefile
13718+++ b/arch/x86/kernel/acpi/realmode/Makefile
13719@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
15a11c5b
MT
13720 $(call cc-option, -fno-stack-protector) \
13721 $(call cc-option, -mpreferred-stack-boundary=2)
13722 KBUILD_CFLAGS += $(call cc-option, -m32)
13723+ifdef CONSTIFY_PLUGIN
13724+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13725+endif
13726 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13727 GCOV_PROFILE := n
13728
fe2de317
MT
13729diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13730index b4fd836..4358fe3 100644
13731--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13732+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
15a11c5b
MT
13733@@ -108,6 +108,9 @@ wakeup_code:
13734 /* Do any other stuff... */
13735
13736 #ifndef CONFIG_64BIT
13737+	/* Recheck NX bit overrides (64bit path does this in trampoline) */
13738+ call verify_cpu
13739+
13740 /* This could also be done in C code... */
13741 movl pmode_cr3, %eax
13742 movl %eax, %cr3
13743@@ -131,6 +134,7 @@ wakeup_code:
13744 movl pmode_cr0, %eax
13745 movl %eax, %cr0
13746 jmp pmode_return
13747+# include "../../verify_cpu.S"
13748 #else
13749 pushw $0
13750 pushw trampoline_segment
fe2de317 13751diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
c6e2a6c8 13752index 146a49c..1b5338b 100644
fe2de317
MT
13753--- a/arch/x86/kernel/acpi/sleep.c
13754+++ b/arch/x86/kernel/acpi/sleep.c
c6e2a6c8 13755@@ -98,8 +98,12 @@ int acpi_suspend_lowlevel(void)
66a7e928 13756 header->trampoline_segment = trampoline_address() >> 4;
58c5fc13 13757 #ifdef CONFIG_SMP
16454cff 13758 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
58c5fc13 13759+
ae4e228f 13760+ pax_open_kernel();
58c5fc13
MT
13761 early_gdt_descr.address =
13762 (unsigned long)get_cpu_gdt_table(smp_processor_id());
ae4e228f 13763+ pax_close_kernel();
58c5fc13
MT
13764+
13765 initial_gs = per_cpu_offset(smp_processor_id());
13766 #endif
13767 initial_code = (unsigned long)wakeup_long64;
fe2de317 13768diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
c6e2a6c8 13769index 7261083..5c12053 100644
fe2de317
MT
13770--- a/arch/x86/kernel/acpi/wakeup_32.S
13771+++ b/arch/x86/kernel/acpi/wakeup_32.S
58c5fc13
MT
13772@@ -30,13 +30,11 @@ wakeup_pmode_return:
13773 # and restore the stack ... but you need gdt for this to work
13774 movl saved_context_esp, %esp
13775
13776- movl %cs:saved_magic, %eax
13777- cmpl $0x12345678, %eax
13778+ cmpl $0x12345678, saved_magic
13779 jne bogus_magic
13780
13781 # jump to place where we left off
13782- movl saved_eip, %eax
13783- jmp *%eax
13784+ jmp *(saved_eip)
13785
13786 bogus_magic:
13787 jmp bogus_magic
fe2de317 13788diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
4c928ab7 13789index 1f84794..e23f862 100644
fe2de317
MT
13790--- a/arch/x86/kernel/alternative.c
13791+++ b/arch/x86/kernel/alternative.c
13792@@ -276,6 +276,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
6e9df6a3
MT
13793 */
13794 for (a = start; a < end; a++) {
13795 instr = (u8 *)&a->instr_offset + a->instr_offset;
13796+
13797+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13798+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13799+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
13800+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13801+#endif
13802+
13803 replacement = (u8 *)&a->repl_offset + a->repl_offset;
13804 BUG_ON(a->replacementlen > a->instrlen);
13805 BUG_ON(a->instrlen > sizeof(insnbuf));
fe2de317 13806@@ -307,10 +314,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
6e9df6a3
MT
13807 for (poff = start; poff < end; poff++) {
13808 u8 *ptr = (u8 *)poff + *poff;
13809
13810+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13811+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13812+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13813+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13814+#endif
13815+
57199397
MT
13816 if (!*poff || ptr < text || ptr >= text_end)
13817 continue;
13818 /* turn DS segment override prefix into lock prefix */
13819- if (*ptr == 0x3e)
13820+ if (*ktla_ktva(ptr) == 0x3e)
13821 text_poke(ptr, ((unsigned char []){0xf0}), 1);
13822 };
13823 mutex_unlock(&text_mutex);
fe2de317 13824@@ -328,10 +341,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
6e9df6a3
MT
13825 for (poff = start; poff < end; poff++) {
13826 u8 *ptr = (u8 *)poff + *poff;
13827
13828+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
13829+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13830+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
13831+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
13832+#endif
13833+
57199397
MT
13834 if (!*poff || ptr < text || ptr >= text_end)
13835 continue;
13836 /* turn lock prefix into DS segment override prefix */
13837- if (*ptr == 0xf0)
13838+ if (*ktla_ktva(ptr) == 0xf0)
13839 text_poke(ptr, ((unsigned char []){0x3E}), 1);
13840 };
13841 mutex_unlock(&text_mutex);
fe2de317 13842@@ -500,7 +519,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
58c5fc13
MT
13843
13844 BUG_ON(p->len > MAX_PATCH_LEN);
13845 /* prep the buffer with the original instructions */
13846- memcpy(insnbuf, p->instr, p->len);
13847+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13848 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13849 (unsigned long)p->instr, p->len);
13850
fe2de317 13851@@ -568,7 +587,7 @@ void __init alternative_instructions(void)
df50ba0c
MT
13852 if (smp_alt_once)
13853 free_init_pages("SMP alternatives",
13854 (unsigned long)__smp_locks,
13855- (unsigned long)__smp_locks_end);
13856+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13857
13858 restart_nmi();
13859 }
fe2de317 13860@@ -585,13 +604,17 @@ void __init alternative_instructions(void)
58c5fc13
MT
13861 * instructions. And on the local CPU you need to be protected again NMI or MCE
13862 * handlers seeing an inconsistent instruction while you patch.
13863 */
bc901d79
MT
13864-void *__init_or_module text_poke_early(void *addr, const void *opcode,
13865+void *__kprobes text_poke_early(void *addr, const void *opcode,
ae4e228f 13866 size_t len)
58c5fc13
MT
13867 {
13868 unsigned long flags;
58c5fc13
MT
13869 local_irq_save(flags);
13870- memcpy(addr, opcode, len);
13871+
ae4e228f 13872+ pax_open_kernel();
58c5fc13 13873+ memcpy(ktla_ktva(addr), opcode, len);
57199397 13874 sync_core();
ae4e228f 13875+ pax_close_kernel();
58c5fc13 13876+
ae4e228f 13877 local_irq_restore(flags);
58c5fc13 13878 /* Could also do a CLFLUSH here to speed up CPU recovery; but
57199397 13879 that causes hangs on some VIA CPUs. */
fe2de317 13880@@ -613,36 +636,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
58c5fc13
MT
13881 */
13882 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13883 {
13884- unsigned long flags;
13885- char *vaddr;
13886+ unsigned char *vaddr = ktla_ktva(addr);
13887 struct page *pages[2];
13888- int i;
13889+ size_t i;
58c5fc13 13890
ae4e228f 13891 if (!core_kernel_text((unsigned long)addr)) {
58c5fc13
MT
13892- pages[0] = vmalloc_to_page(addr);
13893- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
58c5fc13
MT
13894+ pages[0] = vmalloc_to_page(vaddr);
13895+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13896 } else {
13897- pages[0] = virt_to_page(addr);
13898+ pages[0] = virt_to_page(vaddr);
13899 WARN_ON(!PageReserved(pages[0]));
13900- pages[1] = virt_to_page(addr + PAGE_SIZE);
13901+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13902 }
13903 BUG_ON(!pages[0]);
13904- local_irq_save(flags);
13905- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13906- if (pages[1])
13907- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13908- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13909- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13910- clear_fixmap(FIX_TEXT_POKE0);
13911- if (pages[1])
13912- clear_fixmap(FIX_TEXT_POKE1);
13913- local_flush_tlb();
13914- sync_core();
13915- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13916- that causes hangs on some VIA CPUs. */
13917+ text_poke_early(addr, opcode, len);
13918 for (i = 0; i < len; i++)
13919- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13920- local_irq_restore(flags);
bc901d79 13921+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
58c5fc13
MT
13922 return addr;
13923 }
df50ba0c 13924
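A userspace analogue of the pax_open_kernel()/pax_close_kernel() pairs wrapped around the memcpy() in text_poke_early() above, assuming only POSIX mmap/mprotect: the destination is normally read-only, is made writable just long enough to patch it, then locked down again. The real kernel toggles write protection through CR0/PaX KERNEXEC state rather than mprotect, so this is a sketch of the pattern, not the mechanism.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	unsigned char *page = mmap(NULL, psz, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;

	memcpy(page, "\x90\x90\xc3", 3);             /* pretend "kernel text"   */
	mprotect(page, psz, PROT_READ);              /* normally read-only      */

	mprotect(page, psz, PROT_READ | PROT_WRITE); /* "pax_open_kernel()"     */
	page[0] = 0xcc;                              /* patch one byte          */
	mprotect(page, psz, PROT_READ);              /* "pax_close_kernel()"    */

	printf("patched byte: 0x%02x\n", page[0]);
	munmap(page, psz);
	return 0;
}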
fe2de317 13925diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
c6e2a6c8 13926index edc2448..553e7c5 100644
fe2de317
MT
13927--- a/arch/x86/kernel/apic/apic.c
13928+++ b/arch/x86/kernel/apic/apic.c
5e856224 13929@@ -184,7 +184,7 @@ int first_system_vector = 0xfe;
66a7e928 13930 /*
15a11c5b
MT
13931 * Debug level, exported for io_apic.c
13932 */
13933-unsigned int apic_verbosity;
13934+int apic_verbosity;
ae4e228f 13935
15a11c5b 13936 int pic_mode;
66a7e928 13937
c6e2a6c8 13938@@ -1917,7 +1917,7 @@ void smp_error_interrupt(struct pt_regs *regs)
8308f9c9
MT
13939 apic_write(APIC_ESR, 0);
13940 v1 = apic_read(APIC_ESR);
13941 ack_APIC_irq();
13942- atomic_inc(&irq_err_count);
13943+ atomic_inc_unchecked(&irq_err_count);
13944
15a11c5b
MT
13945 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
13946 smp_processor_id(), v0 , v1);
fe2de317 13947diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
c6e2a6c8 13948index e88300d..cd5a87a 100644
fe2de317
MT
13949--- a/arch/x86/kernel/apic/io_apic.c
13950+++ b/arch/x86/kernel/apic/io_apic.c
c6e2a6c8
MT
13951@@ -83,7 +83,9 @@ static struct io_apic_ops io_apic_ops = {
13952
13953 void __init set_io_apic_ops(const struct io_apic_ops *ops)
13954 {
13955- io_apic_ops = *ops;
13956+ pax_open_kernel();
13957+ memcpy((void*)&io_apic_ops, ops, sizeof io_apic_ops);
13958+ pax_close_kernel();
13959 }
13960
13961 /*
13962@@ -1135,7 +1137,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
ae4e228f
MT
13963 }
13964 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13965
13966-void lock_vector_lock(void)
13967+void lock_vector_lock(void) __acquires(vector_lock)
13968 {
13969 /* Used to the online set of cpus does not change
13970 * during assign_irq_vector.
c6e2a6c8 13971@@ -1143,7 +1145,7 @@ void lock_vector_lock(void)
df50ba0c 13972 raw_spin_lock(&vector_lock);
ae4e228f
MT
13973 }
13974
13975-void unlock_vector_lock(void)
13976+void unlock_vector_lock(void) __releases(vector_lock)
13977 {
df50ba0c 13978 raw_spin_unlock(&vector_lock);
ae4e228f 13979 }
c6e2a6c8 13980@@ -2549,7 +2551,7 @@ static void ack_apic_edge(struct irq_data *data)
8308f9c9
MT
13981 ack_APIC_irq();
13982 }
13983
13984-atomic_t irq_mis_count;
13985+atomic_unchecked_t irq_mis_count;
13986
c6e2a6c8
MT
13987 #ifdef CONFIG_GENERIC_PENDING_IRQ
13988 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
13989@@ -2667,7 +2669,7 @@ static void ack_apic_level(struct irq_data *data)
8308f9c9
MT
13990 * at the cpu.
13991 */
13992 if (!(v & (1 << (i & 0x1f)))) {
13993- atomic_inc(&irq_mis_count);
13994+ atomic_inc_unchecked(&irq_mis_count);
13995
13996 eoi_ioapic_irq(irq, cfg);
13997 }
fe2de317 13998diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
c6e2a6c8 13999index 459e78c..f037006 100644
14000--- a/arch/x86/kernel/apm_32.c
14001+++ b/arch/x86/kernel/apm_32.c
c6e2a6c8 14002@@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
14003 * This is for buggy BIOS's that refer to (real mode) segment 0x40
14004 * even though they are called in protected mode.
14005 */
14006-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
14007+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
14008 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
14009
14010 static const char driver_version[] = "1.16ac"; /* no spaces */
c6e2a6c8 14011@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
14012 BUG_ON(cpu != 0);
14013 gdt = get_cpu_gdt_table(cpu);
14014 save_desc_40 = gdt[0x40 / 8];
14015+
ae4e228f 14016+ pax_open_kernel();
58c5fc13 14017 gdt[0x40 / 8] = bad_bios_desc;
ae4e228f 14018+ pax_close_kernel();
58c5fc13 14019
14020 apm_irq_save(flags);
14021 APM_DO_SAVE_SEGS;
c6e2a6c8 14022@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
14023 &call->esi);
14024 APM_DO_RESTORE_SEGS;
14025 apm_irq_restore(flags);
14026+
ae4e228f 14027+ pax_open_kernel();
58c5fc13 14028 gdt[0x40 / 8] = save_desc_40;
ae4e228f 14029+ pax_close_kernel();
14030+
14031 put_cpu();
14032
14033 return call->eax & 0xff;
c6e2a6c8 14034@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
14035 BUG_ON(cpu != 0);
14036 gdt = get_cpu_gdt_table(cpu);
14037 save_desc_40 = gdt[0x40 / 8];
14038+
ae4e228f 14039+ pax_open_kernel();
58c5fc13 14040 gdt[0x40 / 8] = bad_bios_desc;
ae4e228f 14041+ pax_close_kernel();
58c5fc13 14042
14043 apm_irq_save(flags);
14044 APM_DO_SAVE_SEGS;
c6e2a6c8 14045@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
14046 &call->eax);
14047 APM_DO_RESTORE_SEGS;
14048 apm_irq_restore(flags);
14049+
ae4e228f 14050+ pax_open_kernel();
58c5fc13 14051 gdt[0x40 / 8] = save_desc_40;
ae4e228f 14052+ pax_close_kernel();
14053+
14054 put_cpu();
14055 return error;
14056 }
c6e2a6c8 14057@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
14058 * code to that CPU.
14059 */
14060 gdt = get_cpu_gdt_table(0);
14061+
14062+ pax_open_kernel();
14063 set_desc_base(&gdt[APM_CS >> 3],
14064 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
14065 set_desc_base(&gdt[APM_CS_16 >> 3],
14066 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
14067 set_desc_base(&gdt[APM_DS >> 3],
14068 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
14069+ pax_close_kernel();
58c5fc13 14070
14071 proc_create("apm", 0, NULL, &apm_file_ops);
14072
fe2de317 14073diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
5e856224 14074index 68de2dc..1f3c720 100644
14075--- a/arch/x86/kernel/asm-offsets.c
14076+++ b/arch/x86/kernel/asm-offsets.c
66a7e928 14077@@ -33,6 +33,8 @@ void common(void) {
71d190be 14078 OFFSET(TI_status, thread_info, status);
14079 OFFSET(TI_addr_limit, thread_info, addr_limit);
14080 OFFSET(TI_preempt_count, thread_info, preempt_count);
14081+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
14082+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
58c5fc13 14083
14084 BLANK();
14085 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
15a11c5b 14086@@ -53,8 +55,26 @@ void common(void) {
ae4e228f 14087 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
66a7e928 14088 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
14089 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
14090+
14091+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f 14092+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
14093 #endif
14094
14095+#ifdef CONFIG_PAX_MEMORY_UDEREF
14096+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
14097+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
14098+#ifdef CONFIG_X86_64
14099+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
14100+#endif
14101+#endif
14102+
14103+#endif
14104+
14105+ BLANK();
14106+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
14107+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
8308f9c9 14108+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
66a7e928 14109+
14110 #ifdef CONFIG_XEN
14111 BLANK();
14112 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
fe2de317 14113diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
c6e2a6c8 14114index 1b4754f..fbb4227 100644
14115--- a/arch/x86/kernel/asm-offsets_64.c
14116+++ b/arch/x86/kernel/asm-offsets_64.c
c6e2a6c8 14117@@ -76,6 +76,7 @@ int main(void)
14118 BLANK();
14119 #undef ENTRY
14120
14121+ DEFINE(TSS_size, sizeof(struct tss_struct));
14122 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
14123 BLANK();
14124
14125diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
c6e2a6c8 14126index 6ab6aa2..8f71507 100644
14127--- a/arch/x86/kernel/cpu/Makefile
14128+++ b/arch/x86/kernel/cpu/Makefile
14129@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
14130 CFLAGS_REMOVE_perf_event.o = -pg
14131 endif
14132
14133-# Make sure load_percpu_segment has no stackprotector
14134-nostackp := $(call cc-option, -fno-stack-protector)
14135-CFLAGS_common.o := $(nostackp)
14136-
14137 obj-y := intel_cacheinfo.o scattered.o topology.o
14138 obj-y += proc.o capflags.o powerflags.o common.o
14139 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
14140diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
c6e2a6c8 14141index 146bb62..ac9c74a 100644
14142--- a/arch/x86/kernel/cpu/amd.c
14143+++ b/arch/x86/kernel/cpu/amd.c
c6e2a6c8 14144@@ -691,7 +691,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
14145 unsigned int size)
14146 {
14147 /* AMD errata T13 (order #21922) */
14148- if ((c->x86 == 6)) {
14149+ if (c->x86 == 6) {
14150 /* Duron Rev A0 */
14151 if (c->x86_model == 3 && c->x86_mask == 0)
14152 size = 64;
fe2de317 14153diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
c6e2a6c8 14154index cf79302..b1b28ae 100644
14155--- a/arch/x86/kernel/cpu/common.c
14156+++ b/arch/x86/kernel/cpu/common.c
c6e2a6c8 14157@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
14158
14159 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
14160
14161-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
14162-#ifdef CONFIG_X86_64
14163- /*
14164- * We need valid kernel segments for data and code in long mode too
14165- * IRET will check the segment types kkeil 2000/10/28
14166- * Also sysret mandates a special GDT layout
14167- *
14168- * TLS descriptors are currently at a different place compared to i386.
14169- * Hopefully nobody expects them at a fixed place (Wine?)
14170- */
14171- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
14172- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
14173- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
14174- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
14175- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
14176- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
58c5fc13 14177-#else
14178- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
14179- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14180- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
14181- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
14182- /*
14183- * Segments used for calling PnP BIOS have byte granularity.
14184- * They code segments and data segments have fixed 64k limits,
14185- * the transfer segment sizes are set at run time.
14186- */
14187- /* 32-bit code */
ae4e228f 14188- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
58c5fc13 14189- /* 16-bit code */
ae4e228f 14190- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
58c5fc13 14191- /* 16-bit data */
ae4e228f 14192- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
58c5fc13 14193- /* 16-bit data */
ae4e228f 14194- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
58c5fc13 14195- /* 16-bit data */
ae4e228f 14196- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
14197- /*
14198- * The APM segments have byte granularity and their bases
14199- * are set at run time. All have 64k limits.
14200- */
14201- /* 32-bit code */
ae4e228f 14202- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
58c5fc13 14203- /* 16-bit code */
ae4e228f 14204- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
58c5fc13 14205- /* data */
ae4e228f 14206- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
58c5fc13 14207-
14208- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14209- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
14210- GDT_STACK_CANARY_INIT
14211-#endif
14212-} };
14213-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
14214-
14215 static int __init x86_xsave_setup(char *s)
14216 {
14217 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
c6e2a6c8 14218@@ -374,7 +320,7 @@ void switch_to_new_gdt(int cpu)
14219 {
14220 struct desc_ptr gdt_descr;
14221
14222- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
14223+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14224 gdt_descr.size = GDT_SIZE - 1;
14225 load_gdt(&gdt_descr);
14226 /* Reload the per-cpu base */
c6e2a6c8 14227@@ -841,6 +787,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
14228 /* Filter out anything that depends on CPUID levels we don't have */
14229 filter_cpuid_features(c, true);
14230
4c928ab7 14231+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14232+ setup_clear_cpu_cap(X86_FEATURE_SEP);
14233+#endif
14234+
14235 /* If the model name is still unset, do table lookup. */
14236 if (!c->x86_model_id[0]) {
14237 const char *p;
c6e2a6c8 14238@@ -1021,10 +971,12 @@ static __init int setup_disablecpuid(char *arg)
14239 }
14240 __setup("clearcpuid=", setup_disablecpuid);
14241
14242+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
14243+EXPORT_PER_CPU_SYMBOL(current_tinfo);
14244+
14245 #ifdef CONFIG_X86_64
14246 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
14247-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
14248- (unsigned long) nmi_idt_table };
14249+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
71d190be 14250
14251 DEFINE_PER_CPU_FIRST(union irq_stack_union,
14252 irq_stack_union) __aligned(PAGE_SIZE);
c6e2a6c8 14253@@ -1038,7 +990,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
14254 EXPORT_PER_CPU_SYMBOL(current_task);
14255
14256 DEFINE_PER_CPU(unsigned long, kernel_stack) =
14257- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
66a7e928 14258+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
14259 EXPORT_PER_CPU_SYMBOL(kernel_stack);
14260
14261 DEFINE_PER_CPU(char *, irq_stack_ptr) =
5e856224 14262@@ -1126,7 +1078,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
14263 {
14264 memset(regs, 0, sizeof(struct pt_regs));
14265 regs->fs = __KERNEL_PERCPU;
14266- regs->gs = __KERNEL_STACK_CANARY;
14267+ savesegment(gs, regs->gs);
14268
14269 return regs;
14270 }
5e856224 14271@@ -1181,7 +1133,7 @@ void __cpuinit cpu_init(void)
14272 int i;
14273
14274 cpu = stack_smp_processor_id();
14275- t = &per_cpu(init_tss, cpu);
14276+ t = init_tss + cpu;
ae4e228f 14277 oist = &per_cpu(orig_ist, cpu);
14278
14279 #ifdef CONFIG_NUMA
5e856224 14280@@ -1207,7 +1159,7 @@ void __cpuinit cpu_init(void)
14281 switch_to_new_gdt(cpu);
14282 loadsegment(fs, 0);
14283
14284- load_idt((const struct desc_ptr *)&idt_descr);
14285+ load_idt(&idt_descr);
14286
14287 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
14288 syscall_init();
5e856224 14289@@ -1216,7 +1168,6 @@ void __cpuinit cpu_init(void)
14290 wrmsrl(MSR_KERNEL_GS_BASE, 0);
14291 barrier();
14292
14293- x86_configure_nx();
14294 if (cpu != 0)
14295 enable_x2apic();
14296
5e856224 14297@@ -1272,7 +1223,7 @@ void __cpuinit cpu_init(void)
14298 {
14299 int cpu = smp_processor_id();
14300 struct task_struct *curr = current;
14301- struct tss_struct *t = &per_cpu(init_tss, cpu);
14302+ struct tss_struct *t = init_tss + cpu;
14303 struct thread_struct *thread = &curr->thread;
14304
14305 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
fe2de317 14306diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
5e856224 14307index 3e6ff6c..54b4992 100644
14308--- a/arch/x86/kernel/cpu/intel.c
14309+++ b/arch/x86/kernel/cpu/intel.c
4c928ab7 14310@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
14311 * Update the IDT descriptor and reload the IDT so that
14312 * it uses the read-only mapped virtual address.
14313 */
14314- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
14315+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
14316 load_idt(&idt_descr);
14317 }
14318 #endif
fe2de317 14319diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
c6e2a6c8 14320index 61604ae..98250a5 100644
14321--- a/arch/x86/kernel/cpu/mcheck/mce.c
14322+++ b/arch/x86/kernel/cpu/mcheck/mce.c
14323@@ -42,6 +42,7 @@
14324 #include <asm/processor.h>
14325 #include <asm/mce.h>
14326 #include <asm/msr.h>
14327+#include <asm/local.h>
14328
14329 #include "mce-internal.h"
14330
5e856224 14331@@ -250,7 +251,7 @@ static void print_mce(struct mce *m)
14332 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
14333 m->cs, m->ip);
14334
14335- if (m->cs == __KERNEL_CS)
14336+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
14337 print_symbol("{%s}", m->ip);
14338 pr_cont("\n");
14339 }
5e856224 14340@@ -283,10 +284,10 @@ static void print_mce(struct mce *m)
14341
14342 #define PANIC_TIMEOUT 5 /* 5 seconds */
14343
14344-static atomic_t mce_paniced;
14345+static atomic_unchecked_t mce_paniced;
14346
14347 static int fake_panic;
14348-static atomic_t mce_fake_paniced;
14349+static atomic_unchecked_t mce_fake_paniced;
14350
14351 /* Panic in progress. Enable interrupts and wait for final IPI */
14352 static void wait_for_panic(void)
5e856224 14353@@ -310,7 +311,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14354 /*
14355 * Make sure only one CPU runs in machine check panic
14356 */
14357- if (atomic_inc_return(&mce_paniced) > 1)
14358+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
14359 wait_for_panic();
14360 barrier();
14361
5e856224 14362@@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
14363 console_verbose();
14364 } else {
14365 /* Don't log too much for fake panic */
14366- if (atomic_inc_return(&mce_fake_paniced) > 1)
14367+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
14368 return;
14369 }
14370 /* First print corrected ones that are still unlogged */
c6e2a6c8 14371@@ -684,7 +685,7 @@ static int mce_timed_out(u64 *t)
14372 * might have been modified by someone else.
14373 */
14374 rmb();
14375- if (atomic_read(&mce_paniced))
14376+ if (atomic_read_unchecked(&mce_paniced))
14377 wait_for_panic();
14378 if (!monarch_timeout)
14379 goto out;
c6e2a6c8 14380@@ -1535,7 +1536,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
14381 }
14382
14383 /* Call the installed machine check handler for this CPU setup. */
14384-void (*machine_check_vector)(struct pt_regs *, long error_code) =
14385+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
14386 unexpected_machine_check;
14387
14388 /*
c6e2a6c8 14389@@ -1558,7 +1559,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14390 return;
14391 }
14392
14393+ pax_open_kernel();
14394 machine_check_vector = do_machine_check;
14395+ pax_close_kernel();
14396
14397 __mcheck_cpu_init_generic();
14398 __mcheck_cpu_init_vendor(c);
c6e2a6c8 14399@@ -1572,7 +1575,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
14400 */
14401
14402 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
14403-static int mce_chrdev_open_count; /* #times opened */
14404+static local_t mce_chrdev_open_count; /* #times opened */
14405 static int mce_chrdev_open_exclu; /* already open exclusive? */
58c5fc13 14406
6e9df6a3 14407 static int mce_chrdev_open(struct inode *inode, struct file *file)
c6e2a6c8 14408@@ -1580,7 +1583,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
6e9df6a3 14409 spin_lock(&mce_chrdev_state_lock);
58c5fc13 14410
14411 if (mce_chrdev_open_exclu ||
14412- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
14413+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
14414 spin_unlock(&mce_chrdev_state_lock);
14415
14416 return -EBUSY;
c6e2a6c8 14417@@ -1588,7 +1591,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
14418
14419 if (file->f_flags & O_EXCL)
14420 mce_chrdev_open_exclu = 1;
14421- mce_chrdev_open_count++;
14422+ local_inc(&mce_chrdev_open_count);
58c5fc13 14423
6e9df6a3 14424 spin_unlock(&mce_chrdev_state_lock);
58c5fc13 14425
c6e2a6c8 14426@@ -1599,7 +1602,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
58c5fc13 14427 {
6e9df6a3 14428 spin_lock(&mce_chrdev_state_lock);
58c5fc13 14429
14430- mce_chrdev_open_count--;
14431+ local_dec(&mce_chrdev_open_count);
14432 mce_chrdev_open_exclu = 0;
58c5fc13 14433
6e9df6a3 14434 spin_unlock(&mce_chrdev_state_lock);
c6e2a6c8 14435@@ -2324,7 +2327,7 @@ struct dentry *mce_get_debugfs_dir(void)
14436 static void mce_reset(void)
14437 {
14438 cpu_missing = 0;
14439- atomic_set(&mce_fake_paniced, 0);
14440+ atomic_set_unchecked(&mce_fake_paniced, 0);
14441 atomic_set(&mce_executing, 0);
14442 atomic_set(&mce_callin, 0);
14443 atomic_set(&global_nwo, 0);
fe2de317 14444diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
c6e2a6c8 14445index 2d5454c..51987eb 100644
14446--- a/arch/x86/kernel/cpu/mcheck/p5.c
14447+++ b/arch/x86/kernel/cpu/mcheck/p5.c
14448@@ -11,6 +11,7 @@
14449 #include <asm/processor.h>
14450 #include <asm/mce.h>
14451 #include <asm/msr.h>
14452+#include <asm/pgtable.h>
14453
14454 /* By default disabled */
14455 int mce_p5_enabled __read_mostly;
c6e2a6c8 14456@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
14457 if (!cpu_has(c, X86_FEATURE_MCE))
14458 return;
14459
15a11c5b 14460+ pax_open_kernel();
fe2de317 14461 machine_check_vector = pentium_machine_check;
15a11c5b 14462+ pax_close_kernel();
14463 /* Make sure the vector pointer is visible before we enable MCEs: */
14464 wmb();
14465
14466diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
c6e2a6c8 14467index 2d7998f..17c9de1 100644
14468--- a/arch/x86/kernel/cpu/mcheck/winchip.c
14469+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
14470@@ -10,6 +10,7 @@
14471 #include <asm/processor.h>
14472 #include <asm/mce.h>
14473 #include <asm/msr.h>
14474+#include <asm/pgtable.h>
14475
14476 /* Machine check handler for WinChip C6: */
14477 static void winchip_machine_check(struct pt_regs *regs, long error_code)
c6e2a6c8 14478@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
14479 {
14480 u32 lo, hi;
14481
14482+ pax_open_kernel();
14483 machine_check_vector = winchip_machine_check;
14484+ pax_close_kernel();
14485 /* Make sure the vector pointer is visible before we enable MCEs: */
14486 wmb();
14487
14488diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14489index 6b96110..0da73eb 100644
14490--- a/arch/x86/kernel/cpu/mtrr/main.c
14491+++ b/arch/x86/kernel/cpu/mtrr/main.c
66a7e928 14492@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
14493 u64 size_or_mask, size_and_mask;
14494 static bool mtrr_aps_delayed_init;
14495
df50ba0c 14496-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14497+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14498
df50ba0c 14499 const struct mtrr_ops *mtrr_if;
ae4e228f 14500
14501diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14502index df5e41f..816c719 100644
14503--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14504+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14505@@ -25,7 +25,7 @@ struct mtrr_ops {
14506 int (*validate_add_page)(unsigned long base, unsigned long size,
ae4e228f 14507 unsigned int type);
14508 int (*have_wrcomb)(void);
14509-};
14510+} __do_const;
14511
14512 extern int generic_get_free_region(unsigned long base, unsigned long size,
15a11c5b 14513 int replace_reg);
fe2de317 14514diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
c6e2a6c8 14515index bb8e034..fb9020b 100644
14516--- a/arch/x86/kernel/cpu/perf_event.c
14517+++ b/arch/x86/kernel/cpu/perf_event.c
c6e2a6c8 14518@@ -1835,7 +1835,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
14519 break;
14520
bc901d79 14521 perf_callchain_store(entry, frame.return_address);
57199397 14522- fp = frame.next_frame;
6e9df6a3 14523+ fp = (const void __force_user *)frame.next_frame;
14524 }
14525 }
14526
fe2de317 14527diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
4c928ab7 14528index 13ad899..f642b9a 100644
14529--- a/arch/x86/kernel/crash.c
14530+++ b/arch/x86/kernel/crash.c
14531@@ -36,10 +36,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
14532 {
58c5fc13 14533 #ifdef CONFIG_X86_32
14534 struct pt_regs fixed_regs;
14535-#endif
14536
14537-#ifdef CONFIG_X86_32
14538- if (!user_mode_vm(regs)) {
14539+ if (!user_mode(regs)) {
14540 crash_fixup_ss_esp(&fixed_regs, regs);
14541 regs = &fixed_regs;
14542 }
14543diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14544index 37250fe..bf2ec74 100644
14545--- a/arch/x86/kernel/doublefault_32.c
14546+++ b/arch/x86/kernel/doublefault_32.c
14547@@ -11,7 +11,7 @@
14548
14549 #define DOUBLEFAULT_STACKSIZE (1024)
14550 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14551-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14552+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14553
14554 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14555
14556@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14557 unsigned long gdt, tss;
14558
14559 store_gdt(&gdt_desc);
14560- gdt = gdt_desc.address;
14561+ gdt = (unsigned long)gdt_desc.address;
14562
14563 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14564
fe2de317 14565@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14566 /* 0x2 bit is always set */
14567 .flags = X86_EFLAGS_SF | 0x2,
14568 .sp = STACK_START,
14569- .es = __USER_DS,
14570+ .es = __KERNEL_DS,
14571 .cs = __KERNEL_CS,
14572 .ss = __KERNEL_DS,
14573- .ds = __USER_DS,
14574+ .ds = __KERNEL_DS,
14575 .fs = __KERNEL_PERCPU,
14576
14577 .__cr3 = __pa_nodebug(swapper_pg_dir),
fe2de317 14578diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
c6e2a6c8 14579index 1b81839..0b4e7b0 100644
14580--- a/arch/x86/kernel/dumpstack.c
14581+++ b/arch/x86/kernel/dumpstack.c
14582@@ -2,6 +2,9 @@
14583 * Copyright (C) 1991, 1992 Linus Torvalds
14584 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14585 */
14586+#ifdef CONFIG_GRKERNSEC_HIDESYM
14587+#define __INCLUDED_BY_HIDESYM 1
14588+#endif
14589 #include <linux/kallsyms.h>
14590 #include <linux/kprobes.h>
14591 #include <linux/uaccess.h>
c6e2a6c8 14592@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
14593 static void
14594 print_ftrace_graph_addr(unsigned long addr, void *data,
14595 const struct stacktrace_ops *ops,
14596- struct thread_info *tinfo, int *graph)
14597+ struct task_struct *task, int *graph)
14598 {
c6e2a6c8 14599- struct task_struct *task;
71d190be 14600 unsigned long ret_addr;
14601 int index;
14602
14603 if (addr != (unsigned long)return_to_handler)
14604 return;
14605
14606- task = tinfo->task;
14607 index = task->curr_ret_stack;
71d190be 14608
14609 if (!task->ret_stack || index < *graph)
14610@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14611 static inline void
14612 print_ftrace_graph_addr(unsigned long addr, void *data,
14613 const struct stacktrace_ops *ops,
14614- struct thread_info *tinfo, int *graph)
14615+ struct task_struct *task, int *graph)
14616 { }
14617 #endif
14618
c6e2a6c8 14619@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14620 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14621 */
14622
14623-static inline int valid_stack_ptr(struct thread_info *tinfo,
14624- void *p, unsigned int size, void *end)
14625+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14626 {
14627- void *t = tinfo;
14628 if (end) {
14629 if (p < end && p >= (end-THREAD_SIZE))
14630 return 1;
c6e2a6c8 14631@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14632 }
14633
14634 unsigned long
14635-print_context_stack(struct thread_info *tinfo,
14636+print_context_stack(struct task_struct *task, void *stack_start,
14637 unsigned long *stack, unsigned long bp,
14638 const struct stacktrace_ops *ops, void *data,
14639 unsigned long *end, int *graph)
14640 {
14641 struct stack_frame *frame = (struct stack_frame *)bp;
14642
14643- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14644+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14645 unsigned long addr;
14646
14647 addr = *stack;
c6e2a6c8 14648@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
14649 } else {
14650 ops->address(data, addr, 0);
14651 }
14652- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14653+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14654 }
14655 stack++;
14656 }
c6e2a6c8 14657@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
14658 EXPORT_SYMBOL_GPL(print_context_stack);
14659
14660 unsigned long
14661-print_context_stack_bp(struct thread_info *tinfo,
14662+print_context_stack_bp(struct task_struct *task, void *stack_start,
14663 unsigned long *stack, unsigned long bp,
14664 const struct stacktrace_ops *ops, void *data,
14665 unsigned long *end, int *graph)
c6e2a6c8 14666@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14667 struct stack_frame *frame = (struct stack_frame *)bp;
14668 unsigned long *ret_addr = &frame->return_address;
14669
14670- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
14671+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
14672 unsigned long addr = *ret_addr;
14673
14674 if (!__kernel_text_address(addr))
c6e2a6c8 14675@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
14676 ops->address(data, addr, 1);
14677 frame = frame->next_frame;
14678 ret_addr = &frame->return_address;
14679- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14680+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14681 }
14682
14683 return (unsigned long)frame;
c6e2a6c8 14684@@ -189,7 +188,7 @@ void dump_stack(void)
57199397 14685
66a7e928 14686 bp = stack_frame(current, NULL);
14687 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14688- current->pid, current->comm, print_tainted(),
14689+ task_pid_nr(current), current->comm, print_tainted(),
14690 init_utsname()->release,
14691 (int)strcspn(init_utsname()->version, " "),
14692 init_utsname()->version);
c6e2a6c8 14693@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
14694 }
14695 EXPORT_SYMBOL_GPL(oops_begin);
14696
14697+extern void gr_handle_kernel_exploit(void);
14698+
14699 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14700 {
14701 if (regs && kexec_should_crash(current))
c6e2a6c8 14702@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14703 panic("Fatal exception in interrupt");
14704 if (panic_on_oops)
14705 panic("Fatal exception");
14706- do_exit(signr);
14707+
14708+ gr_handle_kernel_exploit();
14709+
14710+ do_group_exit(signr);
14711 }
14712
14713 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
c6e2a6c8 14714@@ -273,7 +277,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14715
14716 show_registers(regs);
14717 #ifdef CONFIG_X86_32
14718- if (user_mode_vm(regs)) {
14719+ if (user_mode(regs)) {
14720 sp = regs->sp;
14721 ss = regs->ss & 0xffff;
14722 } else {
c6e2a6c8 14723@@ -301,7 +305,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14724 unsigned long flags = oops_begin();
14725 int sig = SIGSEGV;
14726
14727- if (!user_mode_vm(regs))
14728+ if (!user_mode(regs))
14729 report_bug(regs->ip, regs);
14730
14731 if (__die(str, regs, err))
fe2de317 14732diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
c6e2a6c8 14733index 88ec912..e95e935 100644
14734--- a/arch/x86/kernel/dumpstack_32.c
14735+++ b/arch/x86/kernel/dumpstack_32.c
14736@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14737 bp = stack_frame(task, regs);
14738
14739 for (;;) {
14740- struct thread_info *context;
14741+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14742
14743- context = (struct thread_info *)
14744- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14745- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
14746+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14747
14748- stack = (unsigned long *)context->previous_esp;
14749- if (!stack)
14750+ if (stack_start == task_stack_page(task))
14751 break;
14752+ stack = *(unsigned long **)stack_start;
14753 if (ops->stack(data, "IRQ") < 0)
14754 break;
14755 touch_nmi_watchdog();
14756@@ -87,7 +85,7 @@ void show_registers(struct pt_regs *regs)
14757 int i;
14758
14759 print_modules();
14760- __show_regs(regs, !user_mode_vm(regs));
14761+ __show_regs(regs, !user_mode(regs));
14762
14763 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
14764 TASK_COMM_LEN, current->comm, task_pid_nr(current),
14765@@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs)
14766 * When in-kernel, we also print out the stack and code at the
14767 * time of the fault..
14768 */
14769- if (!user_mode_vm(regs)) {
14770+ if (!user_mode(regs)) {
14771 unsigned int code_prologue = code_bytes * 43 / 64;
14772 unsigned int code_len = code_bytes;
14773 unsigned char c;
14774 u8 *ip;
14775+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14776
14777 printk(KERN_EMERG "Stack:\n");
14778 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
14779
14780 printk(KERN_EMERG "Code: ");
14781
14782- ip = (u8 *)regs->ip - code_prologue;
14783+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14784 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14785 /* try starting at IP */
14786- ip = (u8 *)regs->ip;
14787+ ip = (u8 *)regs->ip + cs_base;
14788 code_len = code_len - code_prologue + 1;
14789 }
14790 for (i = 0; i < code_len; i++, ip++) {
14791@@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs)
4c928ab7 14792 printk(KERN_CONT " Bad EIP value.");
14793 break;
14794 }
14795- if (ip == (u8 *)regs->ip)
14796+ if (ip == (u8 *)regs->ip + cs_base)
4c928ab7 14797 printk(KERN_CONT "<%02x> ", c);
fe2de317 14798 else
4c928ab7 14799 printk(KERN_CONT "%02x ", c);
14800@@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
14801 {
14802 unsigned short ud2;
14803
14804+ ip = ktla_ktva(ip);
14805 if (ip < PAGE_OFFSET)
14806 return 0;
14807 if (probe_kernel_address((unsigned short *)ip, ud2))
14808@@ -139,3 +139,15 @@ int is_valid_bugaddr(unsigned long ip)
14809
14810 return ud2 == 0x0b0f;
14811 }
14812+
14813+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14814+void pax_check_alloca(unsigned long size)
14815+{
14816+ unsigned long sp = (unsigned long)&sp, stack_left;
14817+
14818+ /* all kernel stacks are of the same size */
14819+ stack_left = sp & (THREAD_SIZE - 1);
14820+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14821+}
14822+EXPORT_SYMBOL(pax_check_alloca);
14823+#endif
14824diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
c6e2a6c8 14825index 17107bd..9623722 100644
14826--- a/arch/x86/kernel/dumpstack_64.c
14827+++ b/arch/x86/kernel/dumpstack_64.c
14828@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14829 unsigned long *irq_stack_end =
14830 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14831 unsigned used = 0;
14832- struct thread_info *tinfo;
14833 int graph = 0;
14834 unsigned long dummy;
14835+ void *stack_start;
14836
14837 if (!task)
14838 task = current;
14839@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14840 * current stack address. If the stacks consist of nested
14841 * exceptions
14842 */
14843- tinfo = task_thread_info(task);
14844 for (;;) {
14845 char *id;
14846 unsigned long *estack_end;
14847+
14848 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14849 &used, &id);
14850
14851@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14852 if (ops->stack(data, id) < 0)
14853 break;
14854
14855- bp = ops->walk_stack(tinfo, stack, bp, ops,
14856+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14857 data, estack_end, &graph);
14858 ops->stack(data, "<EOE>");
14859 /*
14860@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14861 * second-to-last pointer (index -2 to end) in the
14862 * exception stack:
14863 */
14864+ if ((u16)estack_end[-1] != __KERNEL_DS)
14865+ goto out;
14866 stack = (unsigned long *) estack_end[-2];
14867 continue;
14868 }
14869@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14870 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
14871 if (ops->stack(data, "IRQ") < 0)
14872 break;
14873- bp = ops->walk_stack(tinfo, stack, bp,
14874+ bp = ops->walk_stack(task, irq_stack, stack, bp,
14875 ops, data, irq_stack_end, &graph);
14876 /*
14877 * We link to the next stack (which would be
c6e2a6c8 14878@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14879 /*
14880 * This handles the process stack:
14881 */
14882- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14883+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14884+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
c6e2a6c8 14885+out:
fe2de317
MT
14886 put_cpu();
14887 }
14888 EXPORT_SYMBOL(dump_trace);
c6e2a6c8 14889@@ -305,3 +309,50 @@ int is_valid_bugaddr(unsigned long ip)
14890
14891 return ud2 == 0x0b0f;
14892 }
14893+
14894+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14895+void pax_check_alloca(unsigned long size)
14896+{
14897+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14898+ unsigned cpu, used;
14899+ char *id;
14900+
14901+ /* check the process stack first */
14902+ stack_start = (unsigned long)task_stack_page(current);
14903+ stack_end = stack_start + THREAD_SIZE;
14904+ if (likely(stack_start <= sp && sp < stack_end)) {
14905+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14906+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14907+ return;
14908+ }
14909+
14910+ cpu = get_cpu();
14911+
14912+ /* check the irq stacks */
14913+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14914+ stack_start = stack_end - IRQ_STACK_SIZE;
14915+ if (stack_start <= sp && sp < stack_end) {
14916+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14917+ put_cpu();
14918+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14919+ return;
14920+ }
14921+
14922+ /* check the exception stacks */
14923+ used = 0;
14924+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14925+ stack_start = stack_end - EXCEPTION_STKSZ;
14926+ if (stack_end && stack_start <= sp && sp < stack_end) {
14927+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14928+ put_cpu();
14929+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14930+ return;
14931+ }
14932+
14933+ put_cpu();
14934+
14935+ /* unknown stack */
14936+ BUG();
14937+}
14938+EXPORT_SYMBOL(pax_check_alloca);
14939+#endif
14940diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
5e856224 14941index 9b9f18b..9fcaa04 100644
14942--- a/arch/x86/kernel/early_printk.c
14943+++ b/arch/x86/kernel/early_printk.c
14944@@ -7,6 +7,7 @@
14945 #include <linux/pci_regs.h>
14946 #include <linux/pci_ids.h>
14947 #include <linux/errno.h>
14948+#include <linux/sched.h>
14949 #include <asm/io.h>
14950 #include <asm/processor.h>
14951 #include <asm/fcntl.h>
fe2de317 14952diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
5e856224 14953index 7b784f4..db6b628 100644
14954--- a/arch/x86/kernel/entry_32.S
14955+++ b/arch/x86/kernel/entry_32.S
5e856224 14956@@ -179,13 +179,146 @@
14957 /*CFI_REL_OFFSET gs, PT_GS*/
14958 .endm
14959 .macro SET_KERNEL_GS reg
58c5fc13 14960+
14961+#ifdef CONFIG_CC_STACKPROTECTOR
14962 movl $(__KERNEL_STACK_CANARY), \reg
14963+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14964+ movl $(__USER_DS), \reg
14965+#else
14966+ xorl \reg, \reg
14967+#endif
14968+
14969 movl \reg, %gs
14970 .endm
14971
14972 #endif /* CONFIG_X86_32_LAZY_GS */
14973
14974-.macro SAVE_ALL
8308f9c9 14975+.macro pax_enter_kernel
df50ba0c 14976+#ifdef CONFIG_PAX_KERNEXEC
8308f9c9 14977+ call pax_enter_kernel
df50ba0c 14978+#endif
14979+.endm
14980+
14981+.macro pax_exit_kernel
14982+#ifdef CONFIG_PAX_KERNEXEC
14983+ call pax_exit_kernel
14984+#endif
14985+.endm
14986+
df50ba0c 14987+#ifdef CONFIG_PAX_KERNEXEC
8308f9c9 14988+ENTRY(pax_enter_kernel)
df50ba0c 14989+#ifdef CONFIG_PARAVIRT
14990+ pushl %eax
14991+ pushl %ecx
14992+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14993+ mov %eax, %esi
14994+#else
14995+ mov %cr0, %esi
14996+#endif
14997+ bts $16, %esi
14998+ jnc 1f
14999+ mov %cs, %esi
15000+ cmp $__KERNEL_CS, %esi
15001+ jz 3f
15002+ ljmp $__KERNEL_CS, $3f
15003+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
15004+2:
15005+#ifdef CONFIG_PARAVIRT
15006+ mov %esi, %eax
15007+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
15008+#else
15009+ mov %esi, %cr0
15010+#endif
15011+3:
15012+#ifdef CONFIG_PARAVIRT
15013+ popl %ecx
15014+ popl %eax
df50ba0c 15015+#endif
15016+ ret
15017+ENDPROC(pax_enter_kernel)
15018+
15019+ENTRY(pax_exit_kernel)
15020+#ifdef CONFIG_PARAVIRT
15021+ pushl %eax
15022+ pushl %ecx
15023+#endif
15024+ mov %cs, %esi
15025+ cmp $__KERNEXEC_KERNEL_CS, %esi
15026+ jnz 2f
15027+#ifdef CONFIG_PARAVIRT
15028+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
15029+ mov %eax, %esi
15030+#else
15031+ mov %cr0, %esi
15032+#endif
15033+ btr $16, %esi
15034+ ljmp $__KERNEL_CS, $1f
15035+1:
15036+#ifdef CONFIG_PARAVIRT
15037+ mov %esi, %eax
15038+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
15039+#else
15040+ mov %esi, %cr0
15041+#endif
15042+2:
15043+#ifdef CONFIG_PARAVIRT
15044+ popl %ecx
15045+ popl %eax
15046+#endif
15047+ ret
15048+ENDPROC(pax_exit_kernel)
15049+#endif
15050+
15051+.macro pax_erase_kstack
15052+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15053+ call pax_erase_kstack
15054+#endif
15055+.endm
15056+
8308f9c9 15057+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15058+/*
15059+ * ebp: thread_info
15060+ * ecx, edx: can be clobbered
15061+ */
8308f9c9 15062+ENTRY(pax_erase_kstack)
15063+ pushl %edi
15064+ pushl %eax
8308f9c9 15065+
66a7e928 15066+ mov TI_lowest_stack(%ebp), %edi
15067+ mov $-0xBEEF, %eax
15068+ std
15069+
15070+1: mov %edi, %ecx
8308f9c9
MT
15071+ and $THREAD_SIZE_asm - 1, %ecx
15072+ shr $2, %ecx
15073+ repne scasl
15074+ jecxz 2f
15075+
15076+ cmp $2*16, %ecx
8308f9c9 15077+ jc 2f
15078+
15079+ mov $2*16, %ecx
15080+ repe scasl
15081+ jecxz 2f
15082+ jne 1b
15083+
15084+2: cld
15085+ mov %esp, %ecx
15086+ sub %edi, %ecx
15087+ shr $2, %ecx
15088+ rep stosl
15089+
15090+ mov TI_task_thread_sp0(%ebp), %edi
15091+ sub $128, %edi
15092+ mov %edi, TI_lowest_stack(%ebp)
15093+
15094+ popl %eax
15095+ popl %edi
15096+ ret
15097+ENDPROC(pax_erase_kstack)
15098+#endif
15099+
15100+.macro __SAVE_ALL _DS
15101 cld
15102 PUSH_GS
bc901d79 15103 pushl_cfi %fs
5e856224 15104@@ -208,7 +341,7 @@
15105 CFI_REL_OFFSET ecx, 0
15106 pushl_cfi %ebx
15107 CFI_REL_OFFSET ebx, 0
15108- movl $(__USER_DS), %edx
15109+ movl $\_DS, %edx
15110 movl %edx, %ds
15111 movl %edx, %es
15112 movl $(__KERNEL_PERCPU), %edx
5e856224 15113@@ -216,6 +349,15 @@
15114 SET_KERNEL_GS %edx
15115 .endm
15116
15117+.macro SAVE_ALL
ae4e228f 15118+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
58c5fc13 15119+ __SAVE_ALL __KERNEL_DS
8308f9c9 15120+ pax_enter_kernel
15121+#else
15122+ __SAVE_ALL __USER_DS
15123+#endif
15124+.endm
15125+
15126 .macro RESTORE_INT_REGS
15127 popl_cfi %ebx
15128 CFI_RESTORE ebx
5e856224 15129@@ -301,7 +443,7 @@ ENTRY(ret_from_fork)
15130 popfl_cfi
15131 jmp syscall_exit
15132 CFI_ENDPROC
15133-END(ret_from_fork)
15134+ENDPROC(ret_from_fork)
15135
15136 /*
15137 * Interrupt exit functions should be protected against kprobes
15138@@ -335,7 +477,15 @@ resume_userspace_sig:
15139 andl $SEGMENT_RPL_MASK, %eax
15140 #endif
15141 cmpl $USER_RPL, %eax
15142+
15143+#ifdef CONFIG_PAX_KERNEXEC
15144+ jae resume_userspace
ae4e228f 15145+
5e856224 15146+ pax_exit_kernel
15147+ jmp resume_kernel
15148+#else
15149 jb resume_kernel # not returning to v8086 or userspace
15150+#endif
15151
15152 ENTRY(resume_userspace)
15153 LOCKDEP_SYS_EXIT
5e856224 15154@@ -347,8 +497,8 @@ ENTRY(resume_userspace)
15155 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15156 # int/exception return?
15157 jne work_pending
15158- jmp restore_all
6e9df6a3 15159-END(ret_from_exception)
66a7e928 15160+ jmp restore_all_pax
6e9df6a3 15161+ENDPROC(ret_from_exception)
15162
15163 #ifdef CONFIG_PREEMPT
6e9df6a3 15164 ENTRY(resume_kernel)
5e856224 15165@@ -363,7 +513,7 @@ need_resched:
15166 jz restore_all
15167 call preempt_schedule_irq
15168 jmp need_resched
15169-END(resume_kernel)
15170+ENDPROC(resume_kernel)
15171 #endif
15172 CFI_ENDPROC
15173 /*
5e856224 15174@@ -397,23 +547,34 @@ sysenter_past_esp:
15175 /*CFI_REL_OFFSET cs, 0*/
15176 /*
15177 * Push current_thread_info()->sysenter_return to the stack.
15178- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15179- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15180 */
66a7e928 15181- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
71d190be 15182+ pushl_cfi $0
15183 CFI_REL_OFFSET eip, 0
15184
bc901d79 15185 pushl_cfi %eax
15186 SAVE_ALL
15187+ GET_THREAD_INFO(%ebp)
15188+ movl TI_sysenter_return(%ebp),%ebp
15189+ movl %ebp,PT_EIP(%esp)
15190 ENABLE_INTERRUPTS(CLBR_NONE)
15191
15192 /*
15193 * Load the potential sixth argument from user stack.
15194 * Careful about security.
15195 */
15196+ movl PT_OLDESP(%esp),%ebp
15197+
15198+#ifdef CONFIG_PAX_MEMORY_UDEREF
15199+ mov PT_OLDSS(%esp),%ds
15200+1: movl %ds:(%ebp),%ebp
15201+ push %ss
15202+ pop %ds
15203+#else
15204 cmpl $__PAGE_OFFSET-3,%ebp
15205 jae syscall_fault
15206 1: movl (%ebp),%ebp
15207+#endif
15208+
15209 movl %ebp,PT_EBP(%esp)
15210 .section __ex_table,"a"
15211 .align 4
5e856224 15212@@ -436,12 +597,24 @@ sysenter_do_call:
15213 testl $_TIF_ALLWORK_MASK, %ecx
15214 jne sysexit_audit
15215 sysenter_exit:
15216+
15217+#ifdef CONFIG_PAX_RANDKSTACK
8308f9c9 15218+ pushl_cfi %eax
15a11c5b 15219+ movl %esp, %eax
58c5fc13 15220+ call pax_randomize_kstack
15221+ popl_cfi %eax
15222+#endif
15223+
66a7e928 15224+ pax_erase_kstack
15225+
15226 /* if something modifies registers it must also disable sysexit */
15227 movl PT_EIP(%esp), %edx
15228 movl PT_OLDESP(%esp), %ecx
15229 xorl %ebp,%ebp
15230 TRACE_IRQS_ON
15231 1: mov PT_FS(%esp), %fs
15232+2: mov PT_DS(%esp), %ds
15233+3: mov PT_ES(%esp), %es
15234 PTGS_TO_GS
15235 ENABLE_INTERRUPTS_SYSEXIT
15236
5e856224 15237@@ -458,6 +631,9 @@ sysenter_audit:
15238 movl %eax,%edx /* 2nd arg: syscall number */
15239 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
5e856224 15240 call __audit_syscall_entry
15241+
15242+ pax_erase_kstack
15243+
15244 pushl_cfi %ebx
15245 movl PT_EAX(%esp),%eax /* reload syscall number */
15246 jmp sysenter_do_call
5e856224 15247@@ -483,11 +659,17 @@ sysexit_audit:
15248
15249 CFI_ENDPROC
15250 .pushsection .fixup,"ax"
15251-2: movl $0,PT_FS(%esp)
15252+4: movl $0,PT_FS(%esp)
15253+ jmp 1b
15254+5: movl $0,PT_DS(%esp)
15255+ jmp 1b
15256+6: movl $0,PT_ES(%esp)
15257 jmp 1b
15258 .section __ex_table,"a"
15259 .align 4
15260- .long 1b,2b
15261+ .long 1b,4b
15262+ .long 2b,5b
15263+ .long 3b,6b
15264 .popsection
15265 PTGS_TO_GS_EX
15266 ENDPROC(ia32_sysenter_target)
5e856224 15267@@ -520,6 +702,15 @@ syscall_exit:
15268 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15269 jne syscall_exit_work
15270
15271+restore_all_pax:
15272+
58c5fc13 15273+#ifdef CONFIG_PAX_RANDKSTACK
15a11c5b 15274+ movl %esp, %eax
15275+ call pax_randomize_kstack
15276+#endif
8308f9c9 15277+
8308f9c9 15278+ pax_erase_kstack
15279+
15280 restore_all:
15281 TRACE_IRQS_IRET
15282 restore_all_notrace:
5e856224 15283@@ -579,14 +770,34 @@ ldt_ss:
15284 * compensating for the offset by changing to the ESPFIX segment with
15285 * a base address that matches for the difference.
15286 */
15287-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
15288+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
15289 mov %esp, %edx /* load kernel esp */
15290 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15291 mov %dx, %ax /* eax: new kernel esp */
15292 sub %eax, %edx /* offset (low word is 0) */
15293+#ifdef CONFIG_SMP
15294+ movl PER_CPU_VAR(cpu_number), %ebx
15295+ shll $PAGE_SHIFT_asm, %ebx
15296+ addl $cpu_gdt_table, %ebx
15297+#else
15298+ movl $cpu_gdt_table, %ebx
15299+#endif
15300 shr $16, %edx
15301- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
15302- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
15303+
15304+#ifdef CONFIG_PAX_KERNEXEC
15305+ mov %cr0, %esi
15306+ btr $16, %esi
15307+ mov %esi, %cr0
15308+#endif
15309+
15310+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
15311+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
15312+
15313+#ifdef CONFIG_PAX_KERNEXEC
15314+ bts $16, %esi
15315+ mov %esi, %cr0
15316+#endif
15317+
15318 pushl_cfi $__ESPFIX_SS
15319 pushl_cfi %eax /* new kernel esp */
15320 /* Disable interrupts, but do not irqtrace this section: we
5e856224 15321@@ -615,38 +826,30 @@ work_resched:
15322 movl TI_flags(%ebp), %ecx
15323 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15324 # than syscall tracing?
15325- jz restore_all
15326+ jz restore_all_pax
15327 testb $_TIF_NEED_RESCHED, %cl
15328 jnz work_resched
15329
15330 work_notifysig: # deal with pending signals and
15331 # notify-resume requests
15332+ movl %esp, %eax
15333 #ifdef CONFIG_VM86
15334 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15335- movl %esp, %eax
15336- jne work_notifysig_v86 # returning to kernel-space or
15337+ jz 1f # returning to kernel-space or
15338 # vm86-space
15339- TRACE_IRQS_ON
15340- ENABLE_INTERRUPTS(CLBR_NONE)
15341- xorl %edx, %edx
15342- call do_notify_resume
15343- jmp resume_userspace_sig
15344
15345- ALIGN
15346-work_notifysig_v86:
bc901d79 15347 pushl_cfi %ecx # save ti_flags for do_notify_resume
58c5fc13 15348 call save_v86_state # %eax contains pt_regs pointer
bc901d79 15349 popl_cfi %ecx
58c5fc13
MT
15350 movl %eax, %esp
15351-#else
15352- movl %esp, %eax
15353+1:
15354 #endif
5e856224
MT
15355 TRACE_IRQS_ON
15356 ENABLE_INTERRUPTS(CLBR_NONE)
58c5fc13
MT
15357 xorl %edx, %edx
15358 call do_notify_resume
6e9df6a3
MT
15359 jmp resume_userspace_sig
15360-END(work_pending)
15361+ENDPROC(work_pending)
15362
15363 # perform syscall exit tracing
15364 ALIGN
5e856224 15365@@ -654,11 +857,14 @@ syscall_trace_entry:
15366 movl $-ENOSYS,PT_EAX(%esp)
15367 movl %esp, %eax
15368 call syscall_trace_enter
15369+
15370+ pax_erase_kstack
15371+
15372 /* What it returned is what we'll actually use. */
5e856224 15373 cmpl $(NR_syscalls), %eax
66a7e928 15374 jnae syscall_call
15375 jmp syscall_exit
15376-END(syscall_trace_entry)
15377+ENDPROC(syscall_trace_entry)
15378
15379 # perform syscall exit tracing
15380 ALIGN
5e856224 15381@@ -671,20 +877,24 @@ syscall_exit_work:
15382 movl %esp, %eax
15383 call syscall_trace_leave
15384 jmp resume_userspace
15385-END(syscall_exit_work)
15386+ENDPROC(syscall_exit_work)
15387 CFI_ENDPROC
58c5fc13
MT
15388
15389 RING0_INT_FRAME # can't unwind into user space anyway
15390 syscall_fault:
15391+#ifdef CONFIG_PAX_MEMORY_UDEREF
15392+ push %ss
15393+ pop %ds
15394+#endif
15395 GET_THREAD_INFO(%ebp)
15396 movl $-EFAULT,PT_EAX(%esp)
15397 jmp resume_userspace
6e9df6a3
MT
15398-END(syscall_fault)
15399+ENDPROC(syscall_fault)
15400
15401 syscall_badsys:
15402 movl $-ENOSYS,PT_EAX(%esp)
15403 jmp resume_userspace
15404-END(syscall_badsys)
15405+ENDPROC(syscall_badsys)
15406 CFI_ENDPROC
15407 /*
15408 * End of kprobes section
5e856224 15409@@ -756,6 +966,36 @@ ENTRY(ptregs_clone)
bc901d79
MT
15410 CFI_ENDPROC
15411 ENDPROC(ptregs_clone)
15412
15413+ ALIGN;
15414+ENTRY(kernel_execve)
15415+ CFI_STARTPROC
15416+ pushl_cfi %ebp
15417+ sub $PT_OLDSS+4,%esp
15418+ pushl_cfi %edi
15419+ pushl_cfi %ecx
15420+ pushl_cfi %eax
15421+ lea 3*4(%esp),%edi
15422+ mov $PT_OLDSS/4+1,%ecx
15423+ xorl %eax,%eax
15424+ rep stosl
15425+ popl_cfi %eax
15426+ popl_cfi %ecx
15427+ popl_cfi %edi
15428+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15429+ pushl_cfi %esp
15430+ call sys_execve
15431+ add $4,%esp
15432+ CFI_ADJUST_CFA_OFFSET -4
15433+ GET_THREAD_INFO(%ebp)
15434+ test %eax,%eax
15435+ jz syscall_exit
15436+ add $PT_OLDSS+4,%esp
15437+ CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
15438+ popl_cfi %ebp
15439+ ret
15440+ CFI_ENDPROC
15441+ENDPROC(kernel_execve)
15442+
15443 .macro FIXUP_ESPFIX_STACK
15444 /*
15445 * Switch back for ESPFIX stack to the normal zerobased stack
5e856224 15446@@ -765,8 +1005,15 @@ ENDPROC(ptregs_clone)
15447 * normal stack and adjusts ESP with the matching offset.
15448 */
15449 /* fixup the stack */
15450- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
15451- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
58c5fc13
MT
15452+#ifdef CONFIG_SMP
15453+ movl PER_CPU_VAR(cpu_number), %ebx
15454+ shll $PAGE_SHIFT_asm, %ebx
15455+ addl $cpu_gdt_table, %ebx
15456+#else
15457+ movl $cpu_gdt_table, %ebx
15458+#endif
6892158b
MT
15459+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
15460+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
58c5fc13 15461 shl $16, %eax
6892158b 15462 addl %esp, %eax /* the adjusted stack pointer */
bc901d79 15463 pushl_cfi $__KERNEL_DS
5e856224 15464@@ -819,7 +1066,7 @@ vector=vector+1
6e9df6a3
MT
15465 .endr
15466 2: jmp common_interrupt
15467 .endr
15468-END(irq_entries_start)
15469+ENDPROC(irq_entries_start)
15470
15471 .previous
15472 END(interrupt)
5e856224 15473@@ -867,7 +1114,7 @@ ENTRY(coprocessor_error)
6e9df6a3
MT
15474 pushl_cfi $do_coprocessor_error
15475 jmp error_code
15476 CFI_ENDPROC
15477-END(coprocessor_error)
15478+ENDPROC(coprocessor_error)
15479
15480 ENTRY(simd_coprocessor_error)
15481 RING0_INT_FRAME
5e856224 15482@@ -888,7 +1135,7 @@ ENTRY(simd_coprocessor_error)
6e9df6a3
MT
15483 #endif
15484 jmp error_code
15485 CFI_ENDPROC
15486-END(simd_coprocessor_error)
15487+ENDPROC(simd_coprocessor_error)
15488
15489 ENTRY(device_not_available)
15490 RING0_INT_FRAME
5e856224 15491@@ -896,7 +1143,7 @@ ENTRY(device_not_available)
6e9df6a3
MT
15492 pushl_cfi $do_device_not_available
15493 jmp error_code
15494 CFI_ENDPROC
15495-END(device_not_available)
15496+ENDPROC(device_not_available)
15497
15498 #ifdef CONFIG_PARAVIRT
15499 ENTRY(native_iret)
5e856224 15500@@ -905,12 +1152,12 @@ ENTRY(native_iret)
6e9df6a3
MT
15501 .align 4
15502 .long native_iret, iret_exc
15503 .previous
15504-END(native_iret)
15505+ENDPROC(native_iret)
15506
15507 ENTRY(native_irq_enable_sysexit)
15508 sti
15509 sysexit
15510-END(native_irq_enable_sysexit)
15511+ENDPROC(native_irq_enable_sysexit)
15512 #endif
15513
15514 ENTRY(overflow)
5e856224 15515@@ -919,7 +1166,7 @@ ENTRY(overflow)
6e9df6a3
MT
15516 pushl_cfi $do_overflow
15517 jmp error_code
15518 CFI_ENDPROC
15519-END(overflow)
15520+ENDPROC(overflow)
15521
15522 ENTRY(bounds)
15523 RING0_INT_FRAME
5e856224 15524@@ -927,7 +1174,7 @@ ENTRY(bounds)
6e9df6a3
MT
15525 pushl_cfi $do_bounds
15526 jmp error_code
15527 CFI_ENDPROC
15528-END(bounds)
15529+ENDPROC(bounds)
15530
15531 ENTRY(invalid_op)
15532 RING0_INT_FRAME
5e856224 15533@@ -935,7 +1182,7 @@ ENTRY(invalid_op)
6e9df6a3
MT
15534 pushl_cfi $do_invalid_op
15535 jmp error_code
15536 CFI_ENDPROC
15537-END(invalid_op)
15538+ENDPROC(invalid_op)
15539
15540 ENTRY(coprocessor_segment_overrun)
15541 RING0_INT_FRAME
5e856224 15542@@ -943,35 +1190,35 @@ ENTRY(coprocessor_segment_overrun)
6e9df6a3
MT
15543 pushl_cfi $do_coprocessor_segment_overrun
15544 jmp error_code
15545 CFI_ENDPROC
15546-END(coprocessor_segment_overrun)
15547+ENDPROC(coprocessor_segment_overrun)
15548
15549 ENTRY(invalid_TSS)
15550 RING0_EC_FRAME
15551 pushl_cfi $do_invalid_TSS
15552 jmp error_code
15553 CFI_ENDPROC
15554-END(invalid_TSS)
15555+ENDPROC(invalid_TSS)
15556
15557 ENTRY(segment_not_present)
15558 RING0_EC_FRAME
15559 pushl_cfi $do_segment_not_present
15560 jmp error_code
15561 CFI_ENDPROC
15562-END(segment_not_present)
15563+ENDPROC(segment_not_present)
15564
15565 ENTRY(stack_segment)
15566 RING0_EC_FRAME
15567 pushl_cfi $do_stack_segment
15568 jmp error_code
15569 CFI_ENDPROC
15570-END(stack_segment)
15571+ENDPROC(stack_segment)
15572
15573 ENTRY(alignment_check)
15574 RING0_EC_FRAME
15575 pushl_cfi $do_alignment_check
15576 jmp error_code
15577 CFI_ENDPROC
15578-END(alignment_check)
15579+ENDPROC(alignment_check)
15580
15581 ENTRY(divide_error)
15582 RING0_INT_FRAME
5e856224 15583@@ -979,7 +1226,7 @@ ENTRY(divide_error)
6e9df6a3
MT
15584 pushl_cfi $do_divide_error
15585 jmp error_code
15586 CFI_ENDPROC
15587-END(divide_error)
15588+ENDPROC(divide_error)
15589
15590 #ifdef CONFIG_X86_MCE
15591 ENTRY(machine_check)
5e856224 15592@@ -988,7 +1235,7 @@ ENTRY(machine_check)
6e9df6a3
MT
15593 pushl_cfi machine_check_vector
15594 jmp error_code
15595 CFI_ENDPROC
15596-END(machine_check)
15597+ENDPROC(machine_check)
15598 #endif
15599
15600 ENTRY(spurious_interrupt_bug)
5e856224 15601@@ -997,7 +1244,7 @@ ENTRY(spurious_interrupt_bug)
6e9df6a3
MT
15602 pushl_cfi $do_spurious_interrupt_bug
15603 jmp error_code
15604 CFI_ENDPROC
15605-END(spurious_interrupt_bug)
15606+ENDPROC(spurious_interrupt_bug)
15607 /*
15608 * End of kprobes section
15609 */
5e856224 15610@@ -1112,7 +1359,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
6e9df6a3
MT
15611
15612 ENTRY(mcount)
15613 ret
15614-END(mcount)
15615+ENDPROC(mcount)
15616
15617 ENTRY(ftrace_caller)
15618 cmpl $0, function_trace_stop
5e856224 15619@@ -1141,7 +1388,7 @@ ftrace_graph_call:
6e9df6a3
MT
15620 .globl ftrace_stub
15621 ftrace_stub:
15622 ret
15623-END(ftrace_caller)
15624+ENDPROC(ftrace_caller)
15625
15626 #else /* ! CONFIG_DYNAMIC_FTRACE */
15627
5e856224 15628@@ -1177,7 +1424,7 @@ trace:
6e9df6a3
MT
15629 popl %ecx
15630 popl %eax
15631 jmp ftrace_stub
15632-END(mcount)
15633+ENDPROC(mcount)
15634 #endif /* CONFIG_DYNAMIC_FTRACE */
15635 #endif /* CONFIG_FUNCTION_TRACER */
15636
5e856224 15637@@ -1198,7 +1445,7 @@ ENTRY(ftrace_graph_caller)
6e9df6a3
MT
15638 popl %ecx
15639 popl %eax
15640 ret
15641-END(ftrace_graph_caller)
15642+ENDPROC(ftrace_graph_caller)
15643
15644 .globl return_to_handler
15645 return_to_handler:
5e856224 15646@@ -1253,15 +1500,18 @@ error_code:
58c5fc13
MT
15647 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15648 REG_TO_PTGS %ecx
15649 SET_KERNEL_GS %ecx
15650- movl $(__USER_DS), %ecx
15651+ movl $(__KERNEL_DS), %ecx
15652 movl %ecx, %ds
15653 movl %ecx, %es
df50ba0c 15654+
8308f9c9 15655+ pax_enter_kernel
df50ba0c 15656+
58c5fc13 15657 TRACE_IRQS_OFF
df50ba0c
MT
15658 movl %esp,%eax # pt_regs pointer
15659 call *%edi
6e9df6a3
MT
15660 jmp ret_from_exception
15661 CFI_ENDPROC
15662-END(page_fault)
15663+ENDPROC(page_fault)
15664
15665 /*
15666 * Debug traps and NMI can happen at the one SYSENTER instruction
5e856224 15667@@ -1303,7 +1553,7 @@ debug_stack_correct:
6e9df6a3
MT
15668 call do_debug
15669 jmp ret_from_exception
15670 CFI_ENDPROC
15671-END(debug)
15672+ENDPROC(debug)
15673
15674 /*
15675 * NMI is doubly nasty. It can happen _while_ we're handling
5e856224 15676@@ -1340,6 +1590,9 @@ nmi_stack_correct:
58c5fc13
MT
15677 xorl %edx,%edx # zero error code
15678 movl %esp,%eax # pt_regs pointer
15679 call do_nmi
15680+
8308f9c9 15681+ pax_exit_kernel
58c5fc13
MT
15682+
15683 jmp restore_all_notrace
15684 CFI_ENDPROC
15685
5e856224 15686@@ -1376,12 +1629,15 @@ nmi_espfix_stack:
58c5fc13
MT
15687 FIXUP_ESPFIX_STACK # %eax == %esp
15688 xorl %edx,%edx # zero error code
15689 call do_nmi
15690+
8308f9c9 15691+ pax_exit_kernel
58c5fc13
MT
15692+
15693 RESTORE_REGS
15694 lss 12+4(%esp), %esp # back to espfix stack
15695 CFI_ADJUST_CFA_OFFSET -24
6e9df6a3
MT
15696 jmp irq_return
15697 CFI_ENDPROC
15698-END(nmi)
15699+ENDPROC(nmi)
15700
15701 ENTRY(int3)
15702 RING0_INT_FRAME
5e856224 15703@@ -1393,14 +1649,14 @@ ENTRY(int3)
6e9df6a3
MT
15704 call do_int3
15705 jmp ret_from_exception
15706 CFI_ENDPROC
15707-END(int3)
15708+ENDPROC(int3)
15709
15710 ENTRY(general_protection)
15711 RING0_EC_FRAME
15712 pushl_cfi $do_general_protection
15713 jmp error_code
15714 CFI_ENDPROC
15715-END(general_protection)
15716+ENDPROC(general_protection)
15717
15718 #ifdef CONFIG_KVM_GUEST
15719 ENTRY(async_page_fault)
5e856224 15720@@ -1408,7 +1664,7 @@ ENTRY(async_page_fault)
6e9df6a3
MT
15721 pushl_cfi $do_async_page_fault
15722 jmp error_code
15723 CFI_ENDPROC
15724-END(async_page_fault)
15725+ENDPROC(async_page_fault)
15726 #endif
15727
15728 /*
fe2de317 15729diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
c6e2a6c8 15730index cdc79b5..4710a75 100644
fe2de317
MT
15731--- a/arch/x86/kernel/entry_64.S
15732+++ b/arch/x86/kernel/entry_64.S
5e856224 15733@@ -56,6 +56,8 @@
ae4e228f
MT
15734 #include <asm/ftrace.h>
15735 #include <asm/percpu.h>
5e856224 15736 #include <linux/err.h>
ae4e228f 15737+#include <asm/pgtable.h>
6e9df6a3 15738+#include <asm/alternative-asm.h>
ae4e228f
MT
15739
15740 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15741 #include <linux/elf-em.h>
5e856224 15742@@ -69,8 +71,9 @@
6e9df6a3
MT
15743 #ifdef CONFIG_FUNCTION_TRACER
15744 #ifdef CONFIG_DYNAMIC_FTRACE
15745 ENTRY(mcount)
15746+ pax_force_retaddr
15747 retq
15748-END(mcount)
15749+ENDPROC(mcount)
15750
15751 ENTRY(ftrace_caller)
15752 cmpl $0, function_trace_stop
5e856224 15753@@ -93,8 +96,9 @@ GLOBAL(ftrace_graph_call)
6e9df6a3
MT
15754 #endif
15755
15756 GLOBAL(ftrace_stub)
15757+ pax_force_retaddr
15758 retq
15759-END(ftrace_caller)
15760+ENDPROC(ftrace_caller)
15761
15762 #else /* ! CONFIG_DYNAMIC_FTRACE */
15763 ENTRY(mcount)
5e856224 15764@@ -113,6 +117,7 @@ ENTRY(mcount)
6e9df6a3
MT
15765 #endif
15766
15767 GLOBAL(ftrace_stub)
15768+ pax_force_retaddr
15769 retq
15770
15771 trace:
5e856224 15772@@ -122,12 +127,13 @@ trace:
6e9df6a3
MT
15773 movq 8(%rbp), %rsi
15774 subq $MCOUNT_INSN_SIZE, %rdi
15775
15776+ pax_force_fptr ftrace_trace_function
15777 call *ftrace_trace_function
15778
15779 MCOUNT_RESTORE_FRAME
15780
15781 jmp ftrace_stub
15782-END(mcount)
15783+ENDPROC(mcount)
15784 #endif /* CONFIG_DYNAMIC_FTRACE */
15785 #endif /* CONFIG_FUNCTION_TRACER */
15786
5e856224 15787@@ -147,8 +153,9 @@ ENTRY(ftrace_graph_caller)
6e9df6a3
MT
15788
15789 MCOUNT_RESTORE_FRAME
15790
15791+ pax_force_retaddr
15792 retq
15793-END(ftrace_graph_caller)
15794+ENDPROC(ftrace_graph_caller)
15795
15796 GLOBAL(return_to_handler)
15797 subq $24, %rsp
5e856224 15798@@ -164,6 +171,7 @@ GLOBAL(return_to_handler)
6e9df6a3
MT
15799 movq 8(%rsp), %rdx
15800 movq (%rsp), %rax
15801 addq $24, %rsp
15802+ pax_force_fptr %rdi
15803 jmp *%rdi
15804 #endif
15805
5e856224 15806@@ -179,6 +187,282 @@ ENTRY(native_usergs_sysret64)
df50ba0c
MT
15807 ENDPROC(native_usergs_sysret64)
15808 #endif /* CONFIG_PARAVIRT */
15809
15810+ .macro ljmpq sel, off
8308f9c9 15811+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
df50ba0c
MT
15812+ .byte 0x48; ljmp *1234f(%rip)
15813+ .pushsection .rodata
15814+ .align 16
15815+ 1234: .quad \off; .word \sel
15816+ .popsection
15817+#else
66a7e928
MT
15818+ pushq $\sel
15819+ pushq $\off
df50ba0c
MT
15820+ lretq
15821+#endif
15822+ .endm
15823+
317566c1 15824+ .macro pax_enter_kernel
fe2de317 15825+ pax_set_fptr_mask
317566c1
MT
15826+#ifdef CONFIG_PAX_KERNEXEC
15827+ call pax_enter_kernel
15828+#endif
15829+ .endm
15830+
15831+ .macro pax_exit_kernel
15832+#ifdef CONFIG_PAX_KERNEXEC
15833+ call pax_exit_kernel
15834+#endif
15835+ .endm
df50ba0c
MT
15836+
15837+#ifdef CONFIG_PAX_KERNEXEC
317566c1 15838+ENTRY(pax_enter_kernel)
66a7e928 15839+ pushq %rdi
df50ba0c
MT
15840+
15841+#ifdef CONFIG_PARAVIRT
15842+ PV_SAVE_REGS(CLBR_RDI)
15843+#endif
15844+
15845+ GET_CR0_INTO_RDI
15846+ bts $16,%rdi
fe2de317 15847+ jnc 3f
df50ba0c
MT
15848+ mov %cs,%edi
15849+ cmp $__KERNEL_CS,%edi
fe2de317
MT
15850+ jnz 2f
15851+1:
df50ba0c
MT
15852+
15853+#ifdef CONFIG_PARAVIRT
15854+ PV_RESTORE_REGS(CLBR_RDI)
15855+#endif
15856+
66a7e928 15857+ popq %rdi
6e9df6a3 15858+ pax_force_retaddr
df50ba0c 15859+ retq
fe2de317
MT
15860+
15861+2: ljmpq __KERNEL_CS,1f
15862+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15863+4: SET_RDI_INTO_CR0
15864+ jmp 1b
df50ba0c
MT
15865+ENDPROC(pax_enter_kernel)
15866+
15867+ENTRY(pax_exit_kernel)
66a7e928 15868+ pushq %rdi
df50ba0c
MT
15869+
15870+#ifdef CONFIG_PARAVIRT
15871+ PV_SAVE_REGS(CLBR_RDI)
15872+#endif
15873+
15874+ mov %cs,%rdi
15875+ cmp $__KERNEXEC_KERNEL_CS,%edi
fe2de317
MT
15876+ jz 2f
15877+1:
15878+
15879+#ifdef CONFIG_PARAVIRT
15880+ PV_RESTORE_REGS(CLBR_RDI);
15881+#endif
15882+
15883+ popq %rdi
15884+ pax_force_retaddr
15885+ retq
df50ba0c 15886+
fe2de317
MT
15887+2: GET_CR0_INTO_RDI
15888+ btr $16,%rdi
15889+ ljmpq __KERNEL_CS,3f
15890+3: SET_RDI_INTO_CR0
15891+ jmp 1b
df50ba0c
MT
15892+#ifdef CONFIG_PARAVIRT
15893+ PV_RESTORE_REGS(CLBR_RDI);
15894+#endif
15895+
66a7e928 15896+ popq %rdi
6e9df6a3 15897+ pax_force_retaddr
df50ba0c
MT
15898+ retq
15899+ENDPROC(pax_exit_kernel)
317566c1 15900+#endif
df50ba0c 15901+
317566c1 15902+ .macro pax_enter_kernel_user
fe2de317 15903+ pax_set_fptr_mask
317566c1
MT
15904+#ifdef CONFIG_PAX_MEMORY_UDEREF
15905+ call pax_enter_kernel_user
15906+#endif
15907+ .endm
df50ba0c 15908+
317566c1 15909+ .macro pax_exit_kernel_user
df50ba0c 15910+#ifdef CONFIG_PAX_MEMORY_UDEREF
317566c1
MT
15911+ call pax_exit_kernel_user
15912+#endif
71d190be 15913+#ifdef CONFIG_PAX_RANDKSTACK
4c928ab7 15914+ pushq %rax
71d190be 15915+ call pax_randomize_kstack
4c928ab7 15916+ popq %rax
71d190be 15917+#endif
317566c1
MT
15918+ .endm
15919+
15920+#ifdef CONFIG_PAX_MEMORY_UDEREF
15921+ENTRY(pax_enter_kernel_user)
66a7e928
MT
15922+ pushq %rdi
15923+ pushq %rbx
df50ba0c
MT
15924+
15925+#ifdef CONFIG_PARAVIRT
15926+ PV_SAVE_REGS(CLBR_RDI)
15927+#endif
15928+
15929+ GET_CR3_INTO_RDI
15930+ mov %rdi,%rbx
15931+ add $__START_KERNEL_map,%rbx
15932+ sub phys_base(%rip),%rbx
15933+
15934+#ifdef CONFIG_PARAVIRT
66a7e928 15935+ pushq %rdi
df50ba0c
MT
15936+ cmpl $0, pv_info+PARAVIRT_enabled
15937+ jz 1f
15938+ i = 0
15939+ .rept USER_PGD_PTRS
15940+ mov i*8(%rbx),%rsi
15941+ mov $0,%sil
15942+ lea i*8(%rbx),%rdi
15a11c5b 15943+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
df50ba0c
MT
15944+ i = i + 1
15945+ .endr
15946+ jmp 2f
15947+1:
15948+#endif
15949+
15950+ i = 0
15951+ .rept USER_PGD_PTRS
15952+ movb $0,i*8(%rbx)
15953+ i = i + 1
15954+ .endr
15955+
15956+#ifdef CONFIG_PARAVIRT
66a7e928 15957+2: popq %rdi
df50ba0c
MT
15958+#endif
15959+ SET_RDI_INTO_CR3
15960+
15961+#ifdef CONFIG_PAX_KERNEXEC
15962+ GET_CR0_INTO_RDI
15963+ bts $16,%rdi
15964+ SET_RDI_INTO_CR0
15965+#endif
15966+
15967+#ifdef CONFIG_PARAVIRT
15968+ PV_RESTORE_REGS(CLBR_RDI)
15969+#endif
15970+
66a7e928
MT
15971+ popq %rbx
15972+ popq %rdi
6e9df6a3 15973+ pax_force_retaddr
df50ba0c
MT
15974+ retq
15975+ENDPROC(pax_enter_kernel_user)
15976+
15977+ENTRY(pax_exit_kernel_user)
df50ba0c
MT
15978+ push %rdi
15979+
15980+#ifdef CONFIG_PARAVIRT
66a7e928 15981+ pushq %rbx
df50ba0c
MT
15982+ PV_SAVE_REGS(CLBR_RDI)
15983+#endif
15984+
15985+#ifdef CONFIG_PAX_KERNEXEC
15986+ GET_CR0_INTO_RDI
15987+ btr $16,%rdi
15988+ SET_RDI_INTO_CR0
15989+#endif
15990+
15991+ GET_CR3_INTO_RDI
15992+ add $__START_KERNEL_map,%rdi
15993+ sub phys_base(%rip),%rdi
15994+
15995+#ifdef CONFIG_PARAVIRT
15996+ cmpl $0, pv_info+PARAVIRT_enabled
15997+ jz 1f
15998+ mov %rdi,%rbx
15999+ i = 0
16000+ .rept USER_PGD_PTRS
16001+ mov i*8(%rbx),%rsi
16002+ mov $0x67,%sil
16003+ lea i*8(%rbx),%rdi
15a11c5b 16004+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
df50ba0c
MT
16005+ i = i + 1
16006+ .endr
16007+ jmp 2f
16008+1:
16009+#endif
16010+
16011+ i = 0
16012+ .rept USER_PGD_PTRS
16013+ movb $0x67,i*8(%rdi)
16014+ i = i + 1
16015+ .endr
16016+
16017+#ifdef CONFIG_PARAVIRT
16018+2: PV_RESTORE_REGS(CLBR_RDI)
66a7e928 16019+ popq %rbx
df50ba0c
MT
16020+#endif
16021+
66a7e928 16022+ popq %rdi
6e9df6a3 16023+ pax_force_retaddr
df50ba0c
MT
16024+ retq
16025+ENDPROC(pax_exit_kernel_user)
66a7e928
MT
16026+#endif
16027+
6e9df6a3 16028+.macro pax_erase_kstack
66a7e928
MT
16029+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16030+ call pax_erase_kstack
16031+#endif
6e9df6a3 16032+.endm
66a7e928
MT
16033+
16034+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
16035+/*
fe2de317 16036+ * r11: thread_info
66a7e928
MT
16037+ * rcx, rdx: can be clobbered
16038+ */
16039+ENTRY(pax_erase_kstack)
16040+ pushq %rdi
16041+ pushq %rax
fe2de317 16042+ pushq %r11
66a7e928 16043+
fe2de317
MT
16044+ GET_THREAD_INFO(%r11)
16045+ mov TI_lowest_stack(%r11), %rdi
66a7e928
MT
16046+ mov $-0xBEEF, %rax
16047+ std
16048+
16049+1: mov %edi, %ecx
16050+ and $THREAD_SIZE_asm - 1, %ecx
16051+ shr $3, %ecx
16052+ repne scasq
16053+ jecxz 2f
16054+
16055+ cmp $2*8, %ecx
16056+ jc 2f
16057+
16058+ mov $2*8, %ecx
16059+ repe scasq
16060+ jecxz 2f
16061+ jne 1b
16062+
16063+2: cld
16064+ mov %esp, %ecx
16065+ sub %edi, %ecx
15a11c5b
MT
16066+
16067+ cmp $THREAD_SIZE_asm, %rcx
16068+ jb 3f
16069+ ud2
16070+3:
16071+
66a7e928
MT
16072+ shr $3, %ecx
16073+ rep stosq
16074+
fe2de317 16075+ mov TI_task_thread_sp0(%r11), %rdi
66a7e928 16076+ sub $256, %rdi
fe2de317 16077+ mov %rdi, TI_lowest_stack(%r11)
66a7e928 16078+
fe2de317 16079+ popq %r11
66a7e928
MT
16080+ popq %rax
16081+ popq %rdi
6e9df6a3 16082+ pax_force_retaddr
66a7e928
MT
16083+ ret
16084+ENDPROC(pax_erase_kstack)
317566c1 16085+#endif
df50ba0c
MT
16086
16087 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
16088 #ifdef CONFIG_TRACE_IRQFLAGS
5e856224 16089@@ -232,8 +516,8 @@ ENDPROC(native_usergs_sysret64)
fe2de317
MT
16090 .endm
16091
16092 .macro UNFAKE_STACK_FRAME
16093- addq $8*6, %rsp
16094- CFI_ADJUST_CFA_OFFSET -(6*8)
16095+ addq $8*6 + ARG_SKIP, %rsp
16096+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16097 .endm
16098
16099 /*
5e856224 16100@@ -320,7 +604,7 @@ ENDPROC(native_usergs_sysret64)
6e9df6a3
MT
16101 movq %rsp, %rsi
16102
16103 leaq -RBP(%rsp),%rdi /* arg1 for handler */
c6e2a6c8
MT
16104- testl $3, CS-RBP(%rsi)
16105+ testb $3, CS-RBP(%rsi)
df50ba0c
MT
16106 je 1f
16107 SWAPGS
16108 /*
c6e2a6c8 16109@@ -355,9 +639,10 @@ ENTRY(save_rest)
6e9df6a3
MT
16110 movq_cfi r15, R15+16
16111 movq %r11, 8(%rsp) /* return address */
16112 FIXUP_TOP_OF_STACK %r11, 16
16113+ pax_force_retaddr
16114 ret
16115 CFI_ENDPROC
16116-END(save_rest)
16117+ENDPROC(save_rest)
16118
16119 /* save complete stack frame */
16120 .pushsection .kprobes.text, "ax"
c6e2a6c8 16121@@ -386,9 +671,10 @@ ENTRY(save_paranoid)
6e9df6a3
MT
16122 js 1f /* negative -> in kernel */
16123 SWAPGS
16124 xorl %ebx,%ebx
16125-1: ret
fe2de317 16126+1: pax_force_retaddr_bts
6e9df6a3
MT
16127+ ret
16128 CFI_ENDPROC
16129-END(save_paranoid)
16130+ENDPROC(save_paranoid)
16131 .popsection
16132
16133 /*
c6e2a6c8 16134@@ -410,7 +696,7 @@ ENTRY(ret_from_fork)
df50ba0c
MT
16135
16136 RESTORE_REST
16137
16138- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16139+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
5e856224 16140 jz retint_restore_args
df50ba0c
MT
16141
16142 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
c6e2a6c8 16143@@ -420,7 +706,7 @@ ENTRY(ret_from_fork)
6e9df6a3
MT
16144 jmp ret_from_sys_call # go to the SYSRET fastpath
16145
16146 CFI_ENDPROC
16147-END(ret_from_fork)
16148+ENDPROC(ret_from_fork)
16149
16150 /*
16151 * System call entry. Up to 6 arguments in registers are supported.
c6e2a6c8 16152@@ -456,7 +742,7 @@ END(ret_from_fork)
71d190be
MT
16153 ENTRY(system_call)
16154 CFI_STARTPROC simple
16155 CFI_SIGNAL_FRAME
16156- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16157+ CFI_DEF_CFA rsp,0
16158 CFI_REGISTER rip,rcx
16159 /*CFI_REGISTER rflags,r11*/
16160 SWAPGS_UNSAFE_STACK
c6e2a6c8 16161@@ -469,16 +755,18 @@ GLOBAL(system_call_after_swapgs)
df50ba0c
MT
16162
16163 movq %rsp,PER_CPU_VAR(old_rsp)
16164 movq PER_CPU_VAR(kernel_stack),%rsp
fe2de317 16165+ SAVE_ARGS 8*6,0
317566c1 16166+ pax_enter_kernel_user
df50ba0c
MT
16167 /*
16168 * No need to follow this irqs off/on section - it's straight
16169 * and short:
71d190be
MT
16170 */
16171 ENABLE_INTERRUPTS(CLBR_NONE)
6e9df6a3 16172- SAVE_ARGS 8,0
71d190be
MT
16173 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16174 movq %rcx,RIP-ARGOFFSET(%rsp)
16175 CFI_REL_OFFSET rip,RIP-ARGOFFSET
5e856224
MT
16176- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16177+ GET_THREAD_INFO(%rcx)
16178+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
16179 jnz tracesys
fe2de317 16180 system_call_fastpath:
c6e2a6c8
MT
16181 #if __SYSCALL_MASK == ~0
16182@@ -488,7 +776,7 @@ system_call_fastpath:
16183 cmpl $__NR_syscall_max,%eax
16184 #endif
fe2de317
MT
16185 ja badsys
16186- movq %r10,%rcx
16187+ movq R10-ARGOFFSET(%rsp),%rcx
16188 call *sys_call_table(,%rax,8) # XXX: rip relative
16189 movq %rax,RAX-ARGOFFSET(%rsp)
16190 /*
c6e2a6c8 16191@@ -502,10 +790,13 @@ sysret_check:
5e856224
MT
16192 LOCKDEP_SYS_EXIT
16193 DISABLE_INTERRUPTS(CLBR_NONE)
16194 TRACE_IRQS_OFF
16195- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
16196+ GET_THREAD_INFO(%rcx)
16197+ movl TI_flags(%rcx),%edx
df50ba0c
MT
16198 andl %edi,%edx
16199 jnz sysret_careful
16200 CFI_REMEMBER_STATE
317566c1 16201+ pax_exit_kernel_user
15a11c5b 16202+ pax_erase_kstack
df50ba0c
MT
16203 /*
16204 * sysretq will re-enable interrupts:
16205 */
c6e2a6c8 16206@@ -557,14 +848,18 @@ badsys:
fe2de317
MT
16207 * jump back to the normal fast path.
16208 */
16209 auditsys:
16210- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16211+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16212 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16213 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16214 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
66a7e928
MT
16215 movq %rax,%rsi /* 2nd arg: syscall number */
16216 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
5e856224 16217 call __audit_syscall_entry
66a7e928
MT
16218+
16219+ pax_erase_kstack
16220+
16221 LOAD_ARGS 0 /* reload call-clobbered registers */
fe2de317 16222+ pax_set_fptr_mask
66a7e928
MT
16223 jmp system_call_fastpath
16224
fe2de317 16225 /*
c6e2a6c8 16226@@ -585,7 +880,7 @@ sysret_audit:
5e856224
MT
16227 /* Do syscall tracing */
16228 tracesys:
16229 #ifdef CONFIG_AUDITSYSCALL
16230- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
16231+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
16232 jz auditsys
16233 #endif
16234 SAVE_REST
c6e2a6c8 16235@@ -593,12 +888,16 @@ tracesys:
66a7e928
MT
16236 FIXUP_TOP_OF_STACK %rdi
16237 movq %rsp,%rdi
16238 call syscall_trace_enter
16239+
16240+ pax_erase_kstack
16241+
16242 /*
16243 * Reload arg registers from stack in case ptrace changed them.
16244 * We don't reload %rax because syscall_trace_enter() returned
fe2de317
MT
16245 * the value it wants us to use in the table lookup.
16246 */
16247 LOAD_ARGS ARGOFFSET, 1
16248+ pax_set_fptr_mask
16249 RESTORE_REST
c6e2a6c8 16250 #if __SYSCALL_MASK == ~0
fe2de317 16251 cmpq $__NR_syscall_max,%rax
c6e2a6c8
MT
16252@@ -607,7 +906,7 @@ tracesys:
16253 cmpl $__NR_syscall_max,%eax
16254 #endif
fe2de317
MT
16255 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16256- movq %r10,%rcx /* fixup for C */
16257+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16258 call *sys_call_table(,%rax,8)
16259 movq %rax,RAX-ARGOFFSET(%rsp)
16260 /* Use IRET because user could have changed frame */
c6e2a6c8 16261@@ -628,6 +927,7 @@ GLOBAL(int_with_check)
4c928ab7
MT
16262 andl %edi,%edx
16263 jnz int_careful
16264 andl $~TS_COMPAT,TI_status(%rcx)
16265+ pax_erase_kstack
16266 jmp retint_swapgs
16267
16268 /* Either reschedule or signal or syscall exit tracking needed. */
c6e2a6c8 16269@@ -674,7 +974,7 @@ int_restore_rest:
6e9df6a3
MT
16270 TRACE_IRQS_OFF
16271 jmp int_with_check
16272 CFI_ENDPROC
16273-END(system_call)
16274+ENDPROC(system_call)
16275
16276 /*
16277 * Certain special system calls that need to save a complete full stack frame.
c6e2a6c8 16278@@ -690,7 +990,7 @@ ENTRY(\label)
6e9df6a3
MT
16279 call \func
16280 jmp ptregscall_common
16281 CFI_ENDPROC
16282-END(\label)
16283+ENDPROC(\label)
16284 .endm
16285
16286 PTREGSCALL stub_clone, sys_clone, %r8
c6e2a6c8 16287@@ -708,9 +1008,10 @@ ENTRY(ptregscall_common)
6e9df6a3
MT
16288 movq_cfi_restore R12+8, r12
16289 movq_cfi_restore RBP+8, rbp
16290 movq_cfi_restore RBX+8, rbx
16291+ pax_force_retaddr
16292 ret $REST_SKIP /* pop extended registers */
16293 CFI_ENDPROC
16294-END(ptregscall_common)
16295+ENDPROC(ptregscall_common)
16296
16297 ENTRY(stub_execve)
16298 CFI_STARTPROC
c6e2a6c8 16299@@ -725,7 +1026,7 @@ ENTRY(stub_execve)
6e9df6a3
MT
16300 RESTORE_REST
16301 jmp int_ret_from_sys_call
16302 CFI_ENDPROC
16303-END(stub_execve)
16304+ENDPROC(stub_execve)
16305
16306 /*
16307 * sigreturn is special because it needs to restore all registers on return.
c6e2a6c8 16308@@ -743,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
6e9df6a3
MT
16309 RESTORE_REST
16310 jmp int_ret_from_sys_call
16311 CFI_ENDPROC
16312-END(stub_rt_sigreturn)
16313+ENDPROC(stub_rt_sigreturn)
16314
c6e2a6c8
MT
16315 #ifdef CONFIG_X86_X32_ABI
16316 PTREGSCALL stub_x32_sigaltstack, sys32_sigaltstack, %rdx
16317@@ -812,7 +1113,7 @@ vector=vector+1
6e9df6a3
MT
16318 2: jmp common_interrupt
16319 .endr
16320 CFI_ENDPROC
16321-END(irq_entries_start)
16322+ENDPROC(irq_entries_start)
16323
16324 .previous
16325 END(interrupt)
c6e2a6c8 16326@@ -832,6 +1133,16 @@ END(interrupt)
4c928ab7 16327 subq $ORIG_RAX-RBP, %rsp
16454cff 16328 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
6e9df6a3 16329 SAVE_ARGS_IRQ
df50ba0c
MT
16330+#ifdef CONFIG_PAX_MEMORY_UDEREF
16331+ testb $3, CS(%rdi)
16332+ jnz 1f
317566c1 16333+ pax_enter_kernel
df50ba0c 16334+ jmp 2f
317566c1 16335+1: pax_enter_kernel_user
df50ba0c
MT
16336+2:
16337+#else
317566c1 16338+ pax_enter_kernel
df50ba0c 16339+#endif
ae4e228f
MT
16340 call \func
16341 .endm
16342
c6e2a6c8 16343@@ -863,7 +1174,7 @@ ret_from_intr:
6e9df6a3 16344
ae4e228f 16345 exit_intr:
ae4e228f 16346 GET_THREAD_INFO(%rcx)
df50ba0c
MT
16347- testl $3,CS-ARGOFFSET(%rsp)
16348+ testb $3,CS-ARGOFFSET(%rsp)
ae4e228f 16349 je retint_kernel
df50ba0c
MT
16350
16351 /* Interrupt came from user space */
c6e2a6c8 16352@@ -885,12 +1196,15 @@ retint_swapgs: /* return to user-space */
df50ba0c
MT
16353 * The iretq could re-enable interrupts:
16354 */
16355 DISABLE_INTERRUPTS(CLBR_ANY)
317566c1 16356+ pax_exit_kernel_user
df50ba0c
MT
16357 TRACE_IRQS_IRETQ
16358 SWAPGS
16359 jmp restore_args
16360
16361 retint_restore_args: /* return to kernel space */
16362 DISABLE_INTERRUPTS(CLBR_ANY)
317566c1 16363+ pax_exit_kernel
6e9df6a3 16364+ pax_force_retaddr RIP-ARGOFFSET
df50ba0c
MT
16365 /*
16366 * The iretq could re-enable interrupts:
16367 */
c6e2a6c8 16368@@ -979,7 +1293,7 @@ ENTRY(retint_kernel)
6e9df6a3
MT
16369 #endif
16370
16371 CFI_ENDPROC
16372-END(common_interrupt)
16373+ENDPROC(common_interrupt)
16374 /*
16375 * End of kprobes section
16376 */
c6e2a6c8 16377@@ -996,7 +1310,7 @@ ENTRY(\sym)
6e9df6a3
MT
16378 interrupt \do_sym
16379 jmp ret_from_intr
16380 CFI_ENDPROC
16381-END(\sym)
16382+ENDPROC(\sym)
16383 .endm
16384
16385 #ifdef CONFIG_SMP
c6e2a6c8 16386@@ -1069,12 +1383,22 @@ ENTRY(\sym)
bc901d79 16387 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f
MT
16388 call error_entry
16389 DEFAULT_FRAME 0
df50ba0c
MT
16390+#ifdef CONFIG_PAX_MEMORY_UDEREF
16391+ testb $3, CS(%rsp)
16392+ jnz 1f
317566c1 16393+ pax_enter_kernel
df50ba0c 16394+ jmp 2f
317566c1 16395+1: pax_enter_kernel_user
df50ba0c
MT
16396+2:
16397+#else
317566c1 16398+ pax_enter_kernel
df50ba0c 16399+#endif
ae4e228f
MT
16400 movq %rsp,%rdi /* pt_regs pointer */
16401 xorl %esi,%esi /* no error code */
16402 call \do_sym
6e9df6a3
MT
16403 jmp error_exit /* %ebx: no swapgs flag */
16404 CFI_ENDPROC
16405-END(\sym)
16406+ENDPROC(\sym)
16407 .endm
16408
16409 .macro paranoidzeroentry sym do_sym
c6e2a6c8 16410@@ -1086,15 +1410,25 @@ ENTRY(\sym)
bc901d79 16411 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f
MT
16412 call save_paranoid
16413 TRACE_IRQS_OFF
df50ba0c
MT
16414+#ifdef CONFIG_PAX_MEMORY_UDEREF
16415+ testb $3, CS(%rsp)
16416+ jnz 1f
317566c1 16417+ pax_enter_kernel
df50ba0c 16418+ jmp 2f
317566c1 16419+1: pax_enter_kernel_user
df50ba0c
MT
16420+2:
16421+#else
317566c1 16422+ pax_enter_kernel
df50ba0c 16423+#endif
ae4e228f
MT
16424 movq %rsp,%rdi /* pt_regs pointer */
16425 xorl %esi,%esi /* no error code */
16426 call \do_sym
6e9df6a3
MT
16427 jmp paranoid_exit /* %ebx: no swapgs flag */
16428 CFI_ENDPROC
16429-END(\sym)
16430+ENDPROC(\sym)
6892158b
MT
16431 .endm
16432
16433-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
16434+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
16435 .macro paranoidzeroentry_ist sym do_sym ist
16436 ENTRY(\sym)
16437 INTR_FRAME
c6e2a6c8 16438@@ -1104,14 +1438,30 @@ ENTRY(\sym)
bc901d79 16439 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f 16440 call save_paranoid
58c5fc13 16441 TRACE_IRQS_OFF
df50ba0c
MT
16442+#ifdef CONFIG_PAX_MEMORY_UDEREF
16443+ testb $3, CS(%rsp)
16444+ jnz 1f
317566c1 16445+ pax_enter_kernel
df50ba0c 16446+ jmp 2f
317566c1 16447+1: pax_enter_kernel_user
df50ba0c
MT
16448+2:
16449+#else
317566c1 16450+ pax_enter_kernel
df50ba0c 16451+#endif
58c5fc13
MT
16452 movq %rsp,%rdi /* pt_regs pointer */
16453 xorl %esi,%esi /* no error code */
58c5fc13 16454+#ifdef CONFIG_SMP
ae4e228f
MT
16455+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
16456+ lea init_tss(%r12), %r12
58c5fc13 16457+#else
ae4e228f 16458+ lea init_tss(%rip), %r12
58c5fc13 16459+#endif
6892158b 16460 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
58c5fc13 16461 call \do_sym
6892158b 16462 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
6e9df6a3
MT
16463 jmp paranoid_exit /* %ebx: no swapgs flag */
16464 CFI_ENDPROC
16465-END(\sym)
16466+ENDPROC(\sym)
16467 .endm
16468
16469 .macro errorentry sym do_sym
c6e2a6c8 16470@@ -1122,13 +1472,23 @@ ENTRY(\sym)
bc901d79 16471 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
ae4e228f
MT
16472 call error_entry
16473 DEFAULT_FRAME 0
df50ba0c
MT
16474+#ifdef CONFIG_PAX_MEMORY_UDEREF
16475+ testb $3, CS(%rsp)
16476+ jnz 1f
317566c1 16477+ pax_enter_kernel
df50ba0c 16478+ jmp 2f
317566c1 16479+1: pax_enter_kernel_user
df50ba0c
MT
16480+2:
16481+#else
317566c1 16482+ pax_enter_kernel
df50ba0c 16483+#endif
ae4e228f
MT
16484 movq %rsp,%rdi /* pt_regs pointer */
16485 movq ORIG_RAX(%rsp),%rsi /* get error code */
16486 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
6e9df6a3
MT
16487 call \do_sym
16488 jmp error_exit /* %ebx: no swapgs flag */
16489 CFI_ENDPROC
16490-END(\sym)
16491+ENDPROC(\sym)
16492 .endm
16493
16494 /* error code is on the stack already */
c6e2a6c8 16495@@ -1141,13 +1501,23 @@ ENTRY(\sym)
ae4e228f
MT
16496 call save_paranoid
16497 DEFAULT_FRAME 0
16498 TRACE_IRQS_OFF
df50ba0c
MT
16499+#ifdef CONFIG_PAX_MEMORY_UDEREF
16500+ testb $3, CS(%rsp)
16501+ jnz 1f
317566c1 16502+ pax_enter_kernel
df50ba0c 16503+ jmp 2f
317566c1 16504+1: pax_enter_kernel_user
df50ba0c
MT
16505+2:
16506+#else
317566c1 16507+ pax_enter_kernel
df50ba0c 16508+#endif
ae4e228f
MT
16509 movq %rsp,%rdi /* pt_regs pointer */
16510 movq ORIG_RAX(%rsp),%rsi /* get error code */
16511 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
6e9df6a3
MT
16512 call \do_sym
16513 jmp paranoid_exit /* %ebx: no swapgs flag */
16514 CFI_ENDPROC
16515-END(\sym)
16516+ENDPROC(\sym)
16517 .endm
16518
16519 zeroentry divide_error do_divide_error
c6e2a6c8 16520@@ -1177,9 +1547,10 @@ gs_change:
6e9df6a3
MT
16521 2: mfence /* workaround */
16522 SWAPGS
16523 popfq_cfi
16524+ pax_force_retaddr
16525 ret
16526 CFI_ENDPROC
16527-END(native_load_gs_index)
16528+ENDPROC(native_load_gs_index)
16529
16530 .section __ex_table,"a"
16531 .align 8
c6e2a6c8 16532@@ -1201,13 +1572,14 @@ ENTRY(kernel_thread_helper)
6e9df6a3
MT
16533 * Here we are in the child and the registers are set as they were
16534 * at kernel_thread() invocation in the parent.
16535 */
16536+ pax_force_fptr %rsi
16537 call *%rsi
16538 # exit
16539 mov %eax, %edi
16540 call do_exit
16541 ud2 # padding for call trace
16542 CFI_ENDPROC
16543-END(kernel_thread_helper)
16544+ENDPROC(kernel_thread_helper)
16545
16546 /*
16547 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
c6e2a6c8 16548@@ -1234,11 +1606,11 @@ ENTRY(kernel_execve)
fe2de317
MT
16549 RESTORE_REST
16550 testq %rax,%rax
6e9df6a3 16551 je int_ret_from_sys_call
fe2de317 16552- RESTORE_ARGS
6e9df6a3
MT
16553 UNFAKE_STACK_FRAME
16554+ pax_force_retaddr
16555 ret
16556 CFI_ENDPROC
16557-END(kernel_execve)
16558+ENDPROC(kernel_execve)
16559
16560 /* Call softirq on interrupt stack. Interrupts are off. */
16561 ENTRY(call_softirq)
c6e2a6c8 16562@@ -1256,9 +1628,10 @@ ENTRY(call_softirq)
6e9df6a3
MT
16563 CFI_DEF_CFA_REGISTER rsp
16564 CFI_ADJUST_CFA_OFFSET -8
16565 decl PER_CPU_VAR(irq_count)
16566+ pax_force_retaddr
16567 ret
16568 CFI_ENDPROC
16569-END(call_softirq)
16570+ENDPROC(call_softirq)
16571
16572 #ifdef CONFIG_XEN
16573 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
c6e2a6c8 16574@@ -1296,7 +1669,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
6e9df6a3
MT
16575 decl PER_CPU_VAR(irq_count)
16576 jmp error_exit
16577 CFI_ENDPROC
16578-END(xen_do_hypervisor_callback)
16579+ENDPROC(xen_do_hypervisor_callback)
16580
16581 /*
16582 * Hypervisor uses this for application faults while it executes.
c6e2a6c8 16583@@ -1355,7 +1728,7 @@ ENTRY(xen_failsafe_callback)
6e9df6a3
MT
16584 SAVE_ALL
16585 jmp error_exit
16586 CFI_ENDPROC
16587-END(xen_failsafe_callback)
16588+ENDPROC(xen_failsafe_callback)
16589
16590 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
16591 xen_hvm_callback_vector xen_evtchn_do_upcall
c6e2a6c8 16592@@ -1404,16 +1777,31 @@ ENTRY(paranoid_exit)
df50ba0c
MT
16593 TRACE_IRQS_OFF
16594 testl %ebx,%ebx /* swapgs needed? */
16595 jnz paranoid_restore
16596- testl $3,CS(%rsp)
16597+ testb $3,CS(%rsp)
ae4e228f 16598 jnz paranoid_userspace
df50ba0c 16599+#ifdef CONFIG_PAX_MEMORY_UDEREF
317566c1 16600+ pax_exit_kernel
df50ba0c
MT
16601+ TRACE_IRQS_IRETQ 0
16602+ SWAPGS_UNSAFE_STACK
16603+ RESTORE_ALL 8
fe2de317 16604+ pax_force_retaddr_bts
df50ba0c
MT
16605+ jmp irq_return
16606+#endif
ae4e228f 16607 paranoid_swapgs:
df50ba0c 16608+#ifdef CONFIG_PAX_MEMORY_UDEREF
317566c1 16609+ pax_exit_kernel_user
df50ba0c 16610+#else
317566c1 16611+ pax_exit_kernel
df50ba0c 16612+#endif
ae4e228f
MT
16613 TRACE_IRQS_IRETQ 0
16614 SWAPGS_UNSAFE_STACK
16615 RESTORE_ALL 8
16616 jmp irq_return
16617 paranoid_restore:
317566c1 16618+ pax_exit_kernel
ae4e228f
MT
16619 TRACE_IRQS_IRETQ 0
16620 RESTORE_ALL 8
fe2de317 16621+ pax_force_retaddr_bts
ae4e228f 16622 jmp irq_return
15a11c5b
MT
16623 paranoid_userspace:
16624 GET_THREAD_INFO(%rcx)
c6e2a6c8 16625@@ -1442,7 +1830,7 @@ paranoid_schedule:
6e9df6a3
MT
16626 TRACE_IRQS_OFF
16627 jmp paranoid_userspace
16628 CFI_ENDPROC
16629-END(paranoid_exit)
16630+ENDPROC(paranoid_exit)
16631
16632 /*
16633 * Exception entry point. This expects an error code/orig_rax on the stack.
c6e2a6c8 16634@@ -1469,12 +1857,13 @@ ENTRY(error_entry)
df50ba0c
MT
16635 movq_cfi r14, R14+8
16636 movq_cfi r15, R15+8
16637 xorl %ebx,%ebx
16638- testl $3,CS+8(%rsp)
16639+ testb $3,CS+8(%rsp)
16640 je error_kernelspace
16641 error_swapgs:
16642 SWAPGS
6e9df6a3
MT
16643 error_sti:
16644 TRACE_IRQS_OFF
fe2de317 16645+ pax_force_retaddr_bts
6e9df6a3
MT
16646 ret
16647
16648 /*
c6e2a6c8 16649@@ -1501,7 +1890,7 @@ bstep_iret:
6e9df6a3
MT
16650 movq %rcx,RIP+8(%rsp)
16651 jmp error_swapgs
16652 CFI_ENDPROC
16653-END(error_entry)
16654+ENDPROC(error_entry)
16655
16656
16657 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
c6e2a6c8 16658@@ -1521,7 +1910,7 @@ ENTRY(error_exit)
6e9df6a3
MT
16659 jnz retint_careful
16660 jmp retint_swapgs
16661 CFI_ENDPROC
16662-END(error_exit)
16663+ENDPROC(error_exit)
16664
5e856224
MT
16665 /*
16666 * Test if a given stack is an NMI stack or not.
c6e2a6c8 16667@@ -1579,9 +1968,11 @@ ENTRY(nmi)
5e856224
MT
16668 * If %cs was not the kernel segment, then the NMI triggered in user
16669 * space, which means it is definitely not nested.
16670 */
16671+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
16672+ je 1f
16673 cmpl $__KERNEL_CS, 16(%rsp)
16674 jne first_nmi
16675-
16676+1:
16677 /*
16678 * Check the special variable on the stack to see if NMIs are
16679 * executing.
c6e2a6c8 16680@@ -1728,6 +2119,16 @@ end_repeat_nmi:
5e856224 16681 */
ae4e228f
MT
16682 call save_paranoid
16683 DEFAULT_FRAME 0
df50ba0c
MT
16684+#ifdef CONFIG_PAX_MEMORY_UDEREF
16685+ testb $3, CS(%rsp)
16686+ jnz 1f
317566c1 16687+ pax_enter_kernel
df50ba0c 16688+ jmp 2f
317566c1 16689+1: pax_enter_kernel_user
df50ba0c
MT
16690+2:
16691+#else
317566c1 16692+ pax_enter_kernel
df50ba0c 16693+#endif
ae4e228f
MT
16694 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16695 movq %rsp,%rdi
16696 movq $-1,%rsi
c6e2a6c8 16697@@ -1735,21 +2136,32 @@ end_repeat_nmi:
df50ba0c
MT
16698 testl %ebx,%ebx /* swapgs needed? */
16699 jnz nmi_restore
ae4e228f 16700 nmi_swapgs:
317566c1
MT
16701+#ifdef CONFIG_PAX_MEMORY_UDEREF
16702+ pax_exit_kernel_user
16703+#else
16704+ pax_exit_kernel
16705+#endif
ae4e228f 16706 SWAPGS_UNSAFE_STACK
317566c1 16707+ RESTORE_ALL 8
5e856224
MT
16708+ /* Clear the NMI executing stack variable */
16709+ movq $0, 10*8(%rsp)
317566c1 16710+ jmp irq_return
ae4e228f 16711 nmi_restore:
317566c1 16712+ pax_exit_kernel
ae4e228f 16713 RESTORE_ALL 8
fe2de317 16714+ pax_force_retaddr_bts
5e856224
MT
16715 /* Clear the NMI executing stack variable */
16716 movq $0, 10*8(%rsp)
ae4e228f 16717 jmp irq_return
6e9df6a3 16718 CFI_ENDPROC
6e9df6a3
MT
16719-END(nmi)
16720+ENDPROC(nmi)
16721
c6e2a6c8
MT
16722 ENTRY(ignore_sysret)
16723 CFI_STARTPROC
6e9df6a3
MT
16724 mov $-ENOSYS,%eax
16725 sysret
16726 CFI_ENDPROC
16727-END(ignore_sysret)
16728+ENDPROC(ignore_sysret)
16729
16730 /*
16731 * End of kprobes section
fe2de317
MT
16732diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16733index c9a281f..ce2f317 100644
16734--- a/arch/x86/kernel/ftrace.c
16735+++ b/arch/x86/kernel/ftrace.c
16736@@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the IP to write to */
15a11c5b 16737 static const void *mod_code_newcode; /* holds the text to write to the IP */
8308f9c9
MT
16738
16739 static unsigned nmi_wait_count;
16740-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16741+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16742
16743 int ftrace_arch_read_dyn_info(char *buf, int size)
16744 {
fe2de317 16745@@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
8308f9c9
MT
16746
16747 r = snprintf(buf, size, "%u %u",
16748 nmi_wait_count,
16749- atomic_read(&nmi_update_count));
16750+ atomic_read_unchecked(&nmi_update_count));
16751 return r;
16752 }
16753
16754@@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
df50ba0c 16755
ae4e228f
MT
16756 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16757 smp_rmb();
16758+ pax_open_kernel();
16759 ftrace_mod_code();
8308f9c9 16760- atomic_inc(&nmi_update_count);
ae4e228f 16761+ pax_close_kernel();
8308f9c9 16762+ atomic_inc_unchecked(&nmi_update_count);
ae4e228f
MT
16763 }
16764 /* Must have previous changes seen before executions */
8308f9c9 16765 smp_mb();
fe2de317 16766@@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
ae4e228f
MT
16767 {
16768 unsigned char replaced[MCOUNT_INSN_SIZE];
16769
16770+ ip = ktla_ktva(ip);
16771+
16772 /*
16773 * Note: Due to modules and __init, code can
16774 * disappear and change, we need to protect against faulting
fe2de317 16775@@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
58c5fc13
MT
16776 unsigned char old[MCOUNT_INSN_SIZE], *new;
16777 int ret;
16778
16779- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16780+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16781 new = ftrace_call_replace(ip, (unsigned long)func);
ae4e228f
MT
16782 ret = ftrace_modify_code(ip, old, new);
16783
fe2de317 16784@@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long ip,
ae4e228f
MT
16785 {
16786 unsigned char code[MCOUNT_INSN_SIZE];
16787
16788+ ip = ktla_ktva(ip);
16789+
16790 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16791 return -EFAULT;
16792
fe2de317 16793diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
5e856224 16794index 51ff186..9e77418 100644
fe2de317
MT
16795--- a/arch/x86/kernel/head32.c
16796+++ b/arch/x86/kernel/head32.c
bc901d79 16797@@ -19,6 +19,7 @@
ae4e228f 16798 #include <asm/io_apic.h>
58c5fc13 16799 #include <asm/bios_ebda.h>
bc901d79 16800 #include <asm/tlbflush.h>
58c5fc13
MT
16801+#include <asm/boot.h>
16802
ae4e228f 16803 static void __init i386_default_early_setup(void)
58c5fc13 16804 {
5e856224 16805@@ -31,8 +32,7 @@ static void __init i386_default_early_setup(void)
58c5fc13 16806
5e856224
MT
16807 void __init i386_start_kernel(void)
16808 {
16809- memblock_reserve(__pa_symbol(&_text),
16810- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
16811+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
58c5fc13
MT
16812
16813 #ifdef CONFIG_BLK_DEV_INITRD
16814 /* Reserve INITRD */
fe2de317
MT
16815diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16816index ce0be7c..c41476e 100644
16817--- a/arch/x86/kernel/head_32.S
16818+++ b/arch/x86/kernel/head_32.S
df50ba0c 16819@@ -25,6 +25,12 @@
58c5fc13
MT
16820 /* Physical address */
16821 #define pa(X) ((X) - __PAGE_OFFSET)
ae4e228f
MT
16822
16823+#ifdef CONFIG_PAX_KERNEXEC
16824+#define ta(X) (X)
16825+#else
16826+#define ta(X) ((X) - __PAGE_OFFSET)
16827+#endif
16828+
16829 /*
16830 * References to members of the new_cpu_data structure.
16831 */
df50ba0c 16832@@ -54,11 +60,7 @@
58c5fc13
MT
16833 * and small than max_low_pfn, otherwise will waste some page table entries
16834 */
16835
16836-#if PTRS_PER_PMD > 1
16837-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16838-#else
16839-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16840-#endif
16841+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16842
bc901d79
MT
16843 /* Number of possible pages in the lowmem region */
16844 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
fe2de317 16845@@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
58c5fc13
MT
16846 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16847
16848 /*
16849+ * Real beginning of normal "text" segment
16850+ */
16851+ENTRY(stext)
16852+ENTRY(_stext)
16853+
16854+/*
16855 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16856 * %esi points to the real-mode code as a 32-bit pointer.
16857 * CS and DS must be 4 GB flat segments, but we don't depend on
bc901d79 16858@@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
58c5fc13
MT
16859 * can.
16860 */
ae4e228f 16861 __HEAD
58c5fc13
MT
16862+
16863+#ifdef CONFIG_PAX_KERNEXEC
16864+ jmp startup_32
16865+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16866+.fill PAGE_SIZE-5,1,0xcc
16867+#endif
16868+
16869 ENTRY(startup_32)
16454cff
MT
16870 movl pa(stack_start),%ecx
16871
16872@@ -105,6 +120,57 @@ ENTRY(startup_32)
58c5fc13 16873 2:
16454cff 16874 leal -__PAGE_OFFSET(%ecx),%esp
58c5fc13
MT
16875
16876+#ifdef CONFIG_SMP
16877+ movl $pa(cpu_gdt_table),%edi
16878+ movl $__per_cpu_load,%eax
16879+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16880+ rorl $16,%eax
16881+ movb %al,__KERNEL_PERCPU + 4(%edi)
16882+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16883+ movl $__per_cpu_end - 1,%eax
ae4e228f 16884+ subl $__per_cpu_start,%eax
58c5fc13
MT
16885+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16886+#endif
16887+
16888+#ifdef CONFIG_PAX_MEMORY_UDEREF
16889+ movl $NR_CPUS,%ecx
16890+ movl $pa(cpu_gdt_table),%edi
16891+1:
16892+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
bc901d79
MT
16893+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16894+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
58c5fc13
MT
16895+ addl $PAGE_SIZE_asm,%edi
16896+ loop 1b
16897+#endif
16898+
16899+#ifdef CONFIG_PAX_KERNEXEC
16900+ movl $pa(boot_gdt),%edi
ae4e228f 16901+ movl $__LOAD_PHYSICAL_ADDR,%eax
58c5fc13
MT
16902+ movw %ax,__BOOT_CS + 2(%edi)
16903+ rorl $16,%eax
16904+ movb %al,__BOOT_CS + 4(%edi)
16905+ movb %ah,__BOOT_CS + 7(%edi)
16906+ rorl $16,%eax
16907+
ae4e228f
MT
16908+ ljmp $(__BOOT_CS),$1f
16909+1:
16910+
58c5fc13
MT
16911+ movl $NR_CPUS,%ecx
16912+ movl $pa(cpu_gdt_table),%edi
ae4e228f 16913+ addl $__PAGE_OFFSET,%eax
58c5fc13
MT
16914+1:
16915+ movw %ax,__KERNEL_CS + 2(%edi)
ae4e228f 16916+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
58c5fc13
MT
16917+ rorl $16,%eax
16918+ movb %al,__KERNEL_CS + 4(%edi)
ae4e228f 16919+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
58c5fc13 16920+ movb %ah,__KERNEL_CS + 7(%edi)
ae4e228f 16921+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
58c5fc13
MT
16922+ rorl $16,%eax
16923+ addl $PAGE_SIZE_asm,%edi
16924+ loop 1b
16925+#endif
16926+
16927 /*
16928 * Clear BSS first so that there are no surprises...
16929 */
16454cff 16930@@ -195,8 +261,11 @@ ENTRY(startup_32)
58c5fc13
MT
16931 movl %eax, pa(max_pfn_mapped)
16932
16933 /* Do early initialization of the fixmap area */
bc901d79
MT
16934- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16935- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
58c5fc13 16936+#ifdef CONFIG_COMPAT_VDSO
bc901d79 16937+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
58c5fc13 16938+#else
bc901d79 16939+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
58c5fc13
MT
16940+#endif
16941 #else /* Not PAE */
16942
16943 page_pde_offset = (__PAGE_OFFSET >> 20);
16454cff 16944@@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
58c5fc13
MT
16945 movl %eax, pa(max_pfn_mapped)
16946
16947 /* Do early initialization of the fixmap area */
bc901d79
MT
16948- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
16949- movl %eax,pa(initial_page_table+0xffc)
58c5fc13 16950+#ifdef CONFIG_COMPAT_VDSO
bc901d79 16951+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
58c5fc13 16952+#else
bc901d79 16953+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
58c5fc13
MT
16954+#endif
16955 #endif
16454cff
MT
16956
16957 #ifdef CONFIG_PARAVIRT
16958@@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16959 cmpl $num_subarch_entries, %eax
16960 jae bad_subarch
16961
16962- movl pa(subarch_entries)(,%eax,4), %eax
16963- subl $__PAGE_OFFSET, %eax
16964- jmp *%eax
16965+ jmp *pa(subarch_entries)(,%eax,4)
16966
16967 bad_subarch:
16968 WEAK(lguest_entry)
16969@@ -255,10 +325,10 @@ WEAK(xen_entry)
16970 __INITDATA
16971
16972 subarch_entries:
16973- .long default_entry /* normal x86/PC */
16974- .long lguest_entry /* lguest hypervisor */
16975- .long xen_entry /* Xen hypervisor */
16976- .long default_entry /* Moorestown MID */
66a7e928
MT
16977+ .long ta(default_entry) /* normal x86/PC */
16978+ .long ta(lguest_entry) /* lguest hypervisor */
16979+ .long ta(xen_entry) /* Xen hypervisor */
16980+ .long ta(default_entry) /* Moorestown MID */
16454cff
MT
16981 num_subarch_entries = (. - subarch_entries) / 4
16982 .previous
16983 #else
16984@@ -312,6 +382,7 @@ default_entry:
58c5fc13
MT
16985 orl %edx,%eax
16986 movl %eax,%cr4
16987
16988+#ifdef CONFIG_X86_PAE
ae4e228f
MT
16989 testb $X86_CR4_PAE, %al # check if PAE is enabled
16990 jz 6f
58c5fc13 16991
16454cff 16992@@ -340,6 +411,9 @@ default_entry:
58c5fc13
MT
16993 /* Make changes effective */
16994 wrmsr
16995
16996+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
58c5fc13 16997+#endif
ae4e228f 16998+
58c5fc13
MT
16999 6:
17000
17001 /*
16454cff 17002@@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
58c5fc13
MT
17003 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17004 movl %eax,%ss # after changing gdt.
17005
17006- movl $(__USER_DS),%eax # DS/ES contains default USER segment
17007+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17008 movl %eax,%ds
17009 movl %eax,%es
17010
16454cff 17011@@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
58c5fc13
MT
17012 */
17013 cmpb $0,ready
17014 jne 1f
df50ba0c 17015- movl $gdt_page,%eax
58c5fc13 17016+ movl $cpu_gdt_table,%eax
df50ba0c 17017 movl $stack_canary,%ecx
58c5fc13
MT
17018+#ifdef CONFIG_SMP
17019+ addl $__per_cpu_load,%ecx
17020+#endif
17021 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17022 shrl $16, %ecx
17023 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
bc901d79
MT
17024 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17025 1:
17026-#endif
17027 movl $(__KERNEL_STACK_CANARY),%eax
17028+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17029+ movl $(__USER_DS),%eax
17030+#else
17031+ xorl %eax,%eax
17032+#endif
17033 movl %eax,%gs
17034
17035 xorl %eax,%eax # Clear LDT
16454cff 17036@@ -558,22 +639,22 @@ early_page_fault:
58c5fc13
MT
17037 jmp early_fault
17038
17039 early_fault:
17040- cld
17041 #ifdef CONFIG_PRINTK
17042+ cmpl $1,%ss:early_recursion_flag
17043+ je hlt_loop
17044+ incl %ss:early_recursion_flag
17045+ cld
17046 pusha
17047 movl $(__KERNEL_DS),%eax
17048 movl %eax,%ds
17049 movl %eax,%es
17050- cmpl $2,early_recursion_flag
17051- je hlt_loop
17052- incl early_recursion_flag
17053 movl %cr2,%eax
17054 pushl %eax
17055 pushl %edx /* trapno */
17056 pushl $fault_msg
17057 call printk
17058+; call dump_stack
17059 #endif
17060- call dump_stack
17061 hlt_loop:
17062 hlt
17063 jmp hlt_loop
16454cff 17064@@ -581,8 +662,11 @@ hlt_loop:
58c5fc13
MT
17065 /* This is the default interrupt "handler" :-) */
17066 ALIGN
17067 ignore_int:
17068- cld
17069 #ifdef CONFIG_PRINTK
17070+ cmpl $2,%ss:early_recursion_flag
17071+ je hlt_loop
17072+ incl %ss:early_recursion_flag
17073+ cld
17074 pushl %eax
17075 pushl %ecx
17076 pushl %edx
16454cff 17077@@ -591,9 +675,6 @@ ignore_int:
58c5fc13
MT
17078 movl $(__KERNEL_DS),%eax
17079 movl %eax,%ds
17080 movl %eax,%es
17081- cmpl $2,early_recursion_flag
17082- je hlt_loop
17083- incl early_recursion_flag
17084 pushl 16(%esp)
17085 pushl 24(%esp)
17086 pushl 32(%esp)
16454cff 17087@@ -622,29 +703,43 @@ ENTRY(initial_code)
58c5fc13
MT
17088 /*
17089 * BSS section
17090 */
ae4e228f 17091-__PAGE_ALIGNED_BSS
66a7e928 17092- .align PAGE_SIZE
58c5fc13 17093 #ifdef CONFIG_X86_PAE
bc901d79 17094+.section .initial_pg_pmd,"a",@progbits
16454cff 17095 initial_pg_pmd:
58c5fc13
MT
17096 .fill 1024*KPMDS,4,0
17097 #else
c52201e0 17098+.section .initial_page_table,"a",@progbits
bc901d79 17099 ENTRY(initial_page_table)
58c5fc13
MT
17100 .fill 1024,4,0
17101 #endif
bc901d79 17102+.section .initial_pg_fixmap,"a",@progbits
16454cff 17103 initial_pg_fixmap:
58c5fc13 17104 .fill 1024,4,0
bc901d79
MT
17105+.section .empty_zero_page,"a",@progbits
17106 ENTRY(empty_zero_page)
17107 .fill 4096,1,0
17108+.section .swapper_pg_dir,"a",@progbits
17109 ENTRY(swapper_pg_dir)
6892158b
MT
17110+#ifdef CONFIG_X86_PAE
17111+ .fill 4,8,0
17112+#else
17113 .fill 1024,4,0
6892158b 17114+#endif
58c5fc13 17115+
bc901d79 17116+/*
58c5fc13
MT
17117+ * The IDT has to be page-aligned to simplify the Pentium
17118+ * F0 0F bug workaround.. We have a special link segment
17119+ * for this.
17120+ */
17121+.section .idt,"a",@progbits
17122+ENTRY(idt_table)
17123+ .fill 256,8,0
bc901d79
MT
17124
17125 /*
58c5fc13
MT
17126 * This starts the data section.
17127 */
17128 #ifdef CONFIG_X86_PAE
ae4e228f 17129-__PAGE_ALIGNED_DATA
58c5fc13 17130- /* Page-aligned for the benefit of paravirt? */
66a7e928 17131- .align PAGE_SIZE
bc901d79
MT
17132+.section .initial_page_table,"a",@progbits
17133 ENTRY(initial_page_table)
17134 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
58c5fc13 17135 # if KPMDS == 3
71d190be 17136@@ -663,18 +758,27 @@ ENTRY(initial_page_table)
df50ba0c
MT
17137 # error "Kernel PMDs should be 1, 2 or 3"
17138 # endif
66a7e928 17139 .align PAGE_SIZE /* needs to be page-sized too */
df50ba0c
MT
17140+
17141+#ifdef CONFIG_PAX_PER_CPU_PGD
17142+ENTRY(cpu_pgd)
17143+ .rept NR_CPUS
17144+ .fill 4,8,0
17145+ .endr
17146+#endif
17147+
17148 #endif
58c5fc13
MT
17149
17150 .data
16454cff 17151 .balign 4
58c5fc13
MT
17152 ENTRY(stack_start)
17153- .long init_thread_union+THREAD_SIZE
17154+ .long init_thread_union+THREAD_SIZE-8
58c5fc13 17155
fe2de317
MT
17156+ready: .byte 0
17157+
58c5fc13
MT
17158+.section .rodata,"a",@progbits
17159 early_recursion_flag:
17160 .long 0
17161
71d190be
MT
17162-ready: .byte 0
17163-
17164 int_msg:
17165 .asciz "Unknown interrupt or fault at: %p %p %p\n"
17166
16454cff 17167@@ -707,7 +811,7 @@ fault_msg:
58c5fc13
MT
17168 .word 0 # 32 bit align gdt_desc.address
17169 boot_gdt_descr:
17170 .word __BOOT_DS+7
17171- .long boot_gdt - __PAGE_OFFSET
17172+ .long pa(boot_gdt)
17173
17174 .word 0 # 32-bit align idt_desc.address
17175 idt_descr:
16454cff 17176@@ -718,7 +822,7 @@ idt_descr:
58c5fc13
MT
17177 .word 0 # 32 bit align gdt_desc.address
17178 ENTRY(early_gdt_descr)
17179 .word GDT_ENTRIES*8-1
df50ba0c 17180- .long gdt_page /* Overwritten for secondary CPUs */
58c5fc13
MT
17181+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17182
17183 /*
17184 * The boot_gdt must mirror the equivalent in setup.S and is
16454cff 17185@@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
58c5fc13
MT
17186 .align L1_CACHE_BYTES
17187 ENTRY(boot_gdt)
17188 .fill GDT_ENTRY_BOOT_CS,8,0
17189- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17190- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17191+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17192+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17193+
17194+ .align PAGE_SIZE_asm
17195+ENTRY(cpu_gdt_table)
17196+ .rept NR_CPUS
17197+ .quad 0x0000000000000000 /* NULL descriptor */
17198+ .quad 0x0000000000000000 /* 0x0b reserved */
17199+ .quad 0x0000000000000000 /* 0x13 reserved */
17200+ .quad 0x0000000000000000 /* 0x1b reserved */
ae4e228f
MT
17201+
17202+#ifdef CONFIG_PAX_KERNEXEC
17203+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17204+#else
58c5fc13 17205+ .quad 0x0000000000000000 /* 0x20 unused */
ae4e228f
MT
17206+#endif
17207+
58c5fc13
MT
17208+ .quad 0x0000000000000000 /* 0x28 unused */
17209+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17210+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17211+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17212+ .quad 0x0000000000000000 /* 0x4b reserved */
17213+ .quad 0x0000000000000000 /* 0x53 reserved */
17214+ .quad 0x0000000000000000 /* 0x5b reserved */
17215+
17216+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17217+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17218+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17219+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17220+
17221+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17222+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17223+
17224+ /*
17225+ * Segments used for calling PnP BIOS have byte granularity.
17226+ * The code segments and data segments have fixed 64k limits,
17227+ * the transfer segment sizes are set at run time.
17228+ */
17229+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17230+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17231+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17232+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17233+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17234+
17235+ /*
17236+ * The APM segments have byte granularity and their bases
17237+ * are set at run time. All have 64k limits.
17238+ */
17239+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17240+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17241+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17242+
17243+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17244+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
15a11c5b 17245+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
58c5fc13
MT
17246+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17247+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17248+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17249+
17250+ /* Be sure this is zeroed to avoid false validations in Xen */
17251+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17252+ .endr
fe2de317 17253diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
5e856224 17254index 40f4eb3..6d24d9d 100644
fe2de317
MT
17255--- a/arch/x86/kernel/head_64.S
17256+++ b/arch/x86/kernel/head_64.S
17257@@ -19,6 +19,8 @@
ae4e228f
MT
17258 #include <asm/cache.h>
17259 #include <asm/processor-flags.h>
17260 #include <asm/percpu.h>
17261+#include <asm/cpufeature.h>
fe2de317 17262+#include <asm/alternative-asm.h>
ae4e228f
MT
17263
17264 #ifdef CONFIG_PARAVIRT
17265 #include <asm/asm-offsets.h>
fe2de317 17266@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
58c5fc13
MT
17267 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17268 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17269 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17270+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17271+L3_VMALLOC_START = pud_index(VMALLOC_START)
fe2de317
MT
17272+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17273+L3_VMALLOC_END = pud_index(VMALLOC_END)
58c5fc13
MT
17274+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17275+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17276
17277 .text
ae4e228f 17278 __HEAD
fe2de317 17279@@ -85,35 +93,23 @@ startup_64:
58c5fc13
MT
17280 */
17281 addq %rbp, init_level4_pgt + 0(%rip)
17282 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17283+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
fe2de317 17284+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
58c5fc13
MT
17285+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17286 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17287
17288 addq %rbp, level3_ident_pgt + 0(%rip)
ae4e228f 17289+#ifndef CONFIG_XEN
58c5fc13 17290+ addq %rbp, level3_ident_pgt + 8(%rip)
ae4e228f 17291+#endif
58c5fc13
MT
17292
17293- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17294- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17295+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
fe2de317 17296+
58c5fc13
MT
17297+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17298+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17299
fe2de317
MT
17300 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17301-
58c5fc13
MT
17302- /* Add an Identity mapping if I am above 1G */
17303- leaq _text(%rip), %rdi
17304- andq $PMD_PAGE_MASK, %rdi
17305-
17306- movq %rdi, %rax
17307- shrq $PUD_SHIFT, %rax
17308- andq $(PTRS_PER_PUD - 1), %rax
17309- jz ident_complete
17310-
17311- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17312- leaq level3_ident_pgt(%rip), %rbx
17313- movq %rdx, 0(%rbx, %rax, 8)
17314-
17315- movq %rdi, %rax
17316- shrq $PMD_SHIFT, %rax
17317- andq $(PTRS_PER_PMD - 1), %rax
17318- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17319- leaq level2_spare_pgt(%rip), %rbx
17320- movq %rdx, 0(%rbx, %rax, 8)
17321-ident_complete:
58c5fc13
MT
17322+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17323
17324 /*
17325 * Fixup the kernel text+data virtual addresses. Note that
fe2de317 17326@@ -160,8 +156,8 @@ ENTRY(secondary_startup_64)
df50ba0c
MT
17327 * after the boot processor executes this code.
17328 */
17329
17330- /* Enable PAE mode and PGE */
17331- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17332+ /* Enable PAE mode and PSE/PGE */
17333+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17334 movq %rax, %cr4
17335
17336 /* Setup early boot stage 4 level pagetables. */
fe2de317 17337@@ -183,9 +179,17 @@ ENTRY(secondary_startup_64)
ae4e228f
MT
17338 movl $MSR_EFER, %ecx
17339 rdmsr
17340 btsl $_EFER_SCE, %eax /* Enable System Call */
17341- btl $20,%edi /* No Execute supported? */
17342+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
58c5fc13
MT
17343 jnc 1f
17344 btsl $_EFER_NX, %eax
17345+ leaq init_level4_pgt(%rip), %rdi
fe2de317 17346+#ifndef CONFIG_EFI
58c5fc13 17347+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
fe2de317 17348+#endif
58c5fc13 17349+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
fe2de317 17350+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
58c5fc13 17351+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
ae4e228f 17352+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
58c5fc13
MT
17353 1: wrmsr /* Make changes effective */
17354
17355 /* Setup cr0 */
fe2de317
MT
17356@@ -247,6 +251,7 @@ ENTRY(secondary_startup_64)
17357 * jump. In addition we need to ensure %cs is set so we make this
17358 * a far return.
17359 */
17360+ pax_set_fptr_mask
17361 movq initial_code(%rip),%rax
17362 pushq $0 # fake return address to stop unwinder
17363 pushq $__KERNEL_CS # set correct cs
17364@@ -269,7 +274,7 @@ ENTRY(secondary_startup_64)
58c5fc13
MT
17365 bad_address:
17366 jmp bad_address
17367
17368- .section ".init.text","ax"
17369+ __INIT
17370 #ifdef CONFIG_EARLY_PRINTK
17371 .globl early_idt_handlers
17372 early_idt_handlers:
fe2de317 17373@@ -314,18 +319,23 @@ ENTRY(early_idt_handler)
58c5fc13
MT
17374 #endif /* EARLY_PRINTK */
17375 1: hlt
17376 jmp 1b
17377+ .previous
17378
17379 #ifdef CONFIG_EARLY_PRINTK
17380+ __INITDATA
17381 early_recursion_flag:
17382 .long 0
17383+ .previous
17384
17385+ .section .rodata,"a",@progbits
17386 early_idt_msg:
17387 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17388 early_idt_ripmsg:
17389 .asciz "RIP %s\n"
fe2de317
MT
17390+ .previous
17391 #endif /* CONFIG_EARLY_PRINTK */
17392- .previous
58c5fc13
MT
17393
17394+ .section .rodata,"a",@progbits
17395 #define NEXT_PAGE(name) \
17396 .balign PAGE_SIZE; \
17397 ENTRY(name)
fe2de317 17398@@ -338,7 +348,6 @@ ENTRY(name)
bc901d79
MT
17399 i = i + 1 ; \
17400 .endr
17401
17402- .data
17403 /*
17404 * This default setting generates an ident mapping at address 0x100000
17405 * and a mapping for the kernel that precisely maps virtual address
fe2de317 17406@@ -349,13 +358,41 @@ NEXT_PAGE(init_level4_pgt)
17407 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17408 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17409 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17410+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17411+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17412+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17413+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17414+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17415+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17416 .org init_level4_pgt + L4_START_KERNEL*8, 0
17417 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17418 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17419
17420+#ifdef CONFIG_PAX_PER_CPU_PGD
17421+NEXT_PAGE(cpu_pgd)
17422+ .rept NR_CPUS
17423+ .fill 512,8,0
17424+ .endr
17425+#endif
17426+
17427 NEXT_PAGE(level3_ident_pgt)
17428 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17429+#ifdef CONFIG_XEN
17430 .fill 511,8,0
17431+#else
17432+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
ae4e228f 17433+ .fill 510,8,0
17434+#endif
17435+
17436+NEXT_PAGE(level3_vmalloc_start_pgt)
17437+ .fill 512,8,0
17438+
17439+NEXT_PAGE(level3_vmalloc_end_pgt)
17440+ .fill 512,8,0
17441+
17442+NEXT_PAGE(level3_vmemmap_pgt)
17443+ .fill L3_VMEMMAP_START,8,0
17444+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17445
17446 NEXT_PAGE(level3_kernel_pgt)
17447 .fill L3_START_KERNEL,8,0
fe2de317 17448@@ -363,20 +400,23 @@ NEXT_PAGE(level3_kernel_pgt)
17449 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17450 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17451
17452+NEXT_PAGE(level2_vmemmap_pgt)
17453+ .fill 512,8,0
17454+
17455 NEXT_PAGE(level2_fixmap_pgt)
17456- .fill 506,8,0
17457- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17458- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17459- .fill 5,8,0
17460+ .fill 507,8,0
17461+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17462+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17463+ .fill 4,8,0
17464
17465-NEXT_PAGE(level1_fixmap_pgt)
17466+NEXT_PAGE(level1_vsyscall_pgt)
17467 .fill 512,8,0
17468
17469-NEXT_PAGE(level2_ident_pgt)
17470- /* Since I easily can, map the first 1G.
ae4e228f 17471+ /* Since I easily can, map the first 2G.
17472 * Don't set NX because code runs from these pages.
17473 */
17474- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17475+NEXT_PAGE(level2_ident_pgt)
ae4e228f 17476+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17477
17478 NEXT_PAGE(level2_kernel_pgt)
17479 /*
5e856224 17480@@ -389,37 +429,59 @@ NEXT_PAGE(level2_kernel_pgt)
17481 * If you want to increase this then increase MODULES_VADDR
17482 * too.)
17483 */
17484- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17485- KERNEL_IMAGE_SIZE/PMD_SIZE)
17486-
17487-NEXT_PAGE(level2_spare_pgt)
17488- .fill 512, 8, 0
17489+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17490
17491 #undef PMDS
17492 #undef NEXT_PAGE
17493
17494- .data
17495+ .align PAGE_SIZE
17496+ENTRY(cpu_gdt_table)
17497+ .rept NR_CPUS
17498+ .quad 0x0000000000000000 /* NULL descriptor */
17499+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17500+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17501+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17502+ .quad 0x00cffb000000ffff /* __USER32_CS */
17503+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17504+ .quad 0x00affb000000ffff /* __USER_CS */
17505+
17506+#ifdef CONFIG_PAX_KERNEXEC
17507+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17508+#else
58c5fc13 17509+ .quad 0x0 /* unused */
17510+#endif
17511+
17512+ .quad 0,0 /* TSS */
17513+ .quad 0,0 /* LDT */
17514+ .quad 0,0,0 /* three TLS descriptors */
17515+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17516+ /* asm/segment.h:GDT_ENTRIES must match this */
17517+
17518+ /* zero the remaining page */
17519+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17520+ .endr
17521+
17522 .align 16
17523 .globl early_gdt_descr
17524 early_gdt_descr:
17525 .word GDT_ENTRIES*8-1
17526 early_gdt_descr_base:
17527- .quad INIT_PER_CPU_VAR(gdt_page)
17528+ .quad cpu_gdt_table
17529
17530 ENTRY(phys_base)
17531 /* This must match the first entry in level2_kernel_pgt */
17532 .quad 0x0000000000000000
17533
17534 #include "../../x86/xen/xen-head.S"
17535-
17536- .section .bss, "aw", @nobits
17537+
17538+ .section .rodata,"a",@progbits
17539 .align L1_CACHE_BYTES
17540 ENTRY(idt_table)
17541- .skip IDT_ENTRIES * 16
17542+ .fill 512,8,0
17543
17544 .align L1_CACHE_BYTES
17545 ENTRY(nmi_idt_table)
17546- .skip IDT_ENTRIES * 16
17547+ .fill 512,8,0
17548
ae4e228f 17549 __PAGE_ALIGNED_BSS
58c5fc13 17550 .align PAGE_SIZE
17551diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17552index 9c3bd4a..e1d9b35 100644
17553--- a/arch/x86/kernel/i386_ksyms_32.c
17554+++ b/arch/x86/kernel/i386_ksyms_32.c
17555@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17556 EXPORT_SYMBOL(cmpxchg8b_emu);
17557 #endif
17558
17559+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17560+
17561 /* Networking helper routines. */
17562 EXPORT_SYMBOL(csum_partial_copy_generic);
17563+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17564+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17565
17566 EXPORT_SYMBOL(__get_user_1);
17567 EXPORT_SYMBOL(__get_user_2);
ae4e228f 17568@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17569
17570 EXPORT_SYMBOL(csum_partial);
17571 EXPORT_SYMBOL(empty_zero_page);
17572+
17573+#ifdef CONFIG_PAX_KERNEXEC
17574+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17575+#endif
4c928ab7 17576diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
c6e2a6c8 17577index 2d6e649..df6e1af 100644
17578--- a/arch/x86/kernel/i387.c
17579+++ b/arch/x86/kernel/i387.c
17580@@ -59,7 +59,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
17581 static inline bool interrupted_user_mode(void)
4c928ab7 17582 {
17583 struct pt_regs *regs = get_irq_regs();
17584- return regs && user_mode_vm(regs);
17585+ return regs && user_mode(regs);
17586 }
17587
c6e2a6c8 17588 /*
fe2de317 17589diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
c6e2a6c8 17590index 36d1853..bf25736 100644
17591--- a/arch/x86/kernel/i8259.c
17592+++ b/arch/x86/kernel/i8259.c
c6e2a6c8 17593@@ -209,7 +209,7 @@ spurious_8259A_irq:
17594 "spurious 8259A interrupt: IRQ%d.\n", irq);
17595 spurious_irq_mask |= irqmask;
17596 }
17597- atomic_inc(&irq_err_count);
17598+ atomic_inc_unchecked(&irq_err_count);
17599 /*
17600 * Theoretically we do not have to handle this IRQ,
17601 * but in Linux this does not cause problems and is
17602diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17603index 43e9ccf..44ccf6f 100644
17604--- a/arch/x86/kernel/init_task.c
17605+++ b/arch/x86/kernel/init_task.c
17606@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17607 * way process stacks are handled. This is done by having a special
17608 * "init_task" linker map entry..
17609 */
17610-union thread_union init_thread_union __init_task_data =
17611- { INIT_THREAD_INFO(init_task) };
17612+union thread_union init_thread_union __init_task_data;
17613
17614 /*
17615 * Initial task structure.
17616@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17617 * section. Since TSS's are completely CPU-local, we want them
17618 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17619 */
17620-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17621-
17622+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17623+EXPORT_SYMBOL(init_tss);
17624diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17625index 8c96897..be66bfa 100644
17626--- a/arch/x86/kernel/ioport.c
17627+++ b/arch/x86/kernel/ioport.c
17628@@ -6,6 +6,7 @@
17629 #include <linux/sched.h>
17630 #include <linux/kernel.h>
17631 #include <linux/capability.h>
17632+#include <linux/security.h>
17633 #include <linux/errno.h>
17634 #include <linux/types.h>
17635 #include <linux/ioport.h>
fe2de317 17636@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17637
17638 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17639 return -EINVAL;
17640+#ifdef CONFIG_GRKERNSEC_IO
df50ba0c 17641+ if (turn_on && grsec_disable_privio) {
17642+ gr_handle_ioperm();
17643+ return -EPERM;
17644+ }
17645+#endif
17646 if (turn_on && !capable(CAP_SYS_RAWIO))
17647 return -EPERM;
17648
fe2de317 17649@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17650 * because the ->io_bitmap_max value must match the bitmap
17651 * contents:
17652 */
17653- tss = &per_cpu(init_tss, get_cpu());
17654+ tss = init_tss + get_cpu();
17655
17656 if (turn_on)
17657 bitmap_clear(t->io_bitmap_ptr, from, num);
fe2de317 17658@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
17659 return -EINVAL;
17660 /* Trying to gain more privileges? */
17661 if (level > old) {
17662+#ifdef CONFIG_GRKERNSEC_IO
17663+ if (grsec_disable_privio) {
17664+ gr_handle_iopl();
17665+ return -EPERM;
17666+ }
17667+#endif
17668 if (!capable(CAP_SYS_RAWIO))
17669 return -EPERM;
58c5fc13 17670 }
fe2de317 17671diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
c6e2a6c8 17672index 3dafc60..aa8e9c4 100644
17673--- a/arch/x86/kernel/irq.c
17674+++ b/arch/x86/kernel/irq.c
4c928ab7 17675@@ -18,7 +18,7 @@
17676 #include <asm/mce.h>
17677 #include <asm/hw_irq.h>
17678
17679-atomic_t irq_err_count;
17680+atomic_unchecked_t irq_err_count;
17681
17682 /* Function pointer for generic interrupt vector handling */
17683 void (*x86_platform_ipi_callback)(void) = NULL;
5e856224 17684@@ -121,9 +121,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
17685 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17686 seq_printf(p, " Machine check polls\n");
17687 #endif
17688- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17689+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17690 #if defined(CONFIG_X86_IO_APIC)
17691- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17692+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17693 #endif
17694 return 0;
17695 }
5e856224 17696@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17697
17698 u64 arch_irq_stat(void)
17699 {
17700- u64 sum = atomic_read(&irq_err_count);
17701+ u64 sum = atomic_read_unchecked(&irq_err_count);
17702
17703 #ifdef CONFIG_X86_IO_APIC
17704- sum += atomic_read(&irq_mis_count);
17705+ sum += atomic_read_unchecked(&irq_mis_count);
17706 #endif
17707 return sum;
17708 }
17709diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
c6e2a6c8 17710index 58b7f27..e112d08 100644
17711--- a/arch/x86/kernel/irq_32.c
17712+++ b/arch/x86/kernel/irq_32.c
5e856224 17713@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
17714 __asm__ __volatile__("andl %%esp,%0" :
17715 "=r" (sp) : "0" (THREAD_SIZE - 1));
17716
17717- return sp < (sizeof(struct thread_info) + STACK_WARN);
17718+ return sp < STACK_WARN;
17719 }
17720
17721 static void print_stack_overflow(void)
5e856224 17722@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
17723 * per-CPU IRQ handling contexts (thread information and stack)
17724 */
17725 union irq_ctx {
17726- struct thread_info tinfo;
17727- u32 stack[THREAD_SIZE/sizeof(u32)];
17728+ unsigned long previous_esp;
17729+ u32 stack[THREAD_SIZE/sizeof(u32)];
17730 } __attribute__((aligned(THREAD_SIZE)));
17731
17732 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
5e856224 17733@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
17734 static inline int
17735 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17736 {
17737- union irq_ctx *curctx, *irqctx;
17738+ union irq_ctx *irqctx;
17739 u32 *isp, arg1, arg2;
17740
17741- curctx = (union irq_ctx *) current_thread_info();
17742 irqctx = __this_cpu_read(hardirq_ctx);
17743
17744 /*
c6e2a6c8 17745@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17746 * handler) we can't do that and just have to keep using the
17747 * current stack (which is the irq stack already after all)
17748 */
17749- if (unlikely(curctx == irqctx))
17750+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17751 return 0;
17752
17753 /* build the stack frame on the IRQ stack */
17754- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17755- irqctx->tinfo.task = curctx->tinfo.task;
17756- irqctx->tinfo.previous_esp = current_stack_pointer;
58c5fc13 17757+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
71d190be 17758+ irqctx->previous_esp = current_stack_pointer;
bc901d79 17759
17760- /* Copy the preempt_count so that the [soft]irq checks work. */
17761- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
bc901d79 17762+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 17763+ __set_fs(MAKE_MM_SEG(0));
bc901d79 17764+#endif
71d190be 17765
17766 if (unlikely(overflow))
17767 call_on_stack(print_stack_overflow, isp);
c6e2a6c8 17768@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17769 : "0" (irq), "1" (desc), "2" (isp),
17770 "D" (desc->handle_irq)
17771 : "memory", "cc", "ecx");
17772+
17773+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 17774+ __set_fs(current_thread_info()->addr_limit);
17775+#endif
17776+
17777 return 1;
17778 }
17779
c6e2a6c8 17780@@ -121,29 +125,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17781 */
17782 void __cpuinit irq_ctx_init(int cpu)
17783 {
17784- union irq_ctx *irqctx;
17785-
17786 if (per_cpu(hardirq_ctx, cpu))
17787 return;
17788
17789- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17790- THREAD_FLAGS,
17791- THREAD_ORDER));
17792- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17793- irqctx->tinfo.cpu = cpu;
17794- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17795- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17796-
17797- per_cpu(hardirq_ctx, cpu) = irqctx;
17798-
17799- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
17800- THREAD_FLAGS,
17801- THREAD_ORDER));
17802- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
17803- irqctx->tinfo.cpu = cpu;
17804- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17805-
17806- per_cpu(softirq_ctx, cpu) = irqctx;
17807+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17808+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
17809
17810 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17811 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
c6e2a6c8 17812@@ -152,7 +138,6 @@ void __cpuinit irq_ctx_init(int cpu)
17813 asmlinkage void do_softirq(void)
17814 {
17815 unsigned long flags;
17816- struct thread_info *curctx;
17817 union irq_ctx *irqctx;
17818 u32 *isp;
17819
c6e2a6c8 17820@@ -162,15 +147,22 @@ asmlinkage void do_softirq(void)
17821 local_irq_save(flags);
17822
17823 if (local_softirq_pending()) {
17824- curctx = current_thread_info();
17825 irqctx = __this_cpu_read(softirq_ctx);
17826- irqctx->tinfo.task = curctx->task;
17827- irqctx->tinfo.previous_esp = current_stack_pointer;
17828+ irqctx->previous_esp = current_stack_pointer;
58c5fc13
MT
17829
17830 /* build the stack frame on the softirq stack */
17831- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17832+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
bc901d79
MT
17833+
17834+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 17835+ __set_fs(MAKE_MM_SEG(0));
bc901d79 17836+#endif
58c5fc13
MT
17837
17838 call_on_stack(__do_softirq, isp);
bc901d79
MT
17839+
17840+#ifdef CONFIG_PAX_MEMORY_UDEREF
71d190be 17841+ __set_fs(current_thread_info()->addr_limit);
bc901d79
MT
17842+#endif
17843+
58c5fc13 17844 /*
66a7e928 17845 * Shouldn't happen, we returned above if in_interrupt():
bc901d79 17846 */
17847@@ -191,7 +183,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
17848 if (unlikely(!desc))
17849 return false;
17850
17851- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17852+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
17853 if (unlikely(overflow))
17854 print_stack_overflow();
17855 desc->handle_irq(irq, desc);
4c928ab7 17856diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
5e856224 17857index d04d3ec..ea4b374 100644
17858--- a/arch/x86/kernel/irq_64.c
17859+++ b/arch/x86/kernel/irq_64.c
17860@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
17861 u64 estack_top, estack_bottom;
17862 u64 curbase = (u64)task_stack_page(current);
17863
17864- if (user_mode_vm(regs))
17865+ if (user_mode(regs))
17866 return;
17867
17868 if (regs->sp >= curbase + sizeof(struct thread_info) +
17869diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
c6e2a6c8 17870index 1d5d31e..ab846ed 100644
17871--- a/arch/x86/kernel/kdebugfs.c
17872+++ b/arch/x86/kernel/kdebugfs.c
17873@@ -28,6 +28,8 @@ struct setup_data_node {
17874 };
17875
17876 static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17877+ size_t count, loff_t *ppos) __size_overflow(3);
17878+static ssize_t setup_data_read(struct file *file, char __user *user_buf,
17879 size_t count, loff_t *ppos)
17880 {
17881 struct setup_data_node *node = file->private_data;
fe2de317 17882diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
c6e2a6c8 17883index 8bfb614..2b3b35f 100644
17884--- a/arch/x86/kernel/kgdb.c
17885+++ b/arch/x86/kernel/kgdb.c
c6e2a6c8 17886@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
6892158b 17887 #ifdef CONFIG_X86_32
66a7e928 17888 switch (regno) {
17889 case GDB_SS:
17890- if (!user_mode_vm(regs))
17891+ if (!user_mode(regs))
17892 *(unsigned long *)mem = __KERNEL_DS;
17893 break;
17894 case GDB_SP:
17895- if (!user_mode_vm(regs))
17896+ if (!user_mode(regs))
17897 *(unsigned long *)mem = kernel_stack_pointer(regs);
17898 break;
17899 case GDB_GS:
c6e2a6c8 17900@@ -476,12 +476,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17901 case 'k':
17902 /* clear the trace bit */
17903 linux_regs->flags &= ~X86_EFLAGS_TF;
17904- atomic_set(&kgdb_cpu_doing_single_step, -1);
17905+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17906
17907 /* set the trace bit if we're stepping */
17908 if (remcomInBuffer[0] == 's') {
17909 linux_regs->flags |= X86_EFLAGS_TF;
17910- atomic_set(&kgdb_cpu_doing_single_step,
17911+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17912 raw_smp_processor_id());
17913 }
17914
c6e2a6c8 17915@@ -546,7 +546,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
8308f9c9 17916
4c928ab7 17917 switch (cmd) {
17918 case DIE_DEBUG:
17919- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
17920+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
17921 if (user_mode(regs))
17922 return single_step_cont(regs, args);
17923 break;
17924diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
17925index c5e410e..da6aaf9 100644
17926--- a/arch/x86/kernel/kprobes-opt.c
17927+++ b/arch/x86/kernel/kprobes-opt.c
17928@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17929 * Verify if the address gap is in 2GB range, because this uses
17930 * a relative jump.
17931 */
17932- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
17933+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
17934 if (abs(rel) > 0x7fffffff)
17935 return -ERANGE;
17936
17937@@ -359,11 +359,11 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
17938 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
17939
17940 /* Set probe function call */
17941- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
17942+ synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
17943
17944 /* Set returning jmp instruction at the tail of out-of-line buffer */
17945 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
17946- (u8 *)op->kp.addr + op->optinsn.size);
17947+ (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
17948
17949 flush_icache_range((unsigned long) buf,
17950 (unsigned long) buf + TMPL_END_IDX +
17951@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
17952 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
17953
17954 /* Backup instructions which will be replaced by jump address */
17955- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
17956+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
17957 RELATIVE_ADDR_SIZE);
17958
17959 insn_buf[0] = RELATIVEJUMP_OPCODE;
fe2de317 17960diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
c6e2a6c8 17961index e213fc8..d783ba4 100644
17962--- a/arch/x86/kernel/kprobes.c
17963+++ b/arch/x86/kernel/kprobes.c
c6e2a6c8 17964@@ -120,8 +120,11 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
17965 } __attribute__((packed)) *insn;
17966
bc901d79 17967 insn = (struct __arch_relative_insn *)from;
58c5fc13 17968+
ae4e228f 17969+ pax_open_kernel();
17970 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
17971 insn->op = op;
ae4e228f 17972+ pax_close_kernel();
17973 }
17974
df50ba0c 17975 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
c6e2a6c8 17976@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
17977 kprobe_opcode_t opcode;
17978 kprobe_opcode_t *orig_opcodes = opcodes;
17979
17980- if (search_exception_tables((unsigned long)opcodes))
17981+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17982 return 0; /* Page fault may occur on this address. */
17983
17984 retry:
17985@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17986 /* Another subsystem puts a breakpoint, failed to recover */
17987 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
17988 return 0;
ae4e228f 17989+ pax_open_kernel();
df50ba0c 17990 memcpy(dest, insn.kaddr, insn.length);
ae4e228f 17991+ pax_close_kernel();
58c5fc13 17992
17993 #ifdef CONFIG_X86_64
17994 if (insn_rip_relative(&insn)) {
17995@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
17996 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
17997 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
17998 disp = (u8 *) dest + insn_offset_displacement(&insn);
17999+ pax_open_kernel();
18000 *(s32 *) disp = (s32) newdisp;
18001+ pax_close_kernel();
18002 }
18003 #endif
18004 return insn.length;
c6e2a6c8 18005@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
18006 * nor set current_kprobe, because it doesn't use single
18007 * stepping.
18008 */
18009- regs->ip = (unsigned long)p->ainsn.insn;
18010+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18011 preempt_enable_no_resched();
18012 return;
18013 }
c6e2a6c8 18014@@ -504,7 +511,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
18015 if (p->opcode == BREAKPOINT_INSTRUCTION)
18016 regs->ip = (unsigned long)p->addr;
18017 else
18018- regs->ip = (unsigned long)p->ainsn.insn;
18019+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
18020 }
18021
df50ba0c 18022 /*
c6e2a6c8 18023@@ -583,7 +590,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
18024 setup_singlestep(p, regs, kcb, 0);
18025 return 1;
18026 }
18027- } else if (*addr != BREAKPOINT_INSTRUCTION) {
18028+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
18029 /*
18030 * The breakpoint instruction was removed right
18031 * after we hit it. Another cpu has removed
c6e2a6c8 18032@@ -628,6 +635,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
18033 " movq %rax, 152(%rsp)\n"
18034 RESTORE_REGS_STRING
18035 " popfq\n"
4c928ab7 18036+#ifdef KERNEXEC_PLUGIN
18037+ " btsq $63,(%rsp)\n"
18038+#endif
18039 #else
18040 " pushf\n"
18041 SAVE_REGS_STRING
c6e2a6c8
MT
18042@@ -765,7 +775,7 @@ static void __kprobes
18043 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18044 {
18045 unsigned long *tos = stack_addr(regs);
18046- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18047+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18048 unsigned long orig_ip = (unsigned long)p->addr;
18049 kprobe_opcode_t *insn = p->ainsn.insn;
18050
c6e2a6c8 18051@@ -947,7 +957,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
18052 struct die_args *args = data;
18053 int ret = NOTIFY_DONE;
18054
18055- if (args->regs && user_mode_vm(args->regs))
18056+ if (args->regs && user_mode(args->regs))
18057 return ret;
18058
18059 switch (val) {
fe2de317 18060diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
c6e2a6c8 18061index ebc9873..1b9724b 100644
18062--- a/arch/x86/kernel/ldt.c
18063+++ b/arch/x86/kernel/ldt.c
c6e2a6c8 18064@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18065 if (reload) {
18066 #ifdef CONFIG_SMP
18067 preempt_disable();
18068- load_LDT(pc);
18069+ load_LDT_nolock(pc);
18070 if (!cpumask_equal(mm_cpumask(current->mm),
18071 cpumask_of(smp_processor_id())))
18072 smp_call_function(flush_ldt, current->mm, 1);
18073 preempt_enable();
18074 #else
18075- load_LDT(pc);
18076+ load_LDT_nolock(pc);
18077 #endif
18078 }
18079 if (oldsize) {
c6e2a6c8 18080@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18081 return err;
18082
18083 for (i = 0; i < old->size; i++)
18084- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18085+ write_ldt_entry(new->ldt, i, old->ldt + i);
18086 return 0;
18087 }
18088
MT
18090 retval = copy_ldt(&mm->context, &old_mm->context);
18091 mutex_unlock(&old_mm->context.lock);
18092 }
18093+
18094+ if (tsk == current) {
6892158b 18095+ mm->context.vdso = 0;
18096+
18097+#ifdef CONFIG_X86_32
18098+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18099+ mm->context.user_cs_base = 0UL;
18100+ mm->context.user_cs_limit = ~0UL;
18101+
18102+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18103+ cpus_clear(mm->context.cpu_user_cs_mask);
18104+#endif
18105+
18106+#endif
18107+#endif
18108+
18109+ }
18110+
18111 return retval;
18112 }
18113
MT
18115 }
18116 }
18117
18118+#ifdef CONFIG_PAX_SEGMEXEC
18119+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18120+ error = -EINVAL;
18121+ goto out_unlock;
18122+ }
18123+#endif
18124+
18125 fill_ldt(&ldt, &ldt_info);
18126 if (oldmode)
18127 ldt.avl = 0;
fe2de317 18128diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
c6e2a6c8 18129index 5b19e4d..6476a76 100644
18130--- a/arch/x86/kernel/machine_kexec_32.c
18131+++ b/arch/x86/kernel/machine_kexec_32.c
c6e2a6c8 18132@@ -26,7 +26,7 @@
58c5fc13 18133 #include <asm/cacheflush.h>
ae4e228f 18134 #include <asm/debugreg.h>
18135
18136-static void set_idt(void *newidt, __u16 limit)
18137+static void set_idt(struct desc_struct *newidt, __u16 limit)
18138 {
18139 struct desc_ptr curidt;
18140
c6e2a6c8 18141@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18142 }
18143
18144
18145-static void set_gdt(void *newgdt, __u16 limit)
18146+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18147 {
18148 struct desc_ptr curgdt;
18149
c6e2a6c8 18150@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
18151 }
18152
18153 control_page = page_address(image->control_code_page);
18154- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18155+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18156
18157 relocate_kernel_ptr = control_page;
18158 page_list[PA_CONTROL_PAGE] = __pa(control_page);
fe2de317 18159diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
MT
18161--- a/arch/x86/kernel/microcode_intel.c
18162+++ b/arch/x86/kernel/microcode_intel.c
c6e2a6c8 18163@@ -430,13 +430,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18164
18165 static int get_ucode_user(void *to, const void *from, size_t n)
18166 {
18167- return copy_from_user(to, from, n);
6e9df6a3 18168+ return copy_from_user(to, (const void __force_user *)from, n);
18169 }
18170
18171 static enum ucode_state
18172 request_microcode_user(int cpu, const void __user *buf, size_t size)
18173 {
18174- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
6e9df6a3 18175+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18176 }
18177
18178 static void microcode_fini_cpu(int cpu)
fe2de317 18179diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
c6e2a6c8 18180index f21fd94..61565cd 100644
18181--- a/arch/x86/kernel/module.c
18182+++ b/arch/x86/kernel/module.c
c6e2a6c8 18183@@ -35,15 +35,60 @@
18184 #define DEBUGP(fmt...)
18185 #endif
18186
18187-void *module_alloc(unsigned long size)
16454cff 18188+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
58c5fc13 18189 {
18190- if (PAGE_ALIGN(size) > MODULES_LEN)
18191+ if (size == 0 || PAGE_ALIGN(size) > MODULES_LEN)
58c5fc13 18192 return NULL;
18193 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
18194- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
18195+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
18196 -1, __builtin_return_address(0));
18197 }
58c5fc13 18198
18199+void *module_alloc(unsigned long size)
18200+{
18201+
18202+#ifdef CONFIG_PAX_KERNEXEC
58c5fc13 18203+ return __module_alloc(size, PAGE_KERNEL);
18204+#else
18205+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18206+#endif
18207+
18208+}
18209+
18210+#ifdef CONFIG_PAX_KERNEXEC
18211+#ifdef CONFIG_X86_32
18212+void *module_alloc_exec(unsigned long size)
18213+{
18214+ struct vm_struct *area;
18215+
18216+ if (size == 0)
18217+ return NULL;
18218+
18219+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
ae4e228f 18220+ return area ? area->addr : NULL;
18221+}
18222+EXPORT_SYMBOL(module_alloc_exec);
18223+
18224+void module_free_exec(struct module *mod, void *module_region)
18225+{
ae4e228f 18226+ vunmap(module_region);
18227+}
18228+EXPORT_SYMBOL(module_free_exec);
18229+#else
18230+void module_free_exec(struct module *mod, void *module_region)
18231+{
18232+ module_free(mod, module_region);
18233+}
18234+EXPORT_SYMBOL(module_free_exec);
18235+
18236+void *module_alloc_exec(unsigned long size)
18237+{
18238+ return __module_alloc(size, PAGE_KERNEL_RX);
18239+}
18240+EXPORT_SYMBOL(module_alloc_exec);
18241+#endif
58c5fc13 18242+#endif
ae4e228f 18243+
18244 #ifdef CONFIG_X86_32
18245 int apply_relocate(Elf32_Shdr *sechdrs,
18246 const char *strtab,
c6e2a6c8 18247@@ -54,14 +99,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18248 unsigned int i;
18249 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18250 Elf32_Sym *sym;
18251- uint32_t *location;
18252+ uint32_t *plocation, location;
58c5fc13
MT
18253
18254 DEBUGP("Applying relocate section %u to %u\n", relsec,
18255 sechdrs[relsec].sh_info);
18256 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18257 /* This is where to make the change */
18258- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18259- + rel[i].r_offset;
18260+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18261+ location = (uint32_t)plocation;
18262+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18263+ plocation = ktla_ktva((void *)plocation);
18264 /* This is the symbol it is referring to. Note that all
18265 undefined symbols have been resolved. */
18266 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
c6e2a6c8 18267@@ -70,11 +117,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18268 switch (ELF32_R_TYPE(rel[i].r_info)) {
18269 case R_386_32:
18270 /* We add the value into the location given */
18271- *location += sym->st_value;
ae4e228f 18272+ pax_open_kernel();
58c5fc13 18273+ *plocation += sym->st_value;
ae4e228f 18274+ pax_close_kernel();
18275 break;
18276 case R_386_PC32:
18277 /* Add the value, subtract its postition */
18278- *location += sym->st_value - (uint32_t)location;
ae4e228f 18279+ pax_open_kernel();
58c5fc13 18280+ *plocation += sym->st_value - location;
ae4e228f 18281+ pax_close_kernel();
58c5fc13
MT
18282 break;
18283 default:
18284 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
c6e2a6c8 18285@@ -119,21 +170,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18286 case R_X86_64_NONE:
18287 break;
18288 case R_X86_64_64:
ae4e228f 18289+ pax_open_kernel();
58c5fc13 18290 *(u64 *)loc = val;
ae4e228f 18291+ pax_close_kernel();
18292 break;
18293 case R_X86_64_32:
ae4e228f 18294+ pax_open_kernel();
58c5fc13 18295 *(u32 *)loc = val;
ae4e228f 18296+ pax_close_kernel();
58c5fc13
MT
18297 if (val != *(u32 *)loc)
18298 goto overflow;
18299 break;
18300 case R_X86_64_32S:
ae4e228f 18301+ pax_open_kernel();
58c5fc13 18302 *(s32 *)loc = val;
ae4e228f 18303+ pax_close_kernel();
18304 if ((s64)val != *(s32 *)loc)
18305 goto overflow;
18306 break;
18307 case R_X86_64_PC32:
18308 val -= (u64)loc;
ae4e228f 18309+ pax_open_kernel();
58c5fc13 18310 *(u32 *)loc = val;
ae4e228f 18311+ pax_close_kernel();
18312+
18313 #if 0
18314 if ((s64)val != *(s32 *)loc)
18315 goto overflow;
4c928ab7 18316diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
c6e2a6c8 18317index 32856fa..ce95eaa 100644
18318--- a/arch/x86/kernel/nmi.c
18319+++ b/arch/x86/kernel/nmi.c
c6e2a6c8 18320@@ -507,6 +507,17 @@ static inline void nmi_nesting_postprocess(void)
18321 dotraplinkage notrace __kprobes void
18322 do_nmi(struct pt_regs *regs, long error_code)
18323 {
18324+
18325+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18326+ if (!user_mode(regs)) {
18327+ unsigned long cs = regs->cs & 0xFFFF;
18328+ unsigned long ip = ktva_ktla(regs->ip);
18329+
18330+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
18331+ regs->ip = ip;
18332+ }
18333+#endif
18334+
5e856224 18335 nmi_nesting_preprocess(regs);
4c928ab7 18336
5e856224 18337 nmi_enter();
18338diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18339index 676b8c7..870ba04 100644
18340--- a/arch/x86/kernel/paravirt-spinlocks.c
18341+++ b/arch/x86/kernel/paravirt-spinlocks.c
18342@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
18343 arch_spin_lock(lock);
18344 }
18345
18346-struct pv_lock_ops pv_lock_ops = {
18347+struct pv_lock_ops pv_lock_ops __read_only = {
18348 #ifdef CONFIG_SMP
18349 .spin_is_locked = __ticket_spin_is_locked,
18350 .spin_is_contended = __ticket_spin_is_contended,
18351diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
c6e2a6c8 18352index ab13760..01218e0 100644
18353--- a/arch/x86/kernel/paravirt.c
18354+++ b/arch/x86/kernel/paravirt.c
c6e2a6c8 18355@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
15a11c5b
MT
18356 {
18357 return x;
18358 }
18359+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18360+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18361+#endif
18362
18363 void __init default_banner(void)
18364 {
c6e2a6c8 18365@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
58c5fc13 18366 if (opfunc == NULL)
18367 /* If there's no function, patch it with a ud2a (BUG) */
18368 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18369- else if (opfunc == _paravirt_nop)
18370+ else if (opfunc == (void *)_paravirt_nop)
18371 /* If the operation is a nop, then nop the callsite */
18372 ret = paravirt_patch_nop();
18373
18374 /* identity functions just return their single argument */
18375- else if (opfunc == _paravirt_ident_32)
18376+ else if (opfunc == (void *)_paravirt_ident_32)
18377 ret = paravirt_patch_ident_32(insnbuf, len);
18378- else if (opfunc == _paravirt_ident_64)
18379+ else if (opfunc == (void *)_paravirt_ident_64)
18380 ret = paravirt_patch_ident_64(insnbuf, len);
18381+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18382+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18383+ ret = paravirt_patch_ident_64(insnbuf, len);
18384+#endif
18385
18386 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15a11c5b 18387 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
c6e2a6c8 18388@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18389 if (insn_len > len || start == NULL)
18390 insn_len = len;
18391 else
18392- memcpy(insnbuf, start, insn_len);
18393+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18394
18395 return insn_len;
18396 }
c6e2a6c8 18397@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
18398 preempt_enable();
18399 }
18400
18401-struct pv_info pv_info = {
18402+struct pv_info pv_info __read_only = {
18403 .name = "bare hardware",
18404 .paravirt_enabled = 0,
18405 .kernel_rpl = 0,
c6e2a6c8 18406@@ -315,16 +322,16 @@ struct pv_info pv_info = {
6e9df6a3 18407 #endif
18408 };
18409
18410-struct pv_init_ops pv_init_ops = {
18411+struct pv_init_ops pv_init_ops __read_only = {
18412 .patch = native_patch,
18413 };
18414
18415-struct pv_time_ops pv_time_ops = {
18416+struct pv_time_ops pv_time_ops __read_only = {
ae4e228f 18417 .sched_clock = native_sched_clock,
6e9df6a3 18418 .steal_clock = native_steal_clock,
18419 };
18420
18421-struct pv_irq_ops pv_irq_ops = {
18422+struct pv_irq_ops pv_irq_ops __read_only = {
18423 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18424 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
ae4e228f 18425 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
c6e2a6c8 18426@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
18427 #endif
18428 };
18429
18430-struct pv_cpu_ops pv_cpu_ops = {
18431+struct pv_cpu_ops pv_cpu_ops __read_only = {
18432 .cpuid = native_cpuid,
18433 .get_debugreg = native_get_debugreg,
18434 .set_debugreg = native_set_debugreg,
c6e2a6c8 18435@@ -397,21 +404,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18436 .end_context_switch = paravirt_nop,
18437 };
18438
18439-struct pv_apic_ops pv_apic_ops = {
18440+struct pv_apic_ops pv_apic_ops __read_only = {
18441 #ifdef CONFIG_X86_LOCAL_APIC
ae4e228f
MT
18442 .startup_ipi_hook = paravirt_nop,
18443 #endif
18444 };
18445
18446-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18447+#ifdef CONFIG_X86_32
18448+#ifdef CONFIG_X86_PAE
18449+/* 64-bit pagetable entries */
18450+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18451+#else
18452 /* 32-bit pagetable entries */
18453 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18454+#endif
18455 #else
18456 /* 64-bit pagetable entries */
18457 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18458 #endif
18459
18460-struct pv_mmu_ops pv_mmu_ops = {
18461+struct pv_mmu_ops pv_mmu_ops __read_only = {
18462
18463 .read_cr2 = native_read_cr2,
18464 .write_cr2 = native_write_cr2,
c6e2a6c8 18465@@ -461,6 +473,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18466 .make_pud = PTE_IDENT,
18467
18468 .set_pgd = native_set_pgd,
18469+ .set_pgd_batched = native_set_pgd_batched,
18470 #endif
18471 #endif /* PAGETABLE_LEVELS >= 3 */
18472
c6e2a6c8 18473@@ -480,6 +493,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18474 },
18475
18476 .set_fixmap = native_set_fixmap,
18477+
18478+#ifdef CONFIG_PAX_KERNEXEC
18479+ .pax_open_kernel = native_pax_open_kernel,
18480+ .pax_close_kernel = native_pax_close_kernel,
18481+#endif
18482+
18483 };
18484
18485 EXPORT_SYMBOL_GPL(pv_time_ops);
fe2de317 18486diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
4c928ab7 18487index 35ccf75..7a15747 100644
fe2de317
MT
18488--- a/arch/x86/kernel/pci-iommu_table.c
18489+++ b/arch/x86/kernel/pci-iommu_table.c
66a7e928
MT
18490@@ -2,7 +2,7 @@
18491 #include <asm/iommu_table.h>
18492 #include <linux/string.h>
18493 #include <linux/kallsyms.h>
18494-
18495+#include <linux/sched.h>
18496
18497 #define DEBUG 1
18498
fe2de317 18499diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
c6e2a6c8 18500index 1d92a5a..7bc8c29 100644
fe2de317
MT
18501--- a/arch/x86/kernel/process.c
18502+++ b/arch/x86/kernel/process.c
c6e2a6c8 18503@@ -69,16 +69,33 @@ void free_thread_xstate(struct task_struct *tsk)
18504
18505 void free_thread_info(struct thread_info *ti)
18506 {
18507- free_thread_xstate(ti->task);
4c928ab7 18508 free_pages((unsigned long)ti, THREAD_ORDER);
18509 }
18510
18511+static struct kmem_cache *task_struct_cachep;
18512+
18513 void arch_task_cache_init(void)
18514 {
18515- task_xstate_cachep =
18516- kmem_cache_create("task_xstate", xstate_size,
18517+ /* create a slab on which task_structs can be allocated */
18518+ task_struct_cachep =
18519+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18520+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18521+
18522+ task_xstate_cachep =
18523+ kmem_cache_create("task_xstate", xstate_size,
18524 __alignof__(union thread_xstate),
18525- SLAB_PANIC | SLAB_NOTRACK, NULL);
18526+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18527+}
18528+
18529+struct task_struct *alloc_task_struct_node(int node)
18530+{
18531+ return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
18532+}
18533+
18534+void free_task_struct(struct task_struct *task)
18535+{
18536+ free_thread_xstate(task);
18537+ kmem_cache_free(task_struct_cachep, task);
18538 }
18539
18540 /*
c6e2a6c8 18541@@ -91,7 +108,7 @@ void exit_thread(void)
18542 unsigned long *bp = t->io_bitmap_ptr;
18543
18544 if (bp) {
18545- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18546+ struct tss_struct *tss = init_tss + get_cpu();
18547
18548 t->io_bitmap_ptr = NULL;
18549 clear_thread_flag(TIF_IO_BITMAP);
c6e2a6c8 18550@@ -127,7 +144,7 @@ void show_regs_common(void)
18551
18552 printk(KERN_CONT "\n");
18553 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
18554- current->pid, current->comm, print_tainted(),
18555+ task_pid_nr(current), current->comm, print_tainted(),
18556 init_utsname()->release,
18557 (int)strcspn(init_utsname()->version, " "),
18558 init_utsname()->version);
c6e2a6c8 18559@@ -141,6 +158,9 @@ void flush_thread(void)
18560 {
18561 struct task_struct *tsk = current;
18562
18563+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18564+ loadsegment(gs, 0);
18565+#endif
18566 flush_ptrace_hw_breakpoint(tsk);
18567 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
18568 /*
c6e2a6c8 18569@@ -303,10 +323,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18570 regs.di = (unsigned long) arg;
18571
18572 #ifdef CONFIG_X86_32
18573- regs.ds = __USER_DS;
18574- regs.es = __USER_DS;
18575+ regs.ds = __KERNEL_DS;
18576+ regs.es = __KERNEL_DS;
18577 regs.fs = __KERNEL_PERCPU;
18578- regs.gs = __KERNEL_STACK_CANARY;
18579+ savesegment(gs, regs.gs);
18580 #else
18581 regs.ss = __KERNEL_DS;
18582 #endif
c6e2a6c8
MT
18583@@ -392,7 +412,7 @@ static void __exit_idle(void)
18584 void exit_idle(void)
18585 {
18586 /* idle loop has pid 0 */
18587- if (current->pid)
18588+ if (task_pid_nr(current))
18589 return;
18590 __exit_idle();
18591 }
18592@@ -501,7 +521,7 @@ bool set_pm_idle_to_default(void)
fe2de317 18593
18594 return ret;
18595 }
fe2de317
MT
18596-void stop_this_cpu(void *dummy)
18597+__noreturn void stop_this_cpu(void *dummy)
18598 {
18599 local_irq_disable();
18600 /*
c6e2a6c8 18601@@ -743,16 +763,37 @@ static int __init idle_setup(char *str)
18602 }
18603 early_param("idle", idle_setup);
18604
18605-unsigned long arch_align_stack(unsigned long sp)
18606+#ifdef CONFIG_PAX_RANDKSTACK
18607+void pax_randomize_kstack(struct pt_regs *regs)
18608 {
18609- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18610- sp -= get_random_int() % 8192;
18611- return sp & ~0xf;
18612-}
18613+ struct thread_struct *thread = &current->thread;
18614+ unsigned long time;
18615
18616-unsigned long arch_randomize_brk(struct mm_struct *mm)
18617-{
18618- unsigned long range_end = mm->brk + 0x02000000;
18619- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18620-}
18621+ if (!randomize_va_space)
18622+ return;
18623+
18624+ if (v8086_mode(regs))
18625+ return;
18626
18627+ rdtscl(time);
18628+
18629+ /* P4 seems to return a 0 LSB, ignore it */
18630+#ifdef CONFIG_MPENTIUM4
18631+ time &= 0x3EUL;
18632+ time <<= 2;
18633+#elif defined(CONFIG_X86_64)
18634+ time &= 0xFUL;
18635+ time <<= 4;
18636+#else
18637+ time &= 0x1FUL;
18638+ time <<= 3;
18639+#endif
18640+
18641+ thread->sp0 ^= time;
18642+ load_sp0(init_tss + smp_processor_id(), thread);
18643+
18644+#ifdef CONFIG_X86_64
18645+ percpu_write(kernel_stack, thread->sp0);
18646+#endif
18647+}
18648+#endif
18649diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
c6e2a6c8 18650index ae68473..7b0bb71 100644
18651--- a/arch/x86/kernel/process_32.c
18652+++ b/arch/x86/kernel/process_32.c
c6e2a6c8 18653@@ -64,6 +64,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18654 unsigned long thread_saved_pc(struct task_struct *tsk)
18655 {
18656 return ((unsigned long *)tsk->thread.sp)[3];
18657+//XXX return tsk->thread.eip;
18658 }
18659
18660 void __show_regs(struct pt_regs *regs, int all)
18661@@ -73,15 +74,14 @@ void __show_regs(struct pt_regs *regs, int all)
18662 unsigned long sp;
18663 unsigned short ss, gs;
18664
18665- if (user_mode_vm(regs)) {
18666+ if (user_mode(regs)) {
18667 sp = regs->sp;
18668 ss = regs->ss & 0xffff;
18669- gs = get_user_gs(regs);
18670 } else {
18671 sp = kernel_stack_pointer(regs);
18672 savesegment(ss, ss);
18673- savesegment(gs, gs);
18674 }
18675+ gs = get_user_gs(regs);
18676
18677 show_regs_common();
18678
c6e2a6c8 18679@@ -143,13 +143,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18680 struct task_struct *tsk;
18681 int err;
18682
18683- childregs = task_pt_regs(p);
18684+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18685 *childregs = *regs;
18686 childregs->ax = 0;
18687 childregs->sp = sp;
66a7e928
MT
18688
18689 p->thread.sp = (unsigned long) childregs;
18690 p->thread.sp0 = (unsigned long) (childregs+1);
18691+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18692
18693 p->thread.ip = (unsigned long) ret_from_fork;
18694
c6e2a6c8 18695@@ -240,7 +241,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18696 struct thread_struct *prev = &prev_p->thread,
18697 *next = &next_p->thread;
18698 int cpu = smp_processor_id();
18699- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18700+ struct tss_struct *tss = init_tss + cpu;
4c928ab7 18701 fpu_switch_t fpu;
18702
18703 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
c6e2a6c8 18704@@ -264,6 +265,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18705 */
18706 lazy_save_gs(prev->gs);
18707
18708+#ifdef CONFIG_PAX_MEMORY_UDEREF
bc901d79 18709+ __set_fs(task_thread_info(next_p)->addr_limit);
18710+#endif
18711+
18712 /*
18713 * Load the per-thread Thread-Local Storage descriptor.
18714 */
c6e2a6c8 18715@@ -294,6 +299,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18716 */
18717 arch_end_context_switch(next_p);
57199397 18718
18719+ percpu_write(current_task, next_p);
18720+ percpu_write(current_tinfo, &next_p->tinfo);
57199397 18721+
18722 /*
18723 * Restore %gs if needed (which is common)
18724 */
c6e2a6c8 18725@@ -302,8 +310,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
71d190be 18726
4c928ab7 18727 switch_fpu_finish(next_p, fpu);
18728
18729- percpu_write(current_task, next_p);
18730-
18731 return prev_p;
18732 }
18733
c6e2a6c8 18734@@ -333,4 +339,3 @@ unsigned long get_wchan(struct task_struct *p)
18735 } while (count++ < 16);
18736 return 0;
18737 }
18738-
fe2de317 18739diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
c6e2a6c8 18740index 43d8b48..c45d566 100644
18741--- a/arch/x86/kernel/process_64.c
18742+++ b/arch/x86/kernel/process_64.c
c6e2a6c8 18743@@ -162,8 +162,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18744 struct pt_regs *childregs;
18745 struct task_struct *me = current;
18746
18747- childregs = ((struct pt_regs *)
18748- (THREAD_SIZE + task_stack_page(p))) - 1;
66a7e928 18749+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18750 *childregs = *regs;
18751
18752 childregs->ax = 0;
c6e2a6c8 18753@@ -175,6 +174,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18754 p->thread.sp = (unsigned long) childregs;
18755 p->thread.sp0 = (unsigned long) (childregs+1);
18756 p->thread.usersp = me->thread.usersp;
18757+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18758
18759 set_tsk_thread_flag(p, TIF_FORK);
18760
c6e2a6c8 18761@@ -280,7 +280,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18762 struct thread_struct *prev = &prev_p->thread;
18763 struct thread_struct *next = &next_p->thread;
18764 int cpu = smp_processor_id();
18765- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18766+ struct tss_struct *tss = init_tss + cpu;
18767 unsigned fsindex, gsindex;
4c928ab7 18768 fpu_switch_t fpu;
57199397 18769
c6e2a6c8 18770@@ -362,10 +362,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18771 prev->usersp = percpu_read(old_rsp);
18772 percpu_write(old_rsp, next->usersp);
18773 percpu_write(current_task, next_p);
18774+ percpu_write(current_tinfo, &next_p->tinfo);
18775
18776- percpu_write(kernel_stack,
18777- (unsigned long)task_stack_page(next_p) +
18778- THREAD_SIZE - KERNEL_STACK_OFFSET);
18779+ percpu_write(kernel_stack, next->sp0);
18780
18781 /*
18782 * Now maybe reload the debug registers and handle I/O bitmaps
c6e2a6c8 18783@@ -434,12 +433,11 @@ unsigned long get_wchan(struct task_struct *p)
18784 if (!p || p == current || p->state == TASK_RUNNING)
18785 return 0;
18786 stack = (unsigned long)task_stack_page(p);
18787- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
66a7e928 18788+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18789 return 0;
18790 fp = *(u64 *)(p->thread.sp);
18791 do {
18792- if (fp < (unsigned long)stack ||
18793- fp >= (unsigned long)stack+THREAD_SIZE)
66a7e928 18794+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18795 return 0;
18796 ip = *(u64 *)(fp+8);
18797 if (!in_sched_functions(ip))
fe2de317 18798diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
c6e2a6c8 18799index cf11783..e7ce551 100644
18800--- a/arch/x86/kernel/ptrace.c
18801+++ b/arch/x86/kernel/ptrace.c
c6e2a6c8 18802@@ -824,7 +824,7 @@ long arch_ptrace(struct task_struct *child, long request,
bc901d79 18803 unsigned long addr, unsigned long data)
18804 {
18805 int ret;
18806- unsigned long __user *datap = (unsigned long __user *)data;
18807+ unsigned long __user *datap = (__force unsigned long __user *)data;
18808
18809 switch (request) {
18810 /* read the word at location addr in the USER area. */
c6e2a6c8 18811@@ -909,14 +909,14 @@ long arch_ptrace(struct task_struct *child, long request,
bc901d79 18812 if ((int) addr < 0)
18813 return -EIO;
18814 ret = do_get_thread_area(child, addr,
18815- (struct user_desc __user *)data);
18816+ (__force struct user_desc __user *) data);
18817 break;
18818
18819 case PTRACE_SET_THREAD_AREA:
bc901d79 18820 if ((int) addr < 0)
18821 return -EIO;
18822 ret = do_set_thread_area(child, addr,
18823- (struct user_desc __user *)data, 0);
18824+ (__force struct user_desc __user *) data, 0);
18825 break;
18826 #endif
18827
c6e2a6c8 18828@@ -1426,7 +1426,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
18829 memset(info, 0, sizeof(*info));
18830 info->si_signo = SIGTRAP;
18831 info->si_code = si_code;
18832- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
18833+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
18834 }
18835
18836 void user_single_step_siginfo(struct task_struct *tsk,
c6e2a6c8 18837@@ -1455,6 +1455,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18838 # define IS_IA32 0
18839 #endif
18840
18841+#ifdef CONFIG_GRKERNSEC_SETXID
18842+extern void gr_delayed_cred_worker(void);
18843+#endif
18844+
18845 /*
18846 * We must return the syscall number to actually look up in the table.
18847 * This can be -1L to skip running any syscall at all.
c6e2a6c8 18848@@ -1463,6 +1467,11 @@ long syscall_trace_enter(struct pt_regs *regs)
18849 {
18850 long ret = 0;
18851
18852+#ifdef CONFIG_GRKERNSEC_SETXID
18853+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18854+ gr_delayed_cred_worker();
18855+#endif
18856+
18857 /*
18858 * If we stepped into a sysenter/syscall insn, it trapped in
18859 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
c6e2a6c8 18860@@ -1506,6 +1515,11 @@ void syscall_trace_leave(struct pt_regs *regs)
18861 {
18862 bool step;
18863
18864+#ifdef CONFIG_GRKERNSEC_SETXID
18865+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
18866+ gr_delayed_cred_worker();
18867+#endif
18868+
18869 audit_syscall_exit(regs);
18870
18871 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
fe2de317
MT
18872diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
18873index 42eb330..139955c 100644
18874--- a/arch/x86/kernel/pvclock.c
18875+++ b/arch/x86/kernel/pvclock.c
18876@@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
66a7e928
MT
18877 return pv_tsc_khz;
18878 }
18879
18880-static atomic64_t last_value = ATOMIC64_INIT(0);
18881+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
18882
18883 void pvclock_resume(void)
18884 {
18885- atomic64_set(&last_value, 0);
18886+ atomic64_set_unchecked(&last_value, 0);
18887 }
18888
18889 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
fe2de317 18890@@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
66a7e928
MT
18891 * updating at the same time, and one of them could be slightly behind,
18892 * making the assumption that last_value always go forward fail to hold.
18893 */
18894- last = atomic64_read(&last_value);
18895+ last = atomic64_read_unchecked(&last_value);
18896 do {
18897 if (ret < last)
18898 return last;
18899- last = atomic64_cmpxchg(&last_value, last, ret);
18900+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
18901 } while (unlikely(last != ret));
18902
18903 return ret;
fe2de317 18904diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
572b4308 18905index 3034ee5..7cfbfa6 100644
fe2de317
MT
18906--- a/arch/x86/kernel/reboot.c
18907+++ b/arch/x86/kernel/reboot.c
66a7e928 18908@@ -35,7 +35,7 @@ void (*pm_power_off)(void);
58c5fc13
MT
18909 EXPORT_SYMBOL(pm_power_off);
18910
18911 static const struct desc_ptr no_idt = {};
18912-static int reboot_mode;
18913+static unsigned short reboot_mode;
15a11c5b 18914 enum reboot_type reboot_type = BOOT_ACPI;
58c5fc13
MT
18915 int reboot_force;
18916
5e856224 18917@@ -335,13 +335,17 @@ core_initcall(reboot_init);
66a7e928
MT
18918 extern const unsigned char machine_real_restart_asm[];
18919 extern const u64 machine_real_restart_gdt[3];
18920
18921-void machine_real_restart(unsigned int type)
18922+__noreturn void machine_real_restart(unsigned int type)
58c5fc13 18923 {
66a7e928
MT
18924 void *restart_va;
18925 unsigned long restart_pa;
18926- void (*restart_lowmem)(unsigned int);
18927+ void (* __noreturn restart_lowmem)(unsigned int);
18928 u64 *lowmem_gdt;
18929
18930+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18931+ struct desc_struct *gdt;
18932+#endif
18933+
58c5fc13
MT
18934 local_irq_disable();
18935
66a7e928 18936 /* Write zero to CMOS register number 0x0f, which the BIOS POST
5e856224 18937@@ -367,14 +371,14 @@ void machine_real_restart(unsigned int type)
58c5fc13
MT
18938 boot)". This seems like a fairly standard thing that gets set by
18939 REBOOT.COM programs, and the previous reset routine did this
18940 too. */
18941- *((unsigned short *)0x472) = reboot_mode;
18942+ *(unsigned short *)(__va(0x472)) = reboot_mode;
18943
66a7e928
MT
18944 /* Patch the GDT in the low memory trampoline */
18945 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
18946
18947 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
18948 restart_pa = virt_to_phys(restart_va);
18949- restart_lowmem = (void (*)(unsigned int))restart_pa;
18950+ restart_lowmem = (void *)restart_pa;
18951
18952 /* GDT[0]: GDT self-pointer */
18953 lowmem_gdt[0] =
5e856224 18954@@ -385,7 +389,33 @@ void machine_real_restart(unsigned int type)
66a7e928
MT
18955 GDT_ENTRY(0x009b, restart_pa, 0xffff);
18956
18957 /* Jump to the identity-mapped low memory code */
18958+
18959+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
18960+ gdt = get_cpu_gdt_table(smp_processor_id());
18961+ pax_open_kernel();
18962+#ifdef CONFIG_PAX_MEMORY_UDEREF
18963+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
18964+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
18965+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
18966+#endif
18967+#ifdef CONFIG_PAX_KERNEXEC
18968+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
18969+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
18970+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
18971+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
18972+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
18973+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
18974+#endif
18975+ pax_close_kernel();
18976+#endif
18977+
18978+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18979+ asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
18980+ unreachable();
18981+#else
18982 restart_lowmem(type);
18983+#endif
18984+
18985 }
18986 #ifdef CONFIG_APM_MODULE
18987 EXPORT_SYMBOL(machine_real_restart);
572b4308 18988@@ -564,7 +594,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
15a11c5b
MT
18989 * try to force a triple fault and then cycle between hitting the keyboard
18990 * controller and doing that
18991 */
66a7e928
MT
18992-static void native_machine_emergency_restart(void)
18993+__noreturn static void native_machine_emergency_restart(void)
18994 {
18995 int i;
15a11c5b 18996 int attempt = 0;
572b4308 18997@@ -688,13 +718,13 @@ void native_machine_shutdown(void)
66a7e928
MT
18998 #endif
18999 }
19000
19001-static void __machine_emergency_restart(int emergency)
19002+static __noreturn void __machine_emergency_restart(int emergency)
19003 {
19004 reboot_emergency = emergency;
19005 machine_ops.emergency_restart();
19006 }
19007
19008-static void native_machine_restart(char *__unused)
19009+static __noreturn void native_machine_restart(char *__unused)
19010 {
19011 printk("machine restart\n");
19012
572b4308 19013@@ -703,7 +733,7 @@ static void native_machine_restart(char *__unused)
66a7e928
MT
19014 __machine_emergency_restart(0);
19015 }
19016
19017-static void native_machine_halt(void)
19018+static __noreturn void native_machine_halt(void)
19019 {
19020 /* stop other cpus and apics */
19021 machine_shutdown();
572b4308 19022@@ -714,7 +744,7 @@ static void native_machine_halt(void)
66a7e928
MT
19023 stop_this_cpu(NULL);
19024 }
19025
19026-static void native_machine_power_off(void)
19027+__noreturn static void native_machine_power_off(void)
19028 {
19029 if (pm_power_off) {
19030 if (!reboot_force)
572b4308 19031@@ -723,6 +753,7 @@ static void native_machine_power_off(void)
66a7e928
MT
19032 }
19033 /* a fallback in case there is no PM info available */
19034 tboot_shutdown(TB_SHUTDOWN_HALT);
19035+ unreachable();
19036 }
19037
19038 struct machine_ops machine_ops = {
fe2de317
MT
19039diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19040index 7a6f3b3..bed145d7 100644
19041--- a/arch/x86/kernel/relocate_kernel_64.S
19042+++ b/arch/x86/kernel/relocate_kernel_64.S
19043@@ -11,6 +11,7 @@
19044 #include <asm/kexec.h>
19045 #include <asm/processor-flags.h>
19046 #include <asm/pgtable_types.h>
19047+#include <asm/alternative-asm.h>
19048
19049 /*
19050 * Must be relocatable PIC code callable as a C function
19051@@ -160,13 +161,14 @@ identity_mapped:
19052 xorq %rbp, %rbp
19053 xorq %r8, %r8
19054 xorq %r9, %r9
19055- xorq %r10, %r9
19056+ xorq %r10, %r10
19057 xorq %r11, %r11
19058 xorq %r12, %r12
19059 xorq %r13, %r13
19060 xorq %r14, %r14
19061 xorq %r15, %r15
19062
19063+ pax_force_retaddr 0, 1
19064 ret
19065
19066 1:
19067diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
c6e2a6c8 19068index 1a29015..712f324 100644
fe2de317
MT
19069--- a/arch/x86/kernel/setup.c
19070+++ b/arch/x86/kernel/setup.c
c6e2a6c8 19071@@ -447,7 +447,7 @@ static void __init parse_setup_data(void)
6e9df6a3
MT
19072
19073 switch (data->type) {
19074 case SETUP_E820_EXT:
19075- parse_e820_ext(data);
19076+ parse_e820_ext((struct setup_data __force_kernel *)data);
19077 break;
19078 case SETUP_DTB:
19079 add_dtb(pa_data);
c6e2a6c8 19080@@ -639,7 +639,7 @@ static void __init trim_bios_range(void)
efbe55a5
MT
19081 * area (640->1Mb) as ram even though it is not.
19082 * take them out.
19083 */
19084- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
19085+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
19086 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
19087 }
19088
c6e2a6c8 19089@@ -763,14 +763,14 @@ void __init setup_arch(char **cmdline_p)
58c5fc13
MT
19090
19091 if (!boot_params.hdr.root_flags)
19092 root_mountflags &= ~MS_RDONLY;
19093- init_mm.start_code = (unsigned long) _text;
19094- init_mm.end_code = (unsigned long) _etext;
19095+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19096+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19097 init_mm.end_data = (unsigned long) _edata;
19098 init_mm.brk = _brk_end;
19099
19100- code_resource.start = virt_to_phys(_text);
19101- code_resource.end = virt_to_phys(_etext)-1;
19102- data_resource.start = virt_to_phys(_etext);
19103+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19104+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19105+ data_resource.start = virt_to_phys(_sdata);
19106 data_resource.end = virt_to_phys(_edata)-1;
19107 bss_resource.start = virt_to_phys(&__bss_start);
19108 bss_resource.end = virt_to_phys(&__bss_stop)-1;
fe2de317 19109diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
c6e2a6c8 19110index 5a98aa2..2f9288d 100644
fe2de317
MT
19111--- a/arch/x86/kernel/setup_percpu.c
19112+++ b/arch/x86/kernel/setup_percpu.c
57199397
MT
19113@@ -21,19 +21,17 @@
19114 #include <asm/cpu.h>
19115 #include <asm/stackprotector.h>
58c5fc13 19116
6892158b 19117-DEFINE_PER_CPU(int, cpu_number);
58c5fc13 19118+#ifdef CONFIG_SMP
6892158b 19119+DEFINE_PER_CPU(unsigned int, cpu_number);
58c5fc13
MT
19120 EXPORT_PER_CPU_SYMBOL(cpu_number);
19121+#endif
19122
19123-#ifdef CONFIG_X86_64
19124 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19125-#else
19126-#define BOOT_PERCPU_OFFSET 0
19127-#endif
19128
19129 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19130 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19131
19132-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19133+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19134 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19135 };
19136 EXPORT_SYMBOL(__per_cpu_offset);
c6e2a6c8 19137@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
58c5fc13
MT
19138 {
19139 #ifdef CONFIG_X86_32
ae4e228f
MT
19140 struct desc_struct gdt;
19141+ unsigned long base = per_cpu_offset(cpu);
19142
58c5fc13
MT
19143- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19144- 0x2 | DESCTYPE_S, 0x8);
19145- gdt.s = 1;
ae4e228f
MT
19146+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19147+ 0x83 | DESCTYPE_S, 0xC);
19148 write_gdt_entry(get_cpu_gdt_table(cpu),
19149 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
58c5fc13 19150 #endif
c6e2a6c8 19151@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
58c5fc13
MT
19152 /* alrighty, percpu areas up and running */
19153 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19154 for_each_possible_cpu(cpu) {
19155+#ifdef CONFIG_CC_STACKPROTECTOR
15a11c5b
MT
19156+#ifdef CONFIG_X86_32
19157+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
58c5fc13
MT
19158+#endif
19159+#endif
ae4e228f 19160 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
58c5fc13
MT
19161 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19162 per_cpu(cpu_number, cpu) = cpu;
c6e2a6c8 19163@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
66a7e928 19164 */
57199397 19165 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
58c5fc13 19166 #endif
58c5fc13 19167+#ifdef CONFIG_CC_STACKPROTECTOR
15a11c5b
MT
19168+#ifdef CONFIG_X86_32
19169+ if (!cpu)
19170+ per_cpu(stack_canary.canary, cpu) = canary;
58c5fc13
MT
19171+#endif
19172+#endif
19173 /*
57199397 19174 * Up to this point, the boot CPU has been using .init.data
58c5fc13 19175 * area. Reload any changed state for the boot CPU.
fe2de317 19176diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
c6e2a6c8 19177index 115eac4..c0591d5 100644
fe2de317
MT
19178--- a/arch/x86/kernel/signal.c
19179+++ b/arch/x86/kernel/signal.c
c6e2a6c8 19180@@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
58c5fc13
MT
19181 * Align the stack pointer according to the i386 ABI,
19182 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19183 */
19184- sp = ((sp + 4) & -16ul) - 4;
19185+ sp = ((sp - 12) & -16ul) - 4;
19186 #else /* !CONFIG_X86_32 */
19187 sp = round_down(sp, 16) - 8;
19188 #endif
c6e2a6c8 19189@@ -241,11 +241,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
ae4e228f
MT
19190 * Return an always-bogus address instead so we will die with SIGSEGV.
19191 */
19192 if (onsigstack && !likely(on_sig_stack(sp)))
19193- return (void __user *)-1L;
19194+ return (__force void __user *)-1L;
19195
19196 /* save i387 state */
19197 if (used_math() && save_i387_xstate(*fpstate) < 0)
19198- return (void __user *)-1L;
19199+ return (__force void __user *)-1L;
19200
19201 return (void __user *)sp;
19202 }
c6e2a6c8 19203@@ -300,9 +300,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
58c5fc13
MT
19204 }
19205
19206 if (current->mm->context.vdso)
19207- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
ae4e228f 19208+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
58c5fc13
MT
19209 else
19210- restorer = &frame->retcode;
19211+ restorer = (void __user *)&frame->retcode;
19212 if (ka->sa.sa_flags & SA_RESTORER)
19213 restorer = ka->sa.sa_restorer;
19214
c6e2a6c8 19215@@ -316,7 +316,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
ae4e228f
MT
19216 * reasons and because gdb uses it as a signature to notice
19217 * signal handler stack frames.
19218 */
19219- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19220+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19221
19222 if (err)
19223 return -EFAULT;
c6e2a6c8 19224@@ -370,7 +370,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
58c5fc13
MT
19225 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19226
19227 /* Set up to return from userspace. */
19228- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6892158b
MT
19229+ if (current->mm->context.vdso)
19230+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19231+ else
19232+ restorer = (void __user *)&frame->retcode;
58c5fc13
MT
19233 if (ka->sa.sa_flags & SA_RESTORER)
19234 restorer = ka->sa.sa_restorer;
19235 put_user_ex(restorer, &frame->pretcode);
c6e2a6c8 19236@@ -382,7 +385,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
ae4e228f
MT
19237 * reasons and because gdb uses it as a signature to notice
19238 * signal handler stack frames.
19239 */
19240- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19241+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19242 } put_user_catch(err);
19243
19244 if (err)
c6e2a6c8 19245@@ -773,7 +776,7 @@ static void do_signal(struct pt_regs *regs)
58c5fc13
MT
19246 * X86_32: vm86 regs switched out by assembly code before reaching
19247 * here, so testing against kernel CS suffices.
19248 */
19249- if (!user_mode(regs))
19250+ if (!user_mode_novm(regs))
19251 return;
19252
6e9df6a3 19253 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
fe2de317 19254diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
c6e2a6c8 19255index 6e1e406..edfb7cb 100644
fe2de317
MT
19256--- a/arch/x86/kernel/smpboot.c
19257+++ b/arch/x86/kernel/smpboot.c
c6e2a6c8 19258@@ -699,17 +699,20 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
71d190be
MT
19259 set_idle_for_cpu(cpu, c_idle.idle);
19260 do_rest:
19261 per_cpu(current_task, cpu) = c_idle.idle;
19262+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19263 #ifdef CONFIG_X86_32
19264 /* Stack for startup_32 can be just as for start_secondary onwards */
19265 irq_ctx_init(cpu);
19266 #else
19267 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19268 initial_gs = per_cpu_offset(cpu);
19269- per_cpu(kernel_stack, cpu) =
19270- (unsigned long)task_stack_page(c_idle.idle) -
19271- KERNEL_STACK_OFFSET + THREAD_SIZE;
66a7e928 19272+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
58c5fc13
MT
19273 #endif
19274+
ae4e228f 19275+ pax_open_kernel();
58c5fc13 19276 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
ae4e228f 19277+ pax_close_kernel();
58c5fc13
MT
19278+
19279 initial_code = (unsigned long)start_secondary;
16454cff 19280 stack_start = c_idle.idle->thread.sp;
58c5fc13 19281
c6e2a6c8 19282@@ -851,6 +854,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
df50ba0c
MT
19283
19284 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19285
19286+#ifdef CONFIG_PAX_PER_CPU_PGD
19287+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19288+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19289+ KERNEL_PGD_PTRS);
19290+#endif
19291+
6892158b 19292 err = do_boot_cpu(apicid, cpu);
6892158b 19293 if (err) {
bc901d79 19294 pr_debug("do_boot_cpu failed %d\n", err);
fe2de317
MT
19295diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19296index c346d11..d43b163 100644
19297--- a/arch/x86/kernel/step.c
19298+++ b/arch/x86/kernel/step.c
19299@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
ae4e228f 19300 struct desc_struct *desc;
58c5fc13
MT
19301 unsigned long base;
19302
19303- seg &= ~7UL;
19304+ seg >>= 3;
19305
19306 mutex_lock(&child->mm->context.lock);
19307- if (unlikely((seg >> 3) >= child->mm->context.size))
58c5fc13 19308+ if (unlikely(seg >= child->mm->context.size))
ae4e228f 19309 addr = -1L; /* bogus selector, access would fault */
58c5fc13 19310 else {
ae4e228f 19311 desc = child->mm->context.ldt + seg;
fe2de317 19312@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
bc901d79
MT
19313 addr += base;
19314 }
19315 mutex_unlock(&child->mm->context.lock);
19316- }
19317+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19318+ addr = ktla_ktva(addr);
19319
19320 return addr;
19321 }
fe2de317 19322@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
58c5fc13
MT
19323 unsigned char opcode[15];
19324 unsigned long addr = convert_ip_to_linear(child, regs);
19325
19326+ if (addr == -EINVAL)
19327+ return 0;
19328+
19329 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19330 for (i = 0; i < copied; i++) {
19331 switch (opcode[i]) {
fe2de317
MT
19332diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19333index 0b0cb5f..db6b9ed 100644
19334--- a/arch/x86/kernel/sys_i386_32.c
19335+++ b/arch/x86/kernel/sys_i386_32.c
bc901d79 19336@@ -24,17 +24,224 @@
58c5fc13
MT
19337
19338 #include <asm/syscalls.h>
19339
bc901d79
MT
19340-/*
19341- * Do a system call from kernel instead of calling sys_execve so we
19342- * end up with proper pt_regs.
19343- */
19344-int kernel_execve(const char *filename,
19345- const char *const argv[],
19346- const char *const envp[])
58c5fc13 19347+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
bc901d79
MT
19348 {
19349- long __res;
19350- asm volatile ("int $0x80"
19351- : "=a" (__res)
19352- : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
19353- return __res;
58c5fc13
MT
19354+ unsigned long pax_task_size = TASK_SIZE;
19355+
19356+#ifdef CONFIG_PAX_SEGMEXEC
19357+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19358+ pax_task_size = SEGMEXEC_TASK_SIZE;
19359+#endif
19360+
19361+ if (len > pax_task_size || addr > pax_task_size - len)
19362+ return -EINVAL;
19363+
19364+ return 0;
19365+}
19366+
58c5fc13
MT
19367+unsigned long
19368+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19369+ unsigned long len, unsigned long pgoff, unsigned long flags)
19370+{
19371+ struct mm_struct *mm = current->mm;
19372+ struct vm_area_struct *vma;
19373+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19374+
19375+#ifdef CONFIG_PAX_SEGMEXEC
19376+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19377+ pax_task_size = SEGMEXEC_TASK_SIZE;
19378+#endif
19379+
6892158b
MT
19380+ pax_task_size -= PAGE_SIZE;
19381+
58c5fc13
MT
19382+ if (len > pax_task_size)
19383+ return -ENOMEM;
19384+
19385+ if (flags & MAP_FIXED)
19386+ return addr;
19387+
19388+#ifdef CONFIG_PAX_RANDMMAP
19389+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19390+#endif
19391+
19392+ if (addr) {
19393+ addr = PAGE_ALIGN(addr);
57199397
MT
19394+ if (pax_task_size - len >= addr) {
19395+ vma = find_vma(mm, addr);
19396+ if (check_heap_stack_gap(vma, addr, len))
19397+ return addr;
19398+ }
58c5fc13
MT
19399+ }
19400+ if (len > mm->cached_hole_size) {
19401+ start_addr = addr = mm->free_area_cache;
19402+ } else {
19403+ start_addr = addr = mm->mmap_base;
19404+ mm->cached_hole_size = 0;
19405+ }
19406+
19407+#ifdef CONFIG_PAX_PAGEEXEC
ae4e228f 19408+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
58c5fc13
MT
19409+ start_addr = 0x00110000UL;
19410+
19411+#ifdef CONFIG_PAX_RANDMMAP
19412+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19413+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19414+#endif
19415+
19416+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19417+ start_addr = addr = mm->mmap_base;
19418+ else
19419+ addr = start_addr;
19420+ }
19421+#endif
19422+
19423+full_search:
19424+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19425+ /* At this point: (!vma || addr < vma->vm_end). */
19426+ if (pax_task_size - len < addr) {
19427+ /*
19428+ * Start a new search - just in case we missed
19429+ * some holes.
19430+ */
19431+ if (start_addr != mm->mmap_base) {
19432+ start_addr = addr = mm->mmap_base;
19433+ mm->cached_hole_size = 0;
19434+ goto full_search;
19435+ }
19436+ return -ENOMEM;
19437+ }
57199397
MT
19438+ if (check_heap_stack_gap(vma, addr, len))
19439+ break;
58c5fc13
MT
19440+ if (addr + mm->cached_hole_size < vma->vm_start)
19441+ mm->cached_hole_size = vma->vm_start - addr;
19442+ addr = vma->vm_end;
19443+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19444+ start_addr = addr = mm->mmap_base;
19445+ mm->cached_hole_size = 0;
19446+ goto full_search;
19447+ }
19448+ }
57199397
MT
19449+
19450+ /*
19451+ * Remember the place where we stopped the search:
19452+ */
19453+ mm->free_area_cache = addr + len;
19454+ return addr;
58c5fc13
MT
19455+}
19456+
19457+unsigned long
19458+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19459+ const unsigned long len, const unsigned long pgoff,
19460+ const unsigned long flags)
19461+{
19462+ struct vm_area_struct *vma;
19463+ struct mm_struct *mm = current->mm;
19464+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19465+
19466+#ifdef CONFIG_PAX_SEGMEXEC
19467+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19468+ pax_task_size = SEGMEXEC_TASK_SIZE;
19469+#endif
19470+
6892158b
MT
19471+ pax_task_size -= PAGE_SIZE;
19472+
58c5fc13
MT
19473+ /* requested length too big for entire address space */
19474+ if (len > pax_task_size)
19475+ return -ENOMEM;
19476+
19477+ if (flags & MAP_FIXED)
19478+ return addr;
19479+
19480+#ifdef CONFIG_PAX_PAGEEXEC
ae4e228f 19481+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
58c5fc13
MT
19482+ goto bottomup;
19483+#endif
19484+
19485+#ifdef CONFIG_PAX_RANDMMAP
19486+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19487+#endif
19488+
19489+ /* requesting a specific address */
19490+ if (addr) {
19491+ addr = PAGE_ALIGN(addr);
57199397
MT
19492+ if (pax_task_size - len >= addr) {
19493+ vma = find_vma(mm, addr);
19494+ if (check_heap_stack_gap(vma, addr, len))
19495+ return addr;
19496+ }
58c5fc13
MT
19497+ }
19498+
19499+ /* check if free_area_cache is useful for us */
19500+ if (len <= mm->cached_hole_size) {
19501+ mm->cached_hole_size = 0;
19502+ mm->free_area_cache = mm->mmap_base;
19503+ }
19504+
19505+ /* either no address requested or can't fit in requested address hole */
19506+ addr = mm->free_area_cache;
19507+
19508+ /* make sure it can fit in the remaining address space */
19509+ if (addr > len) {
19510+ vma = find_vma(mm, addr-len);
57199397 19511+ if (check_heap_stack_gap(vma, addr - len, len))
58c5fc13
MT
19512+ /* remember the address as a hint for next time */
19513+ return (mm->free_area_cache = addr-len);
19514+ }
19515+
19516+ if (mm->mmap_base < len)
19517+ goto bottomup;
19518+
19519+ addr = mm->mmap_base-len;
19520+
19521+ do {
19522+ /*
19523+ * Lookup failure means no vma is above this address,
19524+ * else if new region fits below vma->vm_start,
19525+ * return with success:
19526+ */
19527+ vma = find_vma(mm, addr);
57199397 19528+ if (check_heap_stack_gap(vma, addr, len))
58c5fc13
MT
19529+ /* remember the address as a hint for next time */
19530+ return (mm->free_area_cache = addr);
19531+
19532+ /* remember the largest hole we saw so far */
19533+ if (addr + mm->cached_hole_size < vma->vm_start)
19534+ mm->cached_hole_size = vma->vm_start - addr;
19535+
19536+ /* try just below the current vma->vm_start */
16454cff
MT
19537+ addr = skip_heap_stack_gap(vma, len);
19538+ } while (!IS_ERR_VALUE(addr));
58c5fc13
MT
19539+
19540+bottomup:
19541+ /*
19542+ * A failed mmap() very likely causes application failure,
19543+ * so fall back to the bottom-up function here. This scenario
19544+ * can happen with large stack limits and large mmap()
19545+ * allocations.
19546+ */
19547+
19548+#ifdef CONFIG_PAX_SEGMEXEC
19549+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19550+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19551+ else
19552+#endif
19553+
19554+ mm->mmap_base = TASK_UNMAPPED_BASE;
19555+
19556+#ifdef CONFIG_PAX_RANDMMAP
19557+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19558+ mm->mmap_base += mm->delta_mmap;
19559+#endif
19560+
19561+ mm->free_area_cache = mm->mmap_base;
19562+ mm->cached_hole_size = ~0UL;
19563+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19564+ /*
19565+ * Restore the topdown base:
19566+ */
19567+ mm->mmap_base = base;
19568+ mm->free_area_cache = base;
19569+ mm->cached_hole_size = ~0UL;
19570+
19571+ return addr;
bc901d79 19572 }
fe2de317 19573diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
c6e2a6c8 19574index b4d3c39..82bb73b 100644
fe2de317
MT
19575--- a/arch/x86/kernel/sys_x86_64.c
19576+++ b/arch/x86/kernel/sys_x86_64.c
4c928ab7 19577@@ -95,8 +95,8 @@ out:
58c5fc13
MT
19578 return error;
19579 }
19580
19581-static void find_start_end(unsigned long flags, unsigned long *begin,
19582- unsigned long *end)
19583+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19584+ unsigned long *begin, unsigned long *end)
19585 {
c6e2a6c8 19586 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
58c5fc13 19587 unsigned long new_begin;
4c928ab7 19588@@ -115,7 +115,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
58c5fc13
MT
19589 *begin = new_begin;
19590 }
19591 } else {
19592- *begin = TASK_UNMAPPED_BASE;
19593+ *begin = mm->mmap_base;
19594 *end = TASK_SIZE;
19595 }
19596 }
4c928ab7 19597@@ -132,16 +132,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
58c5fc13
MT
19598 if (flags & MAP_FIXED)
19599 return addr;
19600
19601- find_start_end(flags, &begin, &end);
19602+ find_start_end(mm, flags, &begin, &end);
19603
19604 if (len > end)
19605 return -ENOMEM;
19606
19607+#ifdef CONFIG_PAX_RANDMMAP
19608+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19609+#endif
19610+
19611 if (addr) {
19612 addr = PAGE_ALIGN(addr);
19613 vma = find_vma(mm, addr);
57199397
MT
19614- if (end - len >= addr &&
19615- (!vma || addr + len <= vma->vm_start))
19616+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19617 return addr;
19618 }
c6e2a6c8 19619 if (((flags & MAP_32BIT) || test_thread_flag(TIF_ADDR32))
4c928ab7 19620@@ -172,7 +175,7 @@ full_search:
57199397
MT
19621 }
19622 return -ENOMEM;
19623 }
19624- if (!vma || addr + len <= vma->vm_start) {
19625+ if (check_heap_stack_gap(vma, addr, len)) {
19626 /*
19627 * Remember the place where we stopped the search:
19628 */
4c928ab7 19629@@ -195,7 +198,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
58c5fc13
MT
19630 {
19631 struct vm_area_struct *vma;
19632 struct mm_struct *mm = current->mm;
c6e2a6c8
MT
19633- unsigned long addr = addr0, start_addr;
19634+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
58c5fc13
MT
19635
19636 /* requested length too big for entire address space */
19637 if (len > TASK_SIZE)
4c928ab7 19638@@ -208,13 +211,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
c6e2a6c8 19639 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
58c5fc13
MT
19640 goto bottomup;
19641
19642+#ifdef CONFIG_PAX_RANDMMAP
19643+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19644+#endif
19645+
19646 /* requesting a specific address */
19647 if (addr) {
19648 addr = PAGE_ALIGN(addr);
16454cff 19649- vma = find_vma(mm, addr);
57199397
MT
19650- if (TASK_SIZE - len >= addr &&
19651- (!vma || addr + len <= vma->vm_start))
16454cff
MT
19652- return addr;
19653+ if (TASK_SIZE - len >= addr) {
19654+ vma = find_vma(mm, addr);
19655+ if (check_heap_stack_gap(vma, addr, len))
19656+ return addr;
19657+ }
57199397
MT
19658 }
19659
16454cff 19660 /* check if free_area_cache is useful for us */
c6e2a6c8 19661@@ -240,7 +248,7 @@ try_again:
57199397
MT
19662 * return with success:
19663 */
19664 vma = find_vma(mm, addr);
19665- if (!vma || addr+len <= vma->vm_start)
19666+ if (check_heap_stack_gap(vma, addr, len))
19667 /* remember the address as a hint for next time */
19668 return mm->free_area_cache = addr;
19669
c6e2a6c8 19670@@ -249,8 +257,8 @@ try_again:
16454cff
MT
19671 mm->cached_hole_size = vma->vm_start - addr;
19672
19673 /* try just below the current vma->vm_start */
19674- addr = vma->vm_start-len;
19675- } while (len < vma->vm_start);
19676+ addr = skip_heap_stack_gap(vma, len);
19677+ } while (!IS_ERR_VALUE(addr));
19678
c6e2a6c8 19679 fail:
16454cff 19680 /*
4c928ab7 19681@@ -270,13 +278,21 @@ bottomup:
58c5fc13
MT
19682 * can happen with large stack limits and large mmap()
19683 * allocations.
19684 */
19685+ mm->mmap_base = TASK_UNMAPPED_BASE;
19686+
19687+#ifdef CONFIG_PAX_RANDMMAP
19688+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19689+ mm->mmap_base += mm->delta_mmap;
19690+#endif
19691+
19692+ mm->free_area_cache = mm->mmap_base;
19693 mm->cached_hole_size = ~0UL;
19694- mm->free_area_cache = TASK_UNMAPPED_BASE;
19695 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19696 /*
19697 * Restore the topdown base:
19698 */
19699- mm->free_area_cache = mm->mmap_base;
19700+ mm->mmap_base = base;
19701+ mm->free_area_cache = base;
19702 mm->cached_hole_size = ~0UL;
19703
19704 return addr;
fe2de317 19705diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
c6e2a6c8 19706index 6410744..79758f0 100644
fe2de317
MT
19707--- a/arch/x86/kernel/tboot.c
19708+++ b/arch/x86/kernel/tboot.c
4c928ab7 19709@@ -219,7 +219,7 @@ static int tboot_setup_sleep(void)
66a7e928
MT
19710
19711 void tboot_shutdown(u32 shutdown_type)
19712 {
19713- void (*shutdown)(void);
19714+ void (* __noreturn shutdown)(void);
19715
19716 if (!tboot_enabled())
19717 return;
4c928ab7 19718@@ -241,7 +241,7 @@ void tboot_shutdown(u32 shutdown_type)
66a7e928
MT
19719
19720 switch_to_tboot_pt();
19721
19722- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19723+ shutdown = (void *)tboot->shutdown_entry;
19724 shutdown();
19725
19726 /* should not reach here */
c6e2a6c8
MT
19727@@ -299,7 +299,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19728 return 0;
8308f9c9
MT
19729 }
19730
19731-static atomic_t ap_wfs_count;
19732+static atomic_unchecked_t ap_wfs_count;
19733
19734 static int tboot_wait_for_aps(int num_aps)
19735 {
c6e2a6c8 19736@@ -323,9 +323,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
8308f9c9
MT
19737 {
19738 switch (action) {
19739 case CPU_DYING:
19740- atomic_inc(&ap_wfs_count);
19741+ atomic_inc_unchecked(&ap_wfs_count);
19742 if (num_online_cpus() == 1)
19743- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19744+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19745 return NOTIFY_BAD;
19746 break;
19747 }
c6e2a6c8 19748@@ -344,7 +344,7 @@ static __init int tboot_late_init(void)
8308f9c9
MT
19749
19750 tboot_create_trampoline();
19751
19752- atomic_set(&ap_wfs_count, 0);
19753+ atomic_set_unchecked(&ap_wfs_count, 0);
19754 register_hotcpu_notifier(&tboot_cpu_notifier);
c6e2a6c8
MT
19755
19756 acpi_os_set_prepare_sleep(&tboot_sleep);
fe2de317 19757diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
c6e2a6c8 19758index c6eba2b..3303326 100644
fe2de317
MT
19759--- a/arch/x86/kernel/time.c
19760+++ b/arch/x86/kernel/time.c
4c928ab7 19761@@ -31,9 +31,9 @@ unsigned long profile_pc(struct pt_regs *regs)
ae4e228f 19762 {
58c5fc13
MT
19763 unsigned long pc = instruction_pointer(regs);
19764
58c5fc13
MT
19765- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19766+ if (!user_mode(regs) && in_lock_functions(pc)) {
19767 #ifdef CONFIG_FRAME_POINTER
19768- return *(unsigned long *)(regs->bp + sizeof(long));
19769+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19770 #else
ae4e228f
MT
19771 unsigned long *sp =
19772 (unsigned long *)kernel_stack_pointer(regs);
4c928ab7 19773@@ -42,11 +42,17 @@ unsigned long profile_pc(struct pt_regs *regs)
ae4e228f
MT
19774 * or above a saved flags. Eflags has bits 22-31 zero,
19775 * kernel addresses don't.
19776 */
58c5fc13
MT
19777+
19778+#ifdef CONFIG_PAX_KERNEXEC
19779+ return ktla_ktva(sp[0]);
19780+#else
19781 if (sp[0] >> 22)
19782 return sp[0];
19783 if (sp[1] >> 22)
19784 return sp[1];
19785 #endif
19786+
19787+#endif
19788 }
58c5fc13
MT
19789 return pc;
19790 }
fe2de317 19791diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
c6e2a6c8 19792index 9d9d2f9..ed344e4 100644
fe2de317
MT
19793--- a/arch/x86/kernel/tls.c
19794+++ b/arch/x86/kernel/tls.c
c6e2a6c8 19795@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
58c5fc13
MT
19796 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19797 return -EINVAL;
19798
19799+#ifdef CONFIG_PAX_SEGMEXEC
19800+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19801+ return -EINVAL;
19802+#endif
19803+
19804 set_tls_desc(p, idx, &info, 1);
19805
19806 return 0;
fe2de317
MT
19807diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19808index 451c0a7..e57f551 100644
19809--- a/arch/x86/kernel/trampoline_32.S
19810+++ b/arch/x86/kernel/trampoline_32.S
ae4e228f
MT
19811@@ -32,6 +32,12 @@
19812 #include <asm/segment.h>
19813 #include <asm/page_types.h>
19814
19815+#ifdef CONFIG_PAX_KERNEXEC
19816+#define ta(X) (X)
19817+#else
19818+#define ta(X) ((X) - __PAGE_OFFSET)
19819+#endif
19820+
66a7e928
MT
19821 #ifdef CONFIG_SMP
19822
19823 .section ".x86_trampoline","a"
19824@@ -62,7 +68,7 @@ r_base = .
ae4e228f
MT
19825 inc %ax # protected mode (PE) bit
19826 lmsw %ax # into protected mode
19827 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19828- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19829+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19830
19831 # These need to be in the same 64K segment as the above;
19832 # hence we don't use the boot_gdt_descr defined in head.S
fe2de317
MT
19833diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19834index 09ff517..df19fbff 100644
19835--- a/arch/x86/kernel/trampoline_64.S
19836+++ b/arch/x86/kernel/trampoline_64.S
66a7e928 19837@@ -90,7 +90,7 @@ startup_32:
6892158b
MT
19838 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19839 movl %eax, %ds
19840
19841- movl $X86_CR4_PAE, %eax
19842+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19843 movl %eax, %cr4 # Enable PAE mode
19844
19845 # Setup trampoline 4 level pagetables
19846@@ -138,7 +138,7 @@ tidt:
19847 # so the kernel can live anywhere
19848 .balign 4
19849 tgdt:
19850- .short tgdt_end - tgdt # gdt limit
19851+ .short tgdt_end - tgdt - 1 # gdt limit
19852 .long tgdt - r_base
19853 .short 0
19854 .quad 0x00cf9b000000ffff # __KERNEL32_CS
fe2de317 19855diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
c6e2a6c8 19856index ff9281f1..30cb4ac 100644
fe2de317
MT
19857--- a/arch/x86/kernel/traps.c
19858+++ b/arch/x86/kernel/traps.c
57199397 19859@@ -70,12 +70,6 @@ asmlinkage int system_call(void);
58c5fc13
MT
19860
19861 /* Do we ignore FPU interrupts ? */
19862 char ignore_fpu_irq;
19863-
19864-/*
19865- * The IDT has to be page-aligned to simplify the Pentium
ae4e228f 19866- * F0 0F bug workaround.
58c5fc13 19867- */
ae4e228f 19868-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
58c5fc13
MT
19869 #endif
19870
19871 DECLARE_BITMAP(used_vectors, NR_VECTORS);
4c928ab7 19872@@ -108,13 +102,13 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
58c5fc13 19873 }
ae4e228f
MT
19874
19875 static void __kprobes
19876-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
19877+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
19878 long error_code, siginfo_t *info)
19879 {
58c5fc13
MT
19880 struct task_struct *tsk = current;
19881
19882 #ifdef CONFIG_X86_32
19883- if (regs->flags & X86_VM_MASK) {
19884+ if (v8086_mode(regs)) {
19885 /*
19886 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
19887 * On nmi (interrupt 2), do_trap should not be called.
4c928ab7 19888@@ -125,7 +119,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
58c5fc13
MT
19889 }
19890 #endif
19891
19892- if (!user_mode(regs))
19893+ if (!user_mode_novm(regs))
19894 goto kernel_trap;
19895
19896 #ifdef CONFIG_X86_32
4c928ab7 19897@@ -148,7 +142,7 @@ trap_signal:
58c5fc13
MT
19898 printk_ratelimit()) {
19899 printk(KERN_INFO
19900 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
19901- tsk->comm, tsk->pid, str,
19902+ tsk->comm, task_pid_nr(tsk), str,
19903 regs->ip, regs->sp, error_code);
19904 print_vma_addr(" in ", regs->ip);
19905 printk("\n");
4c928ab7 19906@@ -165,8 +159,20 @@ kernel_trap:
ae4e228f
MT
19907 if (!fixup_exception(regs)) {
19908 tsk->thread.error_code = error_code;
c6e2a6c8 19909 tsk->thread.trap_nr = trapnr;
ae4e228f
MT
19910+
19911+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19912+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
19913+ str = "PAX: suspicious stack segment fault";
19914+#endif
19915+
58c5fc13
MT
19916 die(str, regs, error_code);
19917 }
19918+
19919+#ifdef CONFIG_PAX_REFCOUNT
19920+ if (trapnr == 4)
19921+ pax_report_refcount_overflow(regs);
19922+#endif
19923+
19924 return;
19925
19926 #ifdef CONFIG_X86_32
c6e2a6c8 19927@@ -259,14 +265,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
58c5fc13
MT
19928 conditional_sti(regs);
19929
19930 #ifdef CONFIG_X86_32
19931- if (regs->flags & X86_VM_MASK)
19932+ if (v8086_mode(regs))
19933 goto gp_in_vm86;
19934 #endif
19935
19936 tsk = current;
19937- if (!user_mode(regs))
19938+ if (!user_mode_novm(regs))
19939 goto gp_in_kernel;
19940
19941+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
ae4e228f 19942+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
58c5fc13
MT
19943+ struct mm_struct *mm = tsk->mm;
19944+ unsigned long limit;
19945+
19946+ down_write(&mm->mmap_sem);
19947+ limit = mm->context.user_cs_limit;
19948+ if (limit < TASK_SIZE) {
19949+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
19950+ up_write(&mm->mmap_sem);
19951+ return;
19952+ }
19953+ up_write(&mm->mmap_sem);
19954+ }
19955+#endif
19956+
19957 tsk->thread.error_code = error_code;
c6e2a6c8 19958 tsk->thread.trap_nr = X86_TRAP_GP;
58c5fc13 19959
c6e2a6c8
MT
19960@@ -299,6 +321,13 @@ gp_in_kernel:
19961 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
19962 X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
58c5fc13
MT
19963 return;
19964+
19965+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
ae4e228f 19966+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
58c5fc13
MT
19967+ die("PAX: suspicious general protection fault", regs, error_code);
19968+ else
19969+#endif
19970+
19971 die("general protection fault", regs, error_code);
19972 }
19973
c6e2a6c8 19974@@ -425,7 +454,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
ae4e228f
MT
19975 /* It's safe to allow irq's after DR6 has been saved */
19976 preempt_conditional_sti(regs);
58c5fc13 19977
ae4e228f
MT
19978- if (regs->flags & X86_VM_MASK) {
19979+ if (v8086_mode(regs)) {
c6e2a6c8
MT
19980 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
19981 X86_TRAP_DB);
bc901d79 19982 preempt_conditional_cli(regs);
c6e2a6c8 19983@@ -440,7 +469,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
ae4e228f
MT
19984 * We already checked v86 mode above, so we can check for kernel mode
19985 * by just checking the CPL of CS.
58c5fc13 19986 */
ae4e228f
MT
19987- if ((dr6 & DR_STEP) && !user_mode(regs)) {
19988+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
19989 tsk->thread.debugreg6 &= ~DR_STEP;
19990 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
19991 regs->flags &= ~X86_EFLAGS_TF;
c6e2a6c8 19992@@ -471,7 +500,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
58c5fc13 19993 return;
57199397
MT
19994 conditional_sti(regs);
19995
19996- if (!user_mode_vm(regs))
19997+ if (!user_mode(regs))
19998 {
19999 if (!fixup_exception(regs)) {
20000 task->thread.error_code = error_code;
fe2de317
MT
20001diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20002index b9242ba..50c5edd 100644
20003--- a/arch/x86/kernel/verify_cpu.S
20004+++ b/arch/x86/kernel/verify_cpu.S
15a11c5b
MT
20005@@ -20,6 +20,7 @@
20006 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20007 * arch/x86/kernel/trampoline_64.S: secondary processor verification
20008 * arch/x86/kernel/head_32.S: processor startup
20009+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20010 *
20011 * verify_cpu, returns the status of longmode and SSE in register %eax.
20012 * 0: Success 1: Failure
fe2de317 20013diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
c6e2a6c8 20014index 255f58a..5e91150 100644
fe2de317
MT
20015--- a/arch/x86/kernel/vm86_32.c
20016+++ b/arch/x86/kernel/vm86_32.c
ae4e228f
MT
20017@@ -41,6 +41,7 @@
20018 #include <linux/ptrace.h>
20019 #include <linux/audit.h>
20020 #include <linux/stddef.h>
20021+#include <linux/grsecurity.h>
20022
20023 #include <asm/uaccess.h>
20024 #include <asm/io.h>
c6e2a6c8 20025@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
58c5fc13
MT
20026 do_exit(SIGSEGV);
20027 }
20028
20029- tss = &per_cpu(init_tss, get_cpu());
20030+ tss = init_tss + get_cpu();
20031 current->thread.sp0 = current->thread.saved_sp0;
20032 current->thread.sysenter_cs = __KERNEL_CS;
20033 load_sp0(tss, &current->thread);
c6e2a6c8 20034@@ -210,6 +211,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
ae4e228f
MT
20035 struct task_struct *tsk;
20036 int tmp, ret = -EPERM;
20037
20038+#ifdef CONFIG_GRKERNSEC_VM86
20039+ if (!capable(CAP_SYS_RAWIO)) {
20040+ gr_handle_vm86();
20041+ goto out;
20042+ }
20043+#endif
20044+
20045 tsk = current;
20046 if (tsk->thread.saved_sp0)
20047 goto out;
c6e2a6c8 20048@@ -240,6 +248,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
ae4e228f
MT
20049 int tmp, ret;
20050 struct vm86plus_struct __user *v86;
20051
20052+#ifdef CONFIG_GRKERNSEC_VM86
20053+ if (!capable(CAP_SYS_RAWIO)) {
20054+ gr_handle_vm86();
20055+ ret = -EPERM;
20056+ goto out;
20057+ }
20058+#endif
20059+
20060 tsk = current;
20061 switch (cmd) {
20062 case VM86_REQUEST_IRQ:
c6e2a6c8 20063@@ -326,7 +342,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
58c5fc13
MT
20064 tsk->thread.saved_fs = info->regs32->fs;
20065 tsk->thread.saved_gs = get_user_gs(info->regs32);
20066
20067- tss = &per_cpu(init_tss, get_cpu());
20068+ tss = init_tss + get_cpu();
20069 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20070 if (cpu_has_sep)
20071 tsk->thread.sysenter_cs = 0;
c6e2a6c8 20072@@ -533,7 +549,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
ae4e228f
MT
20073 goto cannot_handle;
20074 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20075 goto cannot_handle;
20076- intr_ptr = (unsigned long __user *) (i << 2);
20077+ intr_ptr = (__force unsigned long __user *) (i << 2);
20078 if (get_user(segoffs, intr_ptr))
20079 goto cannot_handle;
20080 if ((segoffs >> 16) == BIOSSEG)
fe2de317
MT
20081diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20082index 0f703f1..9e15f64 100644
20083--- a/arch/x86/kernel/vmlinux.lds.S
20084+++ b/arch/x86/kernel/vmlinux.lds.S
57199397 20085@@ -26,6 +26,13 @@
58c5fc13
MT
20086 #include <asm/page_types.h>
20087 #include <asm/cache.h>
20088 #include <asm/boot.h>
20089+#include <asm/segment.h>
20090+
58c5fc13
MT
20091+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20092+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20093+#else
20094+#define __KERNEL_TEXT_OFFSET 0
20095+#endif
20096
20097 #undef i386 /* in case the preprocessor is a 32bit one */
20098
6e9df6a3 20099@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
ae4e228f 20100
58c5fc13
MT
20101 PHDRS {
20102 text PT_LOAD FLAGS(5); /* R_E */
57199397
MT
20103+#ifdef CONFIG_X86_32
20104+ module PT_LOAD FLAGS(5); /* R_E */
20105+#endif
ae4e228f
MT
20106+#ifdef CONFIG_XEN
20107+ rodata PT_LOAD FLAGS(5); /* R_E */
20108+#else
58c5fc13 20109+ rodata PT_LOAD FLAGS(4); /* R__ */
ae4e228f 20110+#endif
16454cff 20111 data PT_LOAD FLAGS(6); /* RW_ */
6e9df6a3 20112-#ifdef CONFIG_X86_64
58c5fc13
MT
20113+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20114 #ifdef CONFIG_SMP
ae4e228f 20115 percpu PT_LOAD FLAGS(6); /* RW_ */
58c5fc13
MT
20116 #endif
20117+ text.init PT_LOAD FLAGS(5); /* R_E */
20118+ text.exit PT_LOAD FLAGS(5); /* R_E */
20119 init PT_LOAD FLAGS(7); /* RWE */
20120-#endif
20121 note PT_NOTE FLAGS(0); /* ___ */
20122 }
20123
20124 SECTIONS
20125 {
20126 #ifdef CONFIG_X86_32
20127- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20128- phys_startup_32 = startup_32 - LOAD_OFFSET;
20129+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20130 #else
20131- . = __START_KERNEL;
20132- phys_startup_64 = startup_64 - LOAD_OFFSET;
20133+ . = __START_KERNEL;
20134 #endif
20135
20136 /* Text and read-only data */
ae4e228f
MT
20137- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20138- _text = .;
58c5fc13 20139+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
ae4e228f 20140 /* bootstrapping code */
58c5fc13
MT
20141+#ifdef CONFIG_X86_32
20142+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20143+#else
20144+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20145+#endif
20146+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
ae4e228f
MT
20147+ _text = .;
20148 HEAD_TEXT
58c5fc13 20149 #ifdef CONFIG_X86_32
58c5fc13 20150 . = ALIGN(PAGE_SIZE);
6e9df6a3 20151@@ -108,13 +128,47 @@ SECTIONS
ae4e228f
MT
20152 IRQENTRY_TEXT
20153 *(.fixup)
20154 *(.gnu.warning)
20155- /* End of text section */
20156- _etext = .;
58c5fc13
MT
20157 } :text = 0x9090
20158
20159- NOTES :text :note
20160+ . += __KERNEL_TEXT_OFFSET;
fe2de317
MT
20161
20162- EXCEPTION_TABLE(16) :text = 0x9090
58c5fc13
MT
20163+#ifdef CONFIG_X86_32
20164+ . = ALIGN(PAGE_SIZE);
58c5fc13 20165+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
ae4e228f
MT
20166+
20167+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
58c5fc13
MT
20168+ MODULES_EXEC_VADDR = .;
20169+ BYTE(0)
ae4e228f 20170+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
57199397 20171+ . = ALIGN(HPAGE_SIZE);
58c5fc13 20172+ MODULES_EXEC_END = . - 1;
58c5fc13 20173+#endif
ae4e228f
MT
20174+
20175+ } :module
58c5fc13
MT
20176+#endif
20177+
57199397 20178+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
ae4e228f
MT
20179+ /* End of text section */
20180+ _etext = . - __KERNEL_TEXT_OFFSET;
57199397 20181+ }
15a11c5b 20182+
57199397
MT
20183+#ifdef CONFIG_X86_32
20184+ . = ALIGN(PAGE_SIZE);
20185+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20186+ *(.idt)
20187+ . = ALIGN(PAGE_SIZE);
20188+ *(.empty_zero_page)
bc901d79
MT
20189+ *(.initial_pg_fixmap)
20190+ *(.initial_pg_pmd)
20191+ *(.initial_page_table)
57199397
MT
20192+ *(.swapper_pg_dir)
20193+ } :rodata
20194+#endif
20195+
20196+ . = ALIGN(PAGE_SIZE);
20197+ NOTES :rodata :note
fe2de317 20198+
57199397
MT
20199+ EXCEPTION_TABLE(16) :rodata
20200
16454cff
MT
20201 #if defined(CONFIG_DEBUG_RODATA)
20202 /* .text should occupy whole number of pages */
6e9df6a3 20203@@ -126,16 +180,20 @@ SECTIONS
57199397
MT
20204
20205 /* Data */
20206 .data : AT(ADDR(.data) - LOAD_OFFSET) {
58c5fc13
MT
20207+
20208+#ifdef CONFIG_PAX_KERNEXEC
bc901d79 20209+ . = ALIGN(HPAGE_SIZE);
58c5fc13 20210+#else
bc901d79 20211+ . = ALIGN(PAGE_SIZE);
58c5fc13
MT
20212+#endif
20213+
20214 /* Start of data section */
20215 _sdata = .;
20216
20217 /* init_task */
20218 INIT_TASK_DATA(THREAD_SIZE)
20219
20220-#ifdef CONFIG_X86_32
20221- /* 32 bit has nosave before _edata */
20222 NOSAVE_DATA
20223-#endif
20224
20225 PAGE_ALIGNED_DATA(PAGE_SIZE)
ae4e228f 20226
6e9df6a3 20227@@ -176,12 +234,19 @@ SECTIONS
58c5fc13
MT
20228 #endif /* CONFIG_X86_64 */
20229
20230 /* Init code and data - will be freed after init */
20231- . = ALIGN(PAGE_SIZE);
20232 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20233+ BYTE(0)
20234+
20235+#ifdef CONFIG_PAX_KERNEXEC
57199397 20236+ . = ALIGN(HPAGE_SIZE);
58c5fc13
MT
20237+#else
20238+ . = ALIGN(PAGE_SIZE);
20239+#endif
20240+
20241 __init_begin = .; /* paired with __init_end */
20242- }
20243+ } :init.begin
20244
20245-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20246+#ifdef CONFIG_SMP
20247 /*
20248 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20249 * output PHDR, so the next output section - .init.text - should
6e9df6a3 20250@@ -190,12 +255,27 @@ SECTIONS
66a7e928 20251 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
58c5fc13
MT
20252 #endif
20253
ae4e228f 20254- INIT_TEXT_SECTION(PAGE_SIZE)
58c5fc13
MT
20255-#ifdef CONFIG_X86_64
20256- :init
20257-#endif
ae4e228f
MT
20258+ . = ALIGN(PAGE_SIZE);
20259+ init_begin = .;
20260+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20261+ VMLINUX_SYMBOL(_sinittext) = .;
20262+ INIT_TEXT
20263+ VMLINUX_SYMBOL(_einittext) = .;
20264+ . = ALIGN(PAGE_SIZE);
58c5fc13 20265+ } :text.init
bc901d79
MT
20266
20267- INIT_DATA_SECTION(16)
58c5fc13
MT
20268+ /*
20269+ * .exit.text is discard at runtime, not link time, to deal with
20270+ * references from .altinstructions and .eh_frame
20271+ */
57199397 20272+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
58c5fc13
MT
20273+ EXIT_TEXT
20274+ . = ALIGN(16);
20275+ } :text.exit
20276+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
bc901d79 20277+
ae4e228f
MT
20278+ . = ALIGN(PAGE_SIZE);
20279+ INIT_DATA_SECTION(16) :init
58c5fc13 20280
66a7e928
MT
20281 /*
20282 * Code and data for a variety of lowlevel trampolines, to be
6e9df6a3 20283@@ -269,19 +349,12 @@ SECTIONS
58c5fc13 20284 }
66a7e928 20285
bc901d79 20286 . = ALIGN(8);
58c5fc13
MT
20287- /*
20288- * .exit.text is discard at runtime, not link time, to deal with
20289- * references from .altinstructions and .eh_frame
20290- */
20291- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20292- EXIT_TEXT
20293- }
bc901d79 20294
58c5fc13
MT
20295 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20296 EXIT_DATA
20297 }
58c5fc13
MT
20298
20299-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20300+#ifndef CONFIG_SMP
15a11c5b 20301 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
58c5fc13
MT
20302 #endif
20303
6e9df6a3 20304@@ -300,16 +373,10 @@ SECTIONS
df50ba0c
MT
20305 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
20306 __smp_locks = .;
20307 *(.smp_locks)
20308- . = ALIGN(PAGE_SIZE);
20309 __smp_locks_end = .;
20310+ . = ALIGN(PAGE_SIZE);
58c5fc13
MT
20311 }
20312
20313-#ifdef CONFIG_X86_64
20314- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20315- NOSAVE_DATA
20316- }
20317-#endif
20318-
20319 /* BSS */
20320 . = ALIGN(PAGE_SIZE);
20321 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
6e9df6a3 20322@@ -325,6 +392,7 @@ SECTIONS
58c5fc13
MT
20323 __brk_base = .;
20324 . += 64 * 1024; /* 64k alignment slop space */
20325 *(.brk_reservation) /* areas brk users have reserved */
57199397 20326+ . = ALIGN(HPAGE_SIZE);
58c5fc13
MT
20327 __brk_limit = .;
20328 }
20329
6e9df6a3 20330@@ -351,13 +419,12 @@ SECTIONS
58c5fc13
MT
20331 * for the boot processor.
20332 */
df50ba0c 20333 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
58c5fc13
MT
20334-INIT_PER_CPU(gdt_page);
20335 INIT_PER_CPU(irq_stack_union);
20336
20337 /*
20338 * Build-time check on the image size:
20339 */
20340-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20341+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20342 "kernel image bigger than KERNEL_IMAGE_SIZE");
20343
20344 #ifdef CONFIG_SMP
fe2de317 20345diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
c6e2a6c8 20346index 7515cf0..331a1a0 100644
fe2de317
MT
20347--- a/arch/x86/kernel/vsyscall_64.c
20348+++ b/arch/x86/kernel/vsyscall_64.c
c6e2a6c8
MT
20349@@ -54,15 +54,13 @@
20350 DEFINE_VVAR(int, vgetcpu_mode);
20351 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
15a11c5b 20352
5e856224 20353-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
6e9df6a3
MT
20354+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
20355
20356 static int __init vsyscall_setup(char *str)
20357 {
20358 if (str) {
20359 if (!strcmp("emulate", str))
20360 vsyscall_mode = EMULATE;
20361- else if (!strcmp("native", str))
20362- vsyscall_mode = NATIVE;
20363 else if (!strcmp("none", str))
20364 vsyscall_mode = NONE;
20365 else
c6e2a6c8 20366@@ -206,7 +204,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
6e9df6a3
MT
20367
20368 tsk = current;
20369 if (seccomp_mode(&tsk->seccomp))
20370- do_exit(SIGKILL);
20371+ do_group_exit(SIGKILL);
20372
5e856224
MT
20373 /*
20374 * With a real vsyscall, page faults cause SIGSEGV. We want to
c6e2a6c8 20375@@ -278,8 +276,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
6e9df6a3
MT
20376 return true;
20377
20378 sigsegv:
20379- force_sig(SIGSEGV, current);
20380- return true;
20381+ do_group_exit(SIGKILL);
20382 }
20383
20384 /*
c6e2a6c8 20385@@ -332,10 +329,7 @@ void __init map_vsyscall(void)
6e9df6a3
MT
20386 extern char __vvar_page;
20387 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
20388
20389- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
20390- vsyscall_mode == NATIVE
20391- ? PAGE_KERNEL_VSYSCALL
20392- : PAGE_KERNEL_VVAR);
20393+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
20394 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
20395 (unsigned long)VSYSCALL_START);
20396
fe2de317
MT
20397diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
20398index 9796c2f..f686fbf 100644
20399--- a/arch/x86/kernel/x8664_ksyms_64.c
20400+++ b/arch/x86/kernel/x8664_ksyms_64.c
df50ba0c
MT
20401@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
20402 EXPORT_SYMBOL(copy_user_generic_string);
20403 EXPORT_SYMBOL(copy_user_generic_unrolled);
58c5fc13 20404 EXPORT_SYMBOL(__copy_user_nocache);
ae4e228f
MT
20405-EXPORT_SYMBOL(_copy_from_user);
20406-EXPORT_SYMBOL(_copy_to_user);
58c5fc13
MT
20407
20408 EXPORT_SYMBOL(copy_page);
ae4e228f 20409 EXPORT_SYMBOL(clear_page);
fe2de317 20410diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
c6e2a6c8 20411index e62728e..5fc3a07 100644
fe2de317
MT
20412--- a/arch/x86/kernel/xsave.c
20413+++ b/arch/x86/kernel/xsave.c
c6e2a6c8 20414@@ -131,7 +131,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
ae4e228f 20415 fx_sw_user->xstate_size > fx_sw_user->extended_size)
6892158b 20416 return -EINVAL;
ae4e228f
MT
20417
20418- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
20419+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
20420 fx_sw_user->extended_size -
20421 FP_XSTATE_MAGIC2_SIZE));
6892158b 20422 if (err)
c6e2a6c8 20423@@ -267,7 +267,7 @@ fx_only:
ae4e228f
MT
20424 * the other extended state.
20425 */
20426 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
20427- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
6e9df6a3 20428+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
ae4e228f
MT
20429 }
20430
20431 /*
c6e2a6c8 20432@@ -296,7 +296,7 @@ int restore_i387_xstate(void __user *buf)
57199397 20433 if (use_xsave())
ae4e228f
MT
20434 err = restore_user_xstate(buf);
20435 else
20436- err = fxrstor_checking((__force struct i387_fxsave_struct *)
6e9df6a3 20437+ err = fxrstor_checking((struct i387_fxsave_struct __force_kernel *)
ae4e228f
MT
20438 buf);
20439 if (unlikely(err)) {
20440 /*
5e856224 20441diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
c6e2a6c8 20442index 9fed5be..18fd595 100644
5e856224
MT
20443--- a/arch/x86/kvm/cpuid.c
20444+++ b/arch/x86/kvm/cpuid.c
20445@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
20446 struct kvm_cpuid2 *cpuid,
20447 struct kvm_cpuid_entry2 __user *entries)
20448 {
20449- int r;
20450+ int r, i;
20451
20452 r = -E2BIG;
20453 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
20454 goto out;
20455 r = -EFAULT;
20456- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
20457- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20458+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
20459 goto out;
20460+ for (i = 0; i < cpuid->nent; ++i) {
20461+ struct kvm_cpuid_entry2 cpuid_entry;
20462+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
20463+ goto out;
20464+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
20465+ }
20466 vcpu->arch.cpuid_nent = cpuid->nent;
20467 kvm_apic_set_version(vcpu);
20468 kvm_x86_ops->cpuid_update(vcpu);
20469@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
20470 struct kvm_cpuid2 *cpuid,
20471 struct kvm_cpuid_entry2 __user *entries)
20472 {
20473- int r;
20474+ int r, i;
20475
20476 r = -E2BIG;
20477 if (cpuid->nent < vcpu->arch.cpuid_nent)
20478 goto out;
20479 r = -EFAULT;
20480- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
20481- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20482+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
20483 goto out;
20484+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
20485+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
20486+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
20487+ goto out;
20488+ }
20489 return 0;
20490
20491 out:
fe2de317 20492diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
c6e2a6c8 20493index 8375622..b7bca1a 100644
fe2de317
MT
20494--- a/arch/x86/kvm/emulate.c
20495+++ b/arch/x86/kvm/emulate.c
c6e2a6c8 20496@@ -252,6 +252,7 @@ struct gprefix {
4c928ab7
MT
20497
20498 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
ae4e228f
MT
20499 do { \
20500+ unsigned long _tmp; \
20501 __asm__ __volatile__ ( \
20502 _PRE_EFLAGS("0", "4", "2") \
20503 _op _suffix " %"_x"3,%1; " \
c6e2a6c8 20504@@ -266,8 +267,6 @@ struct gprefix {
ae4e228f 20505 /* Raw emulation: instruction has two explicit operands. */
4c928ab7 20506 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
ae4e228f
MT
20507 do { \
20508- unsigned long _tmp; \
20509- \
4c928ab7 20510 switch ((ctxt)->dst.bytes) { \
ae4e228f 20511 case 2: \
4c928ab7 20512 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
c6e2a6c8 20513@@ -283,7 +282,6 @@ struct gprefix {
ae4e228f 20514
4c928ab7 20515 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
ae4e228f
MT
20516 do { \
20517- unsigned long _tmp; \
4c928ab7 20518 switch ((ctxt)->dst.bytes) { \
ae4e228f 20519 case 1: \
4c928ab7 20520 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
fe2de317 20521diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
c6e2a6c8 20522index 8584322..17d5955 100644
fe2de317
MT
20523--- a/arch/x86/kvm/lapic.c
20524+++ b/arch/x86/kvm/lapic.c
5e856224 20525@@ -54,7 +54,7 @@
df50ba0c
MT
20526 #define APIC_BUS_CYCLE_NS 1
20527
20528 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
20529-#define apic_debug(fmt, arg...)
20530+#define apic_debug(fmt, arg...) do {} while (0)
20531
20532 #define APIC_LVT_NUM 6
20533 /* 14 is the version for Xeon and Pentium 8.4.8*/
fe2de317 20534diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
c6e2a6c8 20535index df5a703..63748a7 100644
fe2de317
MT
20536--- a/arch/x86/kvm/paging_tmpl.h
20537+++ b/arch/x86/kvm/paging_tmpl.h
6e9df6a3
MT
20538@@ -197,7 +197,7 @@ retry_walk:
20539 if (unlikely(kvm_is_error_hva(host_addr)))
20540 goto error;
20541
20542- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
20543+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
20544 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
20545 goto error;
20546
fe2de317 20547diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
c6e2a6c8 20548index e334389..6839087 100644
fe2de317
MT
20549--- a/arch/x86/kvm/svm.c
20550+++ b/arch/x86/kvm/svm.c
c6e2a6c8 20551@@ -3509,7 +3509,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
58c5fc13
MT
20552 int cpu = raw_smp_processor_id();
20553
ae4e228f 20554 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
58c5fc13 20555+
ae4e228f
MT
20556+ pax_open_kernel();
20557 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
20558+ pax_close_kernel();
58c5fc13
MT
20559+
20560 load_TR_desc();
20561 }
20562
c6e2a6c8 20563@@ -3887,6 +3891,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
66a7e928 20564 #endif
8308f9c9
MT
20565 #endif
20566
20567+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20568+ __set_fs(current_thread_info()->addr_limit);
20569+#endif
20570+
20571 reload_tss(vcpu);
20572
20573 local_irq_disable();
fe2de317 20574diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
c6e2a6c8 20575index 4ff0ab9..2ff68d3 100644
fe2de317
MT
20576--- a/arch/x86/kvm/vmx.c
20577+++ b/arch/x86/kvm/vmx.c
c6e2a6c8 20578@@ -1303,7 +1303,11 @@ static void reload_tss(void)
bc901d79 20579 struct desc_struct *descs;
58c5fc13 20580
bc901d79 20581 descs = (void *)gdt->address;
58c5fc13 20582+
ae4e228f 20583+ pax_open_kernel();
58c5fc13 20584 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
ae4e228f 20585+ pax_close_kernel();
58c5fc13
MT
20586+
20587 load_TR_desc();
20588 }
20589
c6e2a6c8 20590@@ -2625,8 +2629,11 @@ static __init int hardware_setup(void)
58c5fc13
MT
20591 if (!cpu_has_vmx_flexpriority())
20592 flexpriority_enabled = 0;
20593
20594- if (!cpu_has_vmx_tpr_shadow())
20595- kvm_x86_ops->update_cr8_intercept = NULL;
20596+ if (!cpu_has_vmx_tpr_shadow()) {
ae4e228f 20597+ pax_open_kernel();
58c5fc13 20598+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
ae4e228f 20599+ pax_close_kernel();
58c5fc13
MT
20600+ }
20601
ae4e228f
MT
20602 if (enable_ept && !cpu_has_vmx_ept_2m_page())
20603 kvm_disable_largepages();
c6e2a6c8 20604@@ -3642,7 +3649,7 @@ static void vmx_set_constant_host_state(void)
57199397 20605 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
58c5fc13 20606
6e9df6a3
MT
20607 asm("mov $.Lkvm_vmx_return, %0" : "=r"(tmpl));
20608- vmcs_writel(HOST_RIP, tmpl); /* 22.2.5 */
20609+ vmcs_writel(HOST_RIP, ktla_ktva(tmpl)); /* 22.2.5 */
20610
20611 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
20612 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
c6e2a6c8 20613@@ -6180,6 +6187,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
58c5fc13
MT
20614 "jmp .Lkvm_vmx_return \n\t"
20615 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
20616 ".Lkvm_vmx_return: "
20617+
20618+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20619+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
20620+ ".Lkvm_vmx_return2: "
20621+#endif
20622+
20623 /* Save guest registers, load host registers, keep flags */
66a7e928
MT
20624 "mov %0, %c[wordsize](%%"R"sp) \n\t"
20625 "pop %0 \n\t"
c6e2a6c8 20626@@ -6228,6 +6241,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
58c5fc13 20627 #endif
66a7e928
MT
20628 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
20629 [wordsize]"i"(sizeof(ulong))
58c5fc13
MT
20630+
20631+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20632+ ,[cs]"i"(__KERNEL_CS)
20633+#endif
20634+
20635 : "cc", "memory"
bc901d79 20636 , R"ax", R"bx", R"di", R"si"
58c5fc13 20637 #ifdef CONFIG_X86_64
c6e2a6c8 20638@@ -6256,7 +6274,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
6e9df6a3
MT
20639 }
20640 }
58c5fc13
MT
20641
20642- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
6892158b 20643+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
71d190be
MT
20644+
20645+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
8308f9c9 20646+ loadsegment(fs, __KERNEL_PERCPU);
71d190be
MT
20647+#endif
20648+
20649+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
20650+ __set_fs(current_thread_info()->addr_limit);
20651+#endif
20652+
6e9df6a3 20653 vmx->loaded_vmcs->launched = 1;
58c5fc13 20654
bc901d79 20655 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
fe2de317 20656diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
c6e2a6c8 20657index 185a2b8..866d2a6 100644
fe2de317
MT
20658--- a/arch/x86/kvm/x86.c
20659+++ b/arch/x86/kvm/x86.c
c6e2a6c8 20660@@ -1357,8 +1357,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
6e9df6a3
MT
20661 {
20662 struct kvm *kvm = vcpu->kvm;
20663 int lm = is_long_mode(vcpu);
20664- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20665- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20666+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
20667+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
20668 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
20669 : kvm->arch.xen_hvm_config.blob_size_32;
20670 u32 page_num = data & ~PAGE_MASK;
c6e2a6c8 20671@@ -2213,6 +2213,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
ae4e228f
MT
20672 if (n < msr_list.nmsrs)
20673 goto out;
20674 r = -EFAULT;
20675+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
20676+ goto out;
20677 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
20678 num_msrs_to_save * sizeof(u32)))
20679 goto out;
c6e2a6c8 20680@@ -2338,7 +2340,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
58c5fc13
MT
20681 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
20682 struct kvm_interrupt *irq)
20683 {
20684- if (irq->irq < 0 || irq->irq >= 256)
20685+ if (irq->irq >= 256)
20686 return -EINVAL;
20687 if (irqchip_in_kernel(vcpu->kvm))
20688 return -ENXIO;
c6e2a6c8 20689@@ -4860,7 +4862,7 @@ static void kvm_set_mmio_spte_mask(void)
6e9df6a3 20690 kvm_mmu_set_mmio_spte_mask(mask);
ae4e228f 20691 }
58c5fc13
MT
20692
20693-int kvm_arch_init(void *opaque)
20694+int kvm_arch_init(const void *opaque)
20695 {
ae4e228f 20696 int r;
15a11c5b 20697 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
fe2de317 20698diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
5e856224 20699index 642d880..44e0f3f 100644
fe2de317
MT
20700--- a/arch/x86/lguest/boot.c
20701+++ b/arch/x86/lguest/boot.c
5e856224 20702@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
15a11c5b
MT
20703 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
20704 * Launcher to reboot us.
20705 */
20706-static void lguest_restart(char *reason)
20707+static __noreturn void lguest_restart(char *reason)
20708 {
20709 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
20710+ BUG();
20711 }
20712
20713 /*G:050
fe2de317 20714diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
c6e2a6c8 20715index 00933d5..3a64af9 100644
fe2de317
MT
20716--- a/arch/x86/lib/atomic64_386_32.S
20717+++ b/arch/x86/lib/atomic64_386_32.S
66a7e928
MT
20718@@ -48,6 +48,10 @@ BEGIN(read)
20719 movl (v), %eax
20720 movl 4(v), %edx
20721 RET_ENDP
20722+BEGIN(read_unchecked)
20723+ movl (v), %eax
20724+ movl 4(v), %edx
20725+RET_ENDP
20726 #undef v
20727
20728 #define v %esi
20729@@ -55,6 +59,10 @@ BEGIN(set)
20730 movl %ebx, (v)
20731 movl %ecx, 4(v)
20732 RET_ENDP
20733+BEGIN(set_unchecked)
20734+ movl %ebx, (v)
20735+ movl %ecx, 4(v)
20736+RET_ENDP
20737 #undef v
20738
20739 #define v %esi
20740@@ -70,6 +78,20 @@ RET_ENDP
20741 BEGIN(add)
20742 addl %eax, (v)
20743 adcl %edx, 4(v)
20744+
20745+#ifdef CONFIG_PAX_REFCOUNT
20746+ jno 0f
20747+ subl %eax, (v)
20748+ sbbl %edx, 4(v)
20749+ int $4
20750+0:
20751+ _ASM_EXTABLE(0b, 0b)
20752+#endif
20753+
20754+RET_ENDP
20755+BEGIN(add_unchecked)
20756+ addl %eax, (v)
20757+ adcl %edx, 4(v)
20758 RET_ENDP
20759 #undef v
20760
20761@@ -77,6 +99,24 @@ RET_ENDP
20762 BEGIN(add_return)
20763 addl (v), %eax
20764 adcl 4(v), %edx
20765+
20766+#ifdef CONFIG_PAX_REFCOUNT
20767+ into
20768+1234:
20769+ _ASM_EXTABLE(1234b, 2f)
20770+#endif
20771+
20772+ movl %eax, (v)
20773+ movl %edx, 4(v)
20774+
20775+#ifdef CONFIG_PAX_REFCOUNT
20776+2:
20777+#endif
20778+
20779+RET_ENDP
20780+BEGIN(add_return_unchecked)
20781+ addl (v), %eax
20782+ adcl 4(v), %edx
20783 movl %eax, (v)
20784 movl %edx, 4(v)
20785 RET_ENDP
20786@@ -86,6 +126,20 @@ RET_ENDP
20787 BEGIN(sub)
20788 subl %eax, (v)
20789 sbbl %edx, 4(v)
20790+
20791+#ifdef CONFIG_PAX_REFCOUNT
20792+ jno 0f
20793+ addl %eax, (v)
20794+ adcl %edx, 4(v)
20795+ int $4
20796+0:
20797+ _ASM_EXTABLE(0b, 0b)
20798+#endif
20799+
20800+RET_ENDP
20801+BEGIN(sub_unchecked)
20802+ subl %eax, (v)
20803+ sbbl %edx, 4(v)
20804 RET_ENDP
20805 #undef v
20806
20807@@ -96,6 +150,27 @@ BEGIN(sub_return)
20808 sbbl $0, %edx
20809 addl (v), %eax
20810 adcl 4(v), %edx
20811+
20812+#ifdef CONFIG_PAX_REFCOUNT
20813+ into
20814+1234:
20815+ _ASM_EXTABLE(1234b, 2f)
20816+#endif
20817+
20818+ movl %eax, (v)
20819+ movl %edx, 4(v)
20820+
20821+#ifdef CONFIG_PAX_REFCOUNT
20822+2:
20823+#endif
20824+
20825+RET_ENDP
20826+BEGIN(sub_return_unchecked)
20827+ negl %edx
20828+ negl %eax
20829+ sbbl $0, %edx
20830+ addl (v), %eax
20831+ adcl 4(v), %edx
20832 movl %eax, (v)
20833 movl %edx, 4(v)
20834 RET_ENDP
20835@@ -105,6 +180,20 @@ RET_ENDP
20836 BEGIN(inc)
20837 addl $1, (v)
20838 adcl $0, 4(v)
20839+
20840+#ifdef CONFIG_PAX_REFCOUNT
20841+ jno 0f
20842+ subl $1, (v)
20843+ sbbl $0, 4(v)
20844+ int $4
20845+0:
20846+ _ASM_EXTABLE(0b, 0b)
20847+#endif
20848+
20849+RET_ENDP
20850+BEGIN(inc_unchecked)
20851+ addl $1, (v)
20852+ adcl $0, 4(v)
20853 RET_ENDP
20854 #undef v
20855
20856@@ -114,6 +203,26 @@ BEGIN(inc_return)
20857 movl 4(v), %edx
20858 addl $1, %eax
20859 adcl $0, %edx
20860+
20861+#ifdef CONFIG_PAX_REFCOUNT
20862+ into
20863+1234:
20864+ _ASM_EXTABLE(1234b, 2f)
20865+#endif
20866+
20867+ movl %eax, (v)
20868+ movl %edx, 4(v)
20869+
20870+#ifdef CONFIG_PAX_REFCOUNT
20871+2:
20872+#endif
20873+
20874+RET_ENDP
20875+BEGIN(inc_return_unchecked)
20876+ movl (v), %eax
20877+ movl 4(v), %edx
20878+ addl $1, %eax
20879+ adcl $0, %edx
20880 movl %eax, (v)
20881 movl %edx, 4(v)
20882 RET_ENDP
20883@@ -123,6 +232,20 @@ RET_ENDP
20884 BEGIN(dec)
20885 subl $1, (v)
20886 sbbl $0, 4(v)
20887+
20888+#ifdef CONFIG_PAX_REFCOUNT
20889+ jno 0f
20890+ addl $1, (v)
20891+ adcl $0, 4(v)
20892+ int $4
20893+0:
20894+ _ASM_EXTABLE(0b, 0b)
20895+#endif
20896+
20897+RET_ENDP
20898+BEGIN(dec_unchecked)
20899+ subl $1, (v)
20900+ sbbl $0, 4(v)
20901 RET_ENDP
20902 #undef v
20903
20904@@ -132,6 +255,26 @@ BEGIN(dec_return)
20905 movl 4(v), %edx
20906 subl $1, %eax
20907 sbbl $0, %edx
20908+
20909+#ifdef CONFIG_PAX_REFCOUNT
20910+ into
20911+1234:
20912+ _ASM_EXTABLE(1234b, 2f)
20913+#endif
20914+
20915+ movl %eax, (v)
20916+ movl %edx, 4(v)
20917+
20918+#ifdef CONFIG_PAX_REFCOUNT
20919+2:
20920+#endif
20921+
20922+RET_ENDP
20923+BEGIN(dec_return_unchecked)
20924+ movl (v), %eax
20925+ movl 4(v), %edx
20926+ subl $1, %eax
20927+ sbbl $0, %edx
20928 movl %eax, (v)
20929 movl %edx, 4(v)
20930 RET_ENDP
20931@@ -143,6 +286,13 @@ BEGIN(add_unless)
20932 adcl %edx, %edi
20933 addl (v), %eax
20934 adcl 4(v), %edx
20935+
20936+#ifdef CONFIG_PAX_REFCOUNT
20937+ into
20938+1234:
20939+ _ASM_EXTABLE(1234b, 2f)
20940+#endif
20941+
c6e2a6c8 20942 cmpl %eax, %ecx
66a7e928
MT
20943 je 3f
20944 1:
20945@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
20946 1:
20947 addl $1, %eax
20948 adcl $0, %edx
20949+
20950+#ifdef CONFIG_PAX_REFCOUNT
20951+ into
20952+1234:
20953+ _ASM_EXTABLE(1234b, 2f)
20954+#endif
20955+
20956 movl %eax, (v)
20957 movl %edx, 4(v)
20958 movl $1, %eax
20959@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
20960 movl 4(v), %edx
20961 subl $1, %eax
20962 sbbl $0, %edx
20963+
20964+#ifdef CONFIG_PAX_REFCOUNT
20965+ into
20966+1234:
20967+ _ASM_EXTABLE(1234b, 1f)
20968+#endif
20969+
20970 js 1f
20971 movl %eax, (v)
20972 movl %edx, 4(v)
fe2de317 20973diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
c6e2a6c8 20974index f5cc9eb..51fa319 100644
fe2de317
MT
20975--- a/arch/x86/lib/atomic64_cx8_32.S
20976+++ b/arch/x86/lib/atomic64_cx8_32.S
6e9df6a3 20977@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
15a11c5b
MT
20978 CFI_STARTPROC
20979
20980 read64 %ecx
6e9df6a3 20981+ pax_force_retaddr
15a11c5b 20982 ret
8308f9c9
MT
20983 CFI_ENDPROC
20984 ENDPROC(atomic64_read_cx8)
20985
20986+ENTRY(atomic64_read_unchecked_cx8)
20987+ CFI_STARTPROC
20988+
20989+ read64 %ecx
6e9df6a3 20990+ pax_force_retaddr
8308f9c9
MT
20991+ ret
20992+ CFI_ENDPROC
66a7e928 20993+ENDPROC(atomic64_read_unchecked_cx8)
8308f9c9
MT
20994+
20995 ENTRY(atomic64_set_cx8)
20996 CFI_STARTPROC
20997
6e9df6a3 20998@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
15a11c5b
MT
20999 cmpxchg8b (%esi)
21000 jne 1b
21001
6e9df6a3 21002+ pax_force_retaddr
15a11c5b 21003 ret
66a7e928
MT
21004 CFI_ENDPROC
21005 ENDPROC(atomic64_set_cx8)
21006
21007+ENTRY(atomic64_set_unchecked_cx8)
21008+ CFI_STARTPROC
21009+
21010+1:
21011+/* we don't need LOCK_PREFIX since aligned 64-bit writes
21012+ * are atomic on 586 and newer */
21013+ cmpxchg8b (%esi)
21014+ jne 1b
21015+
6e9df6a3 21016+ pax_force_retaddr
66a7e928
MT
21017+ ret
21018+ CFI_ENDPROC
21019+ENDPROC(atomic64_set_unchecked_cx8)
21020+
21021 ENTRY(atomic64_xchg_cx8)
21022 CFI_STARTPROC
21023
c6e2a6c8 21024@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
15a11c5b
MT
21025 cmpxchg8b (%esi)
21026 jne 1b
21027
6e9df6a3 21028+ pax_force_retaddr
15a11c5b 21029 ret
8308f9c9
MT
21030 CFI_ENDPROC
21031 ENDPROC(atomic64_xchg_cx8)
21032
21033-.macro addsub_return func ins insc
21034-ENTRY(atomic64_\func\()_return_cx8)
21035+.macro addsub_return func ins insc unchecked=""
21036+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21037 CFI_STARTPROC
21038 SAVE ebp
21039 SAVE ebx
c6e2a6c8 21040@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
6892158b
MT
21041 movl %edx, %ecx
21042 \ins\()l %esi, %ebx
21043 \insc\()l %edi, %ecx
21044+
8308f9c9 21045+.ifb \unchecked
6892158b
MT
21046+#ifdef CONFIG_PAX_REFCOUNT
21047+ into
21048+2:
21049+ _ASM_EXTABLE(2b, 3f)
21050+#endif
8308f9c9 21051+.endif
6892158b
MT
21052+
21053 LOCK_PREFIX
21054 cmpxchg8b (%ebp)
21055 jne 1b
21056-
21057-10:
21058 movl %ebx, %eax
21059 movl %ecx, %edx
21060+
8308f9c9 21061+.ifb \unchecked
6892158b
MT
21062+#ifdef CONFIG_PAX_REFCOUNT
21063+3:
21064+#endif
8308f9c9 21065+.endif
6892158b
MT
21066+
21067 RESTORE edi
21068 RESTORE esi
21069 RESTORE ebx
66a7e928 21070 RESTORE ebp
6e9df6a3 21071+ pax_force_retaddr
66a7e928
MT
21072 ret
21073 CFI_ENDPROC
21074-ENDPROC(atomic64_\func\()_return_cx8)
21075+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21076 .endm
8308f9c9
MT
21077
21078 addsub_return add add adc
21079 addsub_return sub sub sbb
21080+addsub_return add add adc _unchecked
66a7e928 21081+addsub_return sub sub sbb _unchecked
8308f9c9
MT
21082
21083-.macro incdec_return func ins insc
21084-ENTRY(atomic64_\func\()_return_cx8)
5e856224 21085+.macro incdec_return func ins insc unchecked=""
8308f9c9
MT
21086+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
21087 CFI_STARTPROC
21088 SAVE ebx
21089
c6e2a6c8 21090@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
6892158b
MT
21091 movl %edx, %ecx
21092 \ins\()l $1, %ebx
21093 \insc\()l $0, %ecx
21094+
8308f9c9 21095+.ifb \unchecked
6892158b
MT
21096+#ifdef CONFIG_PAX_REFCOUNT
21097+ into
21098+2:
21099+ _ASM_EXTABLE(2b, 3f)
21100+#endif
8308f9c9 21101+.endif
6892158b
MT
21102+
21103 LOCK_PREFIX
21104 cmpxchg8b (%esi)
21105 jne 1b
21106
21107-10:
21108 movl %ebx, %eax
21109 movl %ecx, %edx
21110+
8308f9c9 21111+.ifb \unchecked
6892158b
MT
21112+#ifdef CONFIG_PAX_REFCOUNT
21113+3:
21114+#endif
8308f9c9 21115+.endif
6892158b
MT
21116+
21117 RESTORE ebx
6e9df6a3 21118+ pax_force_retaddr
6892158b
MT
21119 ret
21120 CFI_ENDPROC
66a7e928
MT
21121-ENDPROC(atomic64_\func\()_return_cx8)
21122+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
21123 .endm
8308f9c9
MT
21124
21125 incdec_return inc add adc
21126 incdec_return dec sub sbb
21127+incdec_return inc add adc _unchecked
66a7e928 21128+incdec_return dec sub sbb _unchecked
8308f9c9
MT
21129
21130 ENTRY(atomic64_dec_if_positive_cx8)
21131 CFI_STARTPROC
c6e2a6c8 21132@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
66a7e928
MT
21133 movl %edx, %ecx
21134 subl $1, %ebx
21135 sbb $0, %ecx
21136+
21137+#ifdef CONFIG_PAX_REFCOUNT
21138+ into
21139+1234:
21140+ _ASM_EXTABLE(1234b, 2f)
21141+#endif
21142+
21143 js 2f
21144 LOCK_PREFIX
21145 cmpxchg8b (%esi)
c6e2a6c8 21146@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
15a11c5b
MT
21147 movl %ebx, %eax
21148 movl %ecx, %edx
21149 RESTORE ebx
6e9df6a3 21150+ pax_force_retaddr
15a11c5b
MT
21151 ret
21152 CFI_ENDPROC
21153 ENDPROC(atomic64_dec_if_positive_cx8)
c6e2a6c8 21154@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
6892158b 21155 movl %edx, %ecx
c6e2a6c8 21156 addl %ebp, %ebx
6892158b
MT
21157 adcl %edi, %ecx
21158+
21159+#ifdef CONFIG_PAX_REFCOUNT
21160+ into
21161+1234:
66a7e928 21162+ _ASM_EXTABLE(1234b, 3f)
6892158b
MT
21163+#endif
21164+
21165 LOCK_PREFIX
c6e2a6c8 21166 cmpxchg8b (%esi)
6892158b 21167 jne 1b
c6e2a6c8 21168@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
15a11c5b
MT
21169 CFI_ADJUST_CFA_OFFSET -8
21170 RESTORE ebx
21171 RESTORE ebp
6e9df6a3 21172+ pax_force_retaddr
15a11c5b
MT
21173 ret
21174 4:
21175 cmpl %edx, 4(%esp)
c6e2a6c8
MT
21176@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
21177 xorl %ecx, %ecx
6892158b 21178 addl $1, %ebx
c6e2a6c8 21179 adcl %edx, %ecx
6892158b
MT
21180+
21181+#ifdef CONFIG_PAX_REFCOUNT
21182+ into
21183+1234:
66a7e928 21184+ _ASM_EXTABLE(1234b, 3f)
6892158b
MT
21185+#endif
21186+
21187 LOCK_PREFIX
21188 cmpxchg8b (%esi)
21189 jne 1b
c6e2a6c8 21190@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
15a11c5b
MT
21191 movl $1, %eax
21192 3:
21193 RESTORE ebx
6e9df6a3 21194+ pax_force_retaddr
15a11c5b 21195 ret
c6e2a6c8
MT
21196 CFI_ENDPROC
21197 ENDPROC(atomic64_inc_not_zero_cx8)
fe2de317
MT
21198diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21199index 78d16a5..fbcf666 100644
21200--- a/arch/x86/lib/checksum_32.S
21201+++ b/arch/x86/lib/checksum_32.S
58c5fc13
MT
21202@@ -28,7 +28,8 @@
21203 #include <linux/linkage.h>
21204 #include <asm/dwarf2.h>
21205 #include <asm/errno.h>
21206-
21207+#include <asm/segment.h>
21208+
21209 /*
21210 * computes a partial checksum, e.g. for TCP/UDP fragments
21211 */
fe2de317 21212@@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
58c5fc13
MT
21213
21214 #define ARGBASE 16
21215 #define FP 12
21216-
21217-ENTRY(csum_partial_copy_generic)
21218+
21219+ENTRY(csum_partial_copy_generic_to_user)
21220 CFI_STARTPROC
bc901d79
MT
21221+
21222+#ifdef CONFIG_PAX_MEMORY_UDEREF
66a7e928
MT
21223+ pushl_cfi %gs
21224+ popl_cfi %es
58c5fc13 21225+ jmp csum_partial_copy_generic
bc901d79 21226+#endif
58c5fc13
MT
21227+
21228+ENTRY(csum_partial_copy_generic_from_user)
bc901d79
MT
21229+
21230+#ifdef CONFIG_PAX_MEMORY_UDEREF
66a7e928
MT
21231+ pushl_cfi %gs
21232+ popl_cfi %ds
bc901d79 21233+#endif
58c5fc13
MT
21234+
21235+ENTRY(csum_partial_copy_generic)
21236 subl $4,%esp
21237 CFI_ADJUST_CFA_OFFSET 4
66a7e928
MT
21238 pushl_cfi %edi
21239@@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
58c5fc13
MT
21240 jmp 4f
21241 SRC(1: movw (%esi), %bx )
21242 addl $2, %esi
21243-DST( movw %bx, (%edi) )
21244+DST( movw %bx, %es:(%edi) )
21245 addl $2, %edi
21246 addw %bx, %ax
21247 adcl $0, %eax
66a7e928 21248@@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
58c5fc13
MT
21249 SRC(1: movl (%esi), %ebx )
21250 SRC( movl 4(%esi), %edx )
21251 adcl %ebx, %eax
21252-DST( movl %ebx, (%edi) )
21253+DST( movl %ebx, %es:(%edi) )
21254 adcl %edx, %eax
21255-DST( movl %edx, 4(%edi) )
21256+DST( movl %edx, %es:4(%edi) )
21257
21258 SRC( movl 8(%esi), %ebx )
21259 SRC( movl 12(%esi), %edx )
21260 adcl %ebx, %eax
21261-DST( movl %ebx, 8(%edi) )
21262+DST( movl %ebx, %es:8(%edi) )
21263 adcl %edx, %eax
21264-DST( movl %edx, 12(%edi) )
21265+DST( movl %edx, %es:12(%edi) )
21266
21267 SRC( movl 16(%esi), %ebx )
21268 SRC( movl 20(%esi), %edx )
21269 adcl %ebx, %eax
21270-DST( movl %ebx, 16(%edi) )
21271+DST( movl %ebx, %es:16(%edi) )
21272 adcl %edx, %eax
21273-DST( movl %edx, 20(%edi) )
21274+DST( movl %edx, %es:20(%edi) )
21275
21276 SRC( movl 24(%esi), %ebx )
21277 SRC( movl 28(%esi), %edx )
21278 adcl %ebx, %eax
21279-DST( movl %ebx, 24(%edi) )
21280+DST( movl %ebx, %es:24(%edi) )
21281 adcl %edx, %eax
21282-DST( movl %edx, 28(%edi) )
21283+DST( movl %edx, %es:28(%edi) )
21284
21285 lea 32(%esi), %esi
21286 lea 32(%edi), %edi
66a7e928 21287@@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
58c5fc13
MT
21288 shrl $2, %edx # This clears CF
21289 SRC(3: movl (%esi), %ebx )
21290 adcl %ebx, %eax
21291-DST( movl %ebx, (%edi) )
21292+DST( movl %ebx, %es:(%edi) )
21293 lea 4(%esi), %esi
21294 lea 4(%edi), %edi
21295 dec %edx
66a7e928 21296@@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
58c5fc13
MT
21297 jb 5f
21298 SRC( movw (%esi), %cx )
21299 leal 2(%esi), %esi
21300-DST( movw %cx, (%edi) )
21301+DST( movw %cx, %es:(%edi) )
21302 leal 2(%edi), %edi
21303 je 6f
21304 shll $16,%ecx
21305 SRC(5: movb (%esi), %cl )
21306-DST( movb %cl, (%edi) )
21307+DST( movb %cl, %es:(%edi) )
21308 6: addl %ecx, %eax
21309 adcl $0, %eax
21310 7:
66a7e928 21311@@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
58c5fc13
MT
21312
21313 6001:
21314 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21315- movl $-EFAULT, (%ebx)
21316+ movl $-EFAULT, %ss:(%ebx)
21317
21318 # zero the complete destination - computing the rest
21319 # is too much work
66a7e928 21320@@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
58c5fc13
MT
21321
21322 6002:
21323 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21324- movl $-EFAULT,(%ebx)
21325+ movl $-EFAULT,%ss:(%ebx)
21326 jmp 5000b
21327
21328 .previous
21329
66a7e928
MT
21330+ pushl_cfi %ss
21331+ popl_cfi %ds
21332+ pushl_cfi %ss
21333+ popl_cfi %es
21334 popl_cfi %ebx
58c5fc13 21335 CFI_RESTORE ebx
66a7e928
MT
21336 popl_cfi %esi
21337@@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
21338 popl_cfi %ecx # equivalent to addl $4,%esp
58c5fc13
MT
21339 ret
21340 CFI_ENDPROC
21341-ENDPROC(csum_partial_copy_generic)
21342+ENDPROC(csum_partial_copy_generic_to_user)
21343
21344 #else
21345
21346 /* Version for PentiumII/PPro */
21347
21348 #define ROUND1(x) \
21349+ nop; nop; nop; \
21350 SRC(movl x(%esi), %ebx ) ; \
21351 addl %ebx, %eax ; \
21352- DST(movl %ebx, x(%edi) ) ;
21353+ DST(movl %ebx, %es:x(%edi)) ;
21354
21355 #define ROUND(x) \
21356+ nop; nop; nop; \
21357 SRC(movl x(%esi), %ebx ) ; \
21358 adcl %ebx, %eax ; \
21359- DST(movl %ebx, x(%edi) ) ;
21360+ DST(movl %ebx, %es:x(%edi)) ;
21361
21362 #define ARGBASE 12
21363-
21364-ENTRY(csum_partial_copy_generic)
21365+
21366+ENTRY(csum_partial_copy_generic_to_user)
21367 CFI_STARTPROC
bc901d79
MT
21368+
21369+#ifdef CONFIG_PAX_MEMORY_UDEREF
66a7e928
MT
21370+ pushl_cfi %gs
21371+ popl_cfi %es
58c5fc13 21372+ jmp csum_partial_copy_generic
bc901d79 21373+#endif
58c5fc13
MT
21374+
21375+ENTRY(csum_partial_copy_generic_from_user)
bc901d79
MT
21376+
21377+#ifdef CONFIG_PAX_MEMORY_UDEREF
66a7e928
MT
21378+ pushl_cfi %gs
21379+ popl_cfi %ds
bc901d79 21380+#endif
58c5fc13
MT
21381+
21382+ENTRY(csum_partial_copy_generic)
66a7e928 21383 pushl_cfi %ebx
58c5fc13 21384 CFI_REL_OFFSET ebx, 0
66a7e928
MT
21385 pushl_cfi %edi
21386@@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
58c5fc13
MT
21387 subl %ebx, %edi
21388 lea -1(%esi),%edx
21389 andl $-32,%edx
21390- lea 3f(%ebx,%ebx), %ebx
21391+ lea 3f(%ebx,%ebx,2), %ebx
21392 testl %esi, %esi
21393 jmp *%ebx
21394 1: addl $64,%esi
66a7e928 21395@@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
58c5fc13
MT
21396 jb 5f
21397 SRC( movw (%esi), %dx )
21398 leal 2(%esi), %esi
21399-DST( movw %dx, (%edi) )
21400+DST( movw %dx, %es:(%edi) )
21401 leal 2(%edi), %edi
21402 je 6f
21403 shll $16,%edx
21404 5:
21405 SRC( movb (%esi), %dl )
21406-DST( movb %dl, (%edi) )
21407+DST( movb %dl, %es:(%edi) )
21408 6: addl %edx, %eax
21409 adcl $0, %eax
21410 7:
21411 .section .fixup, "ax"
21412 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21413- movl $-EFAULT, (%ebx)
21414+ movl $-EFAULT, %ss:(%ebx)
21415 # zero the complete destination (computing the rest is too much work)
21416 movl ARGBASE+8(%esp),%edi # dst
21417 movl ARGBASE+12(%esp),%ecx # len
66a7e928 21418@@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
58c5fc13
MT
21419 rep; stosb
21420 jmp 7b
21421 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21422- movl $-EFAULT, (%ebx)
21423+ movl $-EFAULT, %ss:(%ebx)
21424 jmp 7b
21425 .previous
21426
bc901d79 21427+#ifdef CONFIG_PAX_MEMORY_UDEREF
66a7e928
MT
21428+ pushl_cfi %ss
21429+ popl_cfi %ds
21430+ pushl_cfi %ss
21431+ popl_cfi %es
bc901d79
MT
21432+#endif
21433+
66a7e928 21434 popl_cfi %esi
58c5fc13 21435 CFI_RESTORE esi
66a7e928
MT
21436 popl_cfi %edi
21437@@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
58c5fc13
MT
21438 CFI_RESTORE ebx
21439 ret
21440 CFI_ENDPROC
21441-ENDPROC(csum_partial_copy_generic)
21442+ENDPROC(csum_partial_copy_generic_to_user)
21443
21444 #undef ROUND
21445 #undef ROUND1
fe2de317
MT
21446diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21447index f2145cf..cea889d 100644
21448--- a/arch/x86/lib/clear_page_64.S
21449+++ b/arch/x86/lib/clear_page_64.S
6e9df6a3 21450@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
15a11c5b
MT
21451 movl $4096/8,%ecx
21452 xorl %eax,%eax
21453 rep stosq
6e9df6a3 21454+ pax_force_retaddr
15a11c5b
MT
21455 ret
21456 CFI_ENDPROC
21457 ENDPROC(clear_page_c)
6e9df6a3 21458@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
15a11c5b
MT
21459 movl $4096,%ecx
21460 xorl %eax,%eax
21461 rep stosb
6e9df6a3 21462+ pax_force_retaddr
15a11c5b
MT
21463 ret
21464 CFI_ENDPROC
21465 ENDPROC(clear_page_c_e)
6e9df6a3 21466@@ -43,6 +45,7 @@ ENTRY(clear_page)
15a11c5b
MT
21467 leaq 64(%rdi),%rdi
21468 jnz .Lloop
21469 nop
6e9df6a3 21470+ pax_force_retaddr
15a11c5b
MT
21471 ret
21472 CFI_ENDPROC
21473 .Lclear_page_end:
6e9df6a3 21474@@ -58,7 +61,7 @@ ENDPROC(clear_page)
58c5fc13
MT
21475
21476 #include <asm/cpufeature.h>
21477
21478- .section .altinstr_replacement,"ax"
21479+ .section .altinstr_replacement,"a"
21480 1: .byte 0xeb /* jmp <disp8> */
21481 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
15a11c5b 21482 2: .byte 0xeb /* jmp <disp8> */
fe2de317
MT
21483diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
21484index 1e572c5..2a162cd 100644
21485--- a/arch/x86/lib/cmpxchg16b_emu.S
21486+++ b/arch/x86/lib/cmpxchg16b_emu.S
6e9df6a3
MT
21487@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
21488
21489 popf
21490 mov $1, %al
21491+ pax_force_retaddr
21492 ret
21493
21494 not_same:
21495 popf
21496 xor %al,%al
21497+ pax_force_retaddr
21498 ret
21499
21500 CFI_ENDPROC
fe2de317 21501diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
c6e2a6c8 21502index 6b34d04..dccb07f 100644
fe2de317
MT
21503--- a/arch/x86/lib/copy_page_64.S
21504+++ b/arch/x86/lib/copy_page_64.S
6e9df6a3 21505@@ -9,6 +9,7 @@ copy_page_c:
15a11c5b
MT
21506 CFI_STARTPROC
21507 movl $4096/8,%ecx
21508 rep movsq
6e9df6a3 21509+ pax_force_retaddr
15a11c5b
MT
21510 ret
21511 CFI_ENDPROC
21512 ENDPROC(copy_page_c)
c6e2a6c8
MT
21513@@ -20,12 +21,14 @@ ENDPROC(copy_page_c)
21514
21515 ENTRY(copy_page)
21516 CFI_STARTPROC
21517- subq $2*8,%rsp
21518- CFI_ADJUST_CFA_OFFSET 2*8
21519+ subq $3*8,%rsp
21520+ CFI_ADJUST_CFA_OFFSET 3*8
21521 movq %rbx,(%rsp)
21522 CFI_REL_OFFSET rbx, 0
21523 movq %r12,1*8(%rsp)
21524 CFI_REL_OFFSET r12, 1*8
21525+ movq %r13,2*8(%rsp)
21526+ CFI_REL_OFFSET r13, 2*8
21527
21528 movl $(4096/64)-5,%ecx
21529 .p2align 4
21530@@ -37,7 +40,7 @@ ENTRY(copy_page)
fe2de317
MT
21531 movq 16 (%rsi), %rdx
21532 movq 24 (%rsi), %r8
21533 movq 32 (%rsi), %r9
21534- movq 40 (%rsi), %r10
21535+ movq 40 (%rsi), %r13
21536 movq 48 (%rsi), %r11
21537 movq 56 (%rsi), %r12
21538
c6e2a6c8 21539@@ -48,7 +51,7 @@ ENTRY(copy_page)
fe2de317
MT
21540 movq %rdx, 16 (%rdi)
21541 movq %r8, 24 (%rdi)
21542 movq %r9, 32 (%rdi)
21543- movq %r10, 40 (%rdi)
21544+ movq %r13, 40 (%rdi)
21545 movq %r11, 48 (%rdi)
21546 movq %r12, 56 (%rdi)
21547
c6e2a6c8 21548@@ -67,7 +70,7 @@ ENTRY(copy_page)
fe2de317
MT
21549 movq 16 (%rsi), %rdx
21550 movq 24 (%rsi), %r8
21551 movq 32 (%rsi), %r9
21552- movq 40 (%rsi), %r10
21553+ movq 40 (%rsi), %r13
21554 movq 48 (%rsi), %r11
21555 movq 56 (%rsi), %r12
21556
c6e2a6c8 21557@@ -76,7 +79,7 @@ ENTRY(copy_page)
fe2de317
MT
21558 movq %rdx, 16 (%rdi)
21559 movq %r8, 24 (%rdi)
21560 movq %r9, 32 (%rdi)
21561- movq %r10, 40 (%rdi)
21562+ movq %r13, 40 (%rdi)
21563 movq %r11, 48 (%rdi)
21564 movq %r12, 56 (%rdi)
21565
c6e2a6c8
MT
21566@@ -89,8 +92,11 @@ ENTRY(copy_page)
21567 CFI_RESTORE rbx
21568 movq 1*8(%rsp),%r12
21569 CFI_RESTORE r12
21570- addq $2*8,%rsp
21571- CFI_ADJUST_CFA_OFFSET -2*8
21572+ movq 2*8(%rsp),%r13
21573+ CFI_RESTORE r13
21574+ addq $3*8,%rsp
21575+ CFI_ADJUST_CFA_OFFSET -3*8
6e9df6a3 21576+ pax_force_retaddr
15a11c5b
MT
21577 ret
21578 .Lcopy_page_end:
21579 CFI_ENDPROC
c6e2a6c8 21580@@ -101,7 +107,7 @@ ENDPROC(copy_page)
58c5fc13
MT
21581
21582 #include <asm/cpufeature.h>
21583
21584- .section .altinstr_replacement,"ax"
21585+ .section .altinstr_replacement,"a"
21586 1: .byte 0xeb /* jmp <disp8> */
21587 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21588 2:
fe2de317
MT
21589diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21590index 0248402..821c786 100644
21591--- a/arch/x86/lib/copy_user_64.S
21592+++ b/arch/x86/lib/copy_user_64.S
15a11c5b 21593@@ -16,6 +16,7 @@
df50ba0c
MT
21594 #include <asm/thread_info.h>
21595 #include <asm/cpufeature.h>
15a11c5b 21596 #include <asm/alternative-asm.h>
df50ba0c
MT
21597+#include <asm/pgtable.h>
21598
15a11c5b
MT
21599 /*
21600 * By placing feature2 after feature1 in altinstructions section, we logically
21601@@ -29,7 +30,7 @@
58c5fc13
MT
21602 .byte 0xe9 /* 32bit jump */
21603 .long \orig-1f /* by default jump to orig */
21604 1:
21605- .section .altinstr_replacement,"ax"
21606+ .section .altinstr_replacement,"a"
21607 2: .byte 0xe9 /* near jump with 32bit immediate */
15a11c5b
MT
21608 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
21609 3: .byte 0xe9 /* near jump with 32bit immediate */
6e9df6a3 21610@@ -71,47 +72,20 @@
58c5fc13
MT
21611 #endif
21612 .endm
21613
21614-/* Standard copy_to_user with segment limit checking */
ae4e228f 21615-ENTRY(_copy_to_user)
58c5fc13
MT
21616- CFI_STARTPROC
21617- GET_THREAD_INFO(%rax)
21618- movq %rdi,%rcx
21619- addq %rdx,%rcx
21620- jc bad_to_user
21621- cmpq TI_addr_limit(%rax),%rcx
15a11c5b
MT
21622- ja bad_to_user
21623- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21624- copy_user_generic_unrolled,copy_user_generic_string, \
21625- copy_user_enhanced_fast_string
58c5fc13 21626- CFI_ENDPROC
ae4e228f 21627-ENDPROC(_copy_to_user)
58c5fc13
MT
21628-
21629-/* Standard copy_from_user with segment limit checking */
ae4e228f 21630-ENTRY(_copy_from_user)
58c5fc13
MT
21631- CFI_STARTPROC
21632- GET_THREAD_INFO(%rax)
21633- movq %rsi,%rcx
21634- addq %rdx,%rcx
21635- jc bad_from_user
21636- cmpq TI_addr_limit(%rax),%rcx
15a11c5b
MT
21637- ja bad_from_user
21638- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
21639- copy_user_generic_unrolled,copy_user_generic_string, \
21640- copy_user_enhanced_fast_string
58c5fc13 21641- CFI_ENDPROC
ae4e228f 21642-ENDPROC(_copy_from_user)
58c5fc13 21643-
df50ba0c
MT
21644 .section .fixup,"ax"
21645 /* must zero dest */
58c5fc13
MT
21646 ENTRY(bad_from_user)
21647 bad_from_user:
21648 CFI_STARTPROC
21649+ testl %edx,%edx
21650+ js bad_to_user
21651 movl %edx,%ecx
21652 xorl %eax,%eax
21653 rep
15a11c5b
MT
21654 stosb
21655 bad_to_user:
21656 movl %edx,%eax
6e9df6a3 21657+ pax_force_retaddr
15a11c5b
MT
21658 ret
21659 CFI_ENDPROC
21660 ENDPROC(bad_from_user)
fe2de317
MT
21661@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
21662 jz 17f
21663 1: movq (%rsi),%r8
21664 2: movq 1*8(%rsi),%r9
21665-3: movq 2*8(%rsi),%r10
21666+3: movq 2*8(%rsi),%rax
21667 4: movq 3*8(%rsi),%r11
21668 5: movq %r8,(%rdi)
21669 6: movq %r9,1*8(%rdi)
21670-7: movq %r10,2*8(%rdi)
21671+7: movq %rax,2*8(%rdi)
21672 8: movq %r11,3*8(%rdi)
21673 9: movq 4*8(%rsi),%r8
21674 10: movq 5*8(%rsi),%r9
21675-11: movq 6*8(%rsi),%r10
21676+11: movq 6*8(%rsi),%rax
21677 12: movq 7*8(%rsi),%r11
21678 13: movq %r8,4*8(%rdi)
21679 14: movq %r9,5*8(%rdi)
21680-15: movq %r10,6*8(%rdi)
21681+15: movq %rax,6*8(%rdi)
21682 16: movq %r11,7*8(%rdi)
21683 leaq 64(%rsi),%rsi
21684 leaq 64(%rdi),%rdi
6e9df6a3 21685@@ -179,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
15a11c5b
MT
21686 decl %ecx
21687 jnz 21b
21688 23: xor %eax,%eax
6e9df6a3 21689+ pax_force_retaddr
15a11c5b
MT
21690 ret
21691
21692 .section .fixup,"ax"
6e9df6a3 21693@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
15a11c5b
MT
21694 3: rep
21695 movsb
21696 4: xorl %eax,%eax
6e9df6a3 21697+ pax_force_retaddr
15a11c5b
MT
21698 ret
21699
21700 .section .fixup,"ax"
6e9df6a3 21701@@ -287,6 +263,7 @@ ENTRY(copy_user_enhanced_fast_string)
15a11c5b
MT
21702 1: rep
21703 movsb
21704 2: xorl %eax,%eax
6e9df6a3 21705+ pax_force_retaddr
15a11c5b
MT
21706 ret
21707
21708 .section .fixup,"ax"
fe2de317
MT
21709diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
21710index cb0c112..e3a6895 100644
21711--- a/arch/x86/lib/copy_user_nocache_64.S
21712+++ b/arch/x86/lib/copy_user_nocache_64.S
6e9df6a3
MT
21713@@ -8,12 +8,14 @@
21714
21715 #include <linux/linkage.h>
21716 #include <asm/dwarf2.h>
21717+#include <asm/alternative-asm.h>
21718
21719 #define FIX_ALIGNMENT 1
21720
df50ba0c
MT
21721 #include <asm/current.h>
21722 #include <asm/asm-offsets.h>
21723 #include <asm/thread_info.h>
21724+#include <asm/pgtable.h>
21725
21726 .macro ALIGN_DESTINATION
21727 #ifdef FIX_ALIGNMENT
6e9df6a3 21728@@ -50,6 +52,15 @@
df50ba0c
MT
21729 */
21730 ENTRY(__copy_user_nocache)
21731 CFI_STARTPROC
21732+
21733+#ifdef CONFIG_PAX_MEMORY_UDEREF
21734+ mov $PAX_USER_SHADOW_BASE,%rcx
21735+ cmp %rcx,%rsi
21736+ jae 1f
21737+ add %rcx,%rsi
21738+1:
21739+#endif
21740+
21741 cmpl $8,%edx
21742 jb 20f /* less then 8 bytes, go to byte copy loop */
21743 ALIGN_DESTINATION
fe2de317
MT
21744@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
21745 jz 17f
21746 1: movq (%rsi),%r8
21747 2: movq 1*8(%rsi),%r9
21748-3: movq 2*8(%rsi),%r10
21749+3: movq 2*8(%rsi),%rax
21750 4: movq 3*8(%rsi),%r11
21751 5: movnti %r8,(%rdi)
21752 6: movnti %r9,1*8(%rdi)
21753-7: movnti %r10,2*8(%rdi)
21754+7: movnti %rax,2*8(%rdi)
21755 8: movnti %r11,3*8(%rdi)
21756 9: movq 4*8(%rsi),%r8
21757 10: movq 5*8(%rsi),%r9
21758-11: movq 6*8(%rsi),%r10
21759+11: movq 6*8(%rsi),%rax
21760 12: movq 7*8(%rsi),%r11
21761 13: movnti %r8,4*8(%rdi)
21762 14: movnti %r9,5*8(%rdi)
21763-15: movnti %r10,6*8(%rdi)
21764+15: movnti %rax,6*8(%rdi)
21765 16: movnti %r11,7*8(%rdi)
21766 leaq 64(%rsi),%rsi
21767 leaq 64(%rdi),%rdi
6e9df6a3 21768@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
15a11c5b
MT
21769 jnz 21b
21770 23: xorl %eax,%eax
21771 sfence
6e9df6a3 21772+ pax_force_retaddr
15a11c5b
MT
21773 ret
21774
21775 .section .fixup,"ax"
fe2de317
MT
21776diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
21777index fb903b7..c92b7f7 100644
21778--- a/arch/x86/lib/csum-copy_64.S
21779+++ b/arch/x86/lib/csum-copy_64.S
6e9df6a3
MT
21780@@ -8,6 +8,7 @@
21781 #include <linux/linkage.h>
21782 #include <asm/dwarf2.h>
21783 #include <asm/errno.h>
21784+#include <asm/alternative-asm.h>
21785
21786 /*
21787 * Checksum copy with exception handling.
21788@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
15a11c5b
MT
21789 CFI_RESTORE rbp
21790 addq $7*8, %rsp
21791 CFI_ADJUST_CFA_OFFSET -7*8
fe2de317 21792+ pax_force_retaddr 0, 1
15a11c5b
MT
21793 ret
21794 CFI_RESTORE_STATE
21795
fe2de317
MT
21796diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
21797index 459b58a..9570bc7 100644
21798--- a/arch/x86/lib/csum-wrappers_64.c
21799+++ b/arch/x86/lib/csum-wrappers_64.c
21800@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
df50ba0c
MT
21801 len -= 2;
21802 }
21803 }
6e9df6a3 21804- isum = csum_partial_copy_generic((__force const void *)src,
8308f9c9
MT
21805+
21806+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
21807+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
21808+ src += PAX_USER_SHADOW_BASE;
8308f9c9
MT
21809+#endif
21810+
6e9df6a3 21811+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
df50ba0c
MT
21812 dst, len, isum, errp, NULL);
21813 if (unlikely(*errp))
6e9df6a3 21814 goto out_err;
fe2de317 21815@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
df50ba0c
MT
21816 }
21817
21818 *errp = 0;
6e9df6a3 21819- return csum_partial_copy_generic(src, (void __force *)dst,
8308f9c9
MT
21820+
21821+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
21822+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
21823+ dst += PAX_USER_SHADOW_BASE;
8308f9c9
MT
21824+#endif
21825+
6e9df6a3 21826+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
df50ba0c
MT
21827 len, isum, NULL, errp);
21828 }
6e9df6a3 21829 EXPORT_SYMBOL(csum_partial_copy_to_user);
fe2de317
MT
21830diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
21831index 51f1504..ddac4c1 100644
21832--- a/arch/x86/lib/getuser.S
21833+++ b/arch/x86/lib/getuser.S
6e9df6a3 21834@@ -33,15 +33,38 @@
58c5fc13
MT
21835 #include <asm/asm-offsets.h>
21836 #include <asm/thread_info.h>
21837 #include <asm/asm.h>
21838+#include <asm/segment.h>
df50ba0c 21839+#include <asm/pgtable.h>
6e9df6a3 21840+#include <asm/alternative-asm.h>
bc901d79
MT
21841+
21842+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16454cff 21843+#define __copyuser_seg gs;
bc901d79
MT
21844+#else
21845+#define __copyuser_seg
21846+#endif
58c5fc13
MT
21847
21848 .text
21849 ENTRY(__get_user_1)
ae4e228f 21850 CFI_STARTPROC
58c5fc13 21851+
bc901d79 21852+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
ae4e228f
MT
21853 GET_THREAD_INFO(%_ASM_DX)
21854 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21855 jae bad_get_user
bc901d79 21856-1: movzb (%_ASM_AX),%edx
df50ba0c
MT
21857+
21858+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21859+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21860+ cmp %_ASM_DX,%_ASM_AX
21861+ jae 1234f
21862+ add %_ASM_DX,%_ASM_AX
21863+1234:
21864+#endif
21865+
58c5fc13
MT
21866+#endif
21867+
16454cff 21868+1: __copyuser_seg movzb (%_ASM_AX),%edx
58c5fc13 21869 xor %eax,%eax
6e9df6a3 21870+ pax_force_retaddr
58c5fc13
MT
21871 ret
21872 CFI_ENDPROC
6e9df6a3
MT
21873 ENDPROC(__get_user_1)
21874@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
ae4e228f
MT
21875 ENTRY(__get_user_2)
21876 CFI_STARTPROC
21877 add $1,%_ASM_AX
58c5fc13 21878+
bc901d79 21879+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
ae4e228f
MT
21880 jc bad_get_user
21881 GET_THREAD_INFO(%_ASM_DX)
21882 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21883 jae bad_get_user
bc901d79 21884-2: movzwl -1(%_ASM_AX),%edx
df50ba0c
MT
21885+
21886+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21887+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21888+ cmp %_ASM_DX,%_ASM_AX
21889+ jae 1234f
21890+ add %_ASM_DX,%_ASM_AX
21891+1234:
21892+#endif
21893+
58c5fc13
MT
21894+#endif
21895+
16454cff 21896+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
58c5fc13 21897 xor %eax,%eax
6e9df6a3 21898+ pax_force_retaddr
58c5fc13
MT
21899 ret
21900 CFI_ENDPROC
6e9df6a3
MT
21901 ENDPROC(__get_user_2)
21902@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
ae4e228f
MT
21903 ENTRY(__get_user_4)
21904 CFI_STARTPROC
21905 add $3,%_ASM_AX
58c5fc13 21906+
bc901d79 21907+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
ae4e228f
MT
21908 jc bad_get_user
21909 GET_THREAD_INFO(%_ASM_DX)
21910 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21911 jae bad_get_user
bc901d79 21912-3: mov -3(%_ASM_AX),%edx
df50ba0c
MT
21913+
21914+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21915+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21916+ cmp %_ASM_DX,%_ASM_AX
21917+ jae 1234f
21918+ add %_ASM_DX,%_ASM_AX
21919+1234:
21920+#endif
21921+
58c5fc13
MT
21922+#endif
21923+
16454cff 21924+3: __copyuser_seg mov -3(%_ASM_AX),%edx
58c5fc13 21925 xor %eax,%eax
6e9df6a3 21926+ pax_force_retaddr
58c5fc13
MT
21927 ret
21928 CFI_ENDPROC
6e9df6a3
MT
21929 ENDPROC(__get_user_4)
21930@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
df50ba0c
MT
21931 GET_THREAD_INFO(%_ASM_DX)
21932 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
21933 jae bad_get_user
21934+
21935+#ifdef CONFIG_PAX_MEMORY_UDEREF
21936+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
21937+ cmp %_ASM_DX,%_ASM_AX
21938+ jae 1234f
21939+ add %_ASM_DX,%_ASM_AX
21940+1234:
21941+#endif
21942+
21943 4: movq -7(%_ASM_AX),%_ASM_DX
21944 xor %eax,%eax
6e9df6a3
MT
21945+ pax_force_retaddr
21946 ret
21947 CFI_ENDPROC
21948 ENDPROC(__get_user_8)
21949@@ -91,6 +152,7 @@ bad_get_user:
21950 CFI_STARTPROC
21951 xor %edx,%edx
21952 mov $(-EFAULT),%_ASM_AX
21953+ pax_force_retaddr
df50ba0c 21954 ret
6e9df6a3
MT
21955 CFI_ENDPROC
21956 END(bad_get_user)
fe2de317 21957diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
c6e2a6c8 21958index b1e6c4b..21ae8fc 100644
fe2de317
MT
21959--- a/arch/x86/lib/insn.c
21960+++ b/arch/x86/lib/insn.c
c52201e0 21961@@ -21,6 +21,11 @@
57199397
MT
21962 #include <linux/string.h>
21963 #include <asm/inat.h>
21964 #include <asm/insn.h>
c52201e0 21965+#ifdef __KERNEL__
57199397 21966+#include <asm/pgtable_types.h>
c52201e0
MT
21967+#else
21968+#define ktla_ktva(addr) addr
21969+#endif
57199397 21970
4c928ab7
MT
21971 /* Verify next sizeof(t) bytes can be on the same instruction */
21972 #define validate_next(t, insn, n) \
21973@@ -49,8 +54,8 @@
57199397
MT
21974 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
21975 {
21976 memset(insn, 0, sizeof(*insn));
21977- insn->kaddr = kaddr;
21978- insn->next_byte = kaddr;
21979+ insn->kaddr = ktla_ktva(kaddr);
21980+ insn->next_byte = ktla_ktva(kaddr);
21981 insn->x86_64 = x86_64 ? 1 : 0;
21982 insn->opnd_bytes = 4;
21983 if (x86_64)
fe2de317
MT
21984diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
21985index 05a95e7..326f2fa 100644
21986--- a/arch/x86/lib/iomap_copy_64.S
21987+++ b/arch/x86/lib/iomap_copy_64.S
6e9df6a3
MT
21988@@ -17,6 +17,7 @@
21989
21990 #include <linux/linkage.h>
21991 #include <asm/dwarf2.h>
21992+#include <asm/alternative-asm.h>
21993
21994 /*
21995 * override generic version in lib/iomap_copy.c
21996@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
15a11c5b
MT
21997 CFI_STARTPROC
21998 movl %edx,%ecx
21999 rep movsd
6e9df6a3 22000+ pax_force_retaddr
15a11c5b
MT
22001 ret
22002 CFI_ENDPROC
22003 ENDPROC(__iowrite32_copy)
fe2de317 22004diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
c6e2a6c8 22005index 1c273be..da9cc0e 100644
fe2de317
MT
22006--- a/arch/x86/lib/memcpy_64.S
22007+++ b/arch/x86/lib/memcpy_64.S
c6e2a6c8 22008@@ -33,6 +33,7 @@
15a11c5b
MT
22009 rep movsq
22010 movl %edx, %ecx
22011 rep movsb
6e9df6a3 22012+ pax_force_retaddr
15a11c5b
MT
22013 ret
22014 .Lmemcpy_e:
22015 .previous
c6e2a6c8
MT
22016@@ -49,6 +50,7 @@
22017 movq %rdi, %rax
22018 movq %rdx, %rcx
15a11c5b 22019 rep movsb
6e9df6a3 22020+ pax_force_retaddr
15a11c5b
MT
22021 ret
22022 .Lmemcpy_e_e:
22023 .previous
c6e2a6c8 22024@@ -76,13 +78,13 @@ ENTRY(memcpy)
fe2de317
MT
22025 */
22026 movq 0*8(%rsi), %r8
22027 movq 1*8(%rsi), %r9
22028- movq 2*8(%rsi), %r10
22029+ movq 2*8(%rsi), %rcx
22030 movq 3*8(%rsi), %r11
22031 leaq 4*8(%rsi), %rsi
22032
22033 movq %r8, 0*8(%rdi)
15a11c5b 22034 movq %r9, 1*8(%rdi)
fe2de317
MT
22035- movq %r10, 2*8(%rdi)
22036+ movq %rcx, 2*8(%rdi)
22037 movq %r11, 3*8(%rdi)
22038 leaq 4*8(%rdi), %rdi
22039 jae .Lcopy_forward_loop
c6e2a6c8 22040@@ -105,12 +107,12 @@ ENTRY(memcpy)
fe2de317
MT
22041 subq $0x20, %rdx
22042 movq -1*8(%rsi), %r8
22043 movq -2*8(%rsi), %r9
22044- movq -3*8(%rsi), %r10
22045+ movq -3*8(%rsi), %rcx
22046 movq -4*8(%rsi), %r11
22047 leaq -4*8(%rsi), %rsi
22048 movq %r8, -1*8(%rdi)
22049 movq %r9, -2*8(%rdi)
22050- movq %r10, -3*8(%rdi)
22051+ movq %rcx, -3*8(%rdi)
22052 movq %r11, -4*8(%rdi)
22053 leaq -4*8(%rdi), %rdi
22054 jae .Lcopy_backward_loop
c6e2a6c8 22055@@ -130,12 +132,13 @@ ENTRY(memcpy)
fe2de317
MT
22056 */
22057 movq 0*8(%rsi), %r8
22058 movq 1*8(%rsi), %r9
22059- movq -2*8(%rsi, %rdx), %r10
22060+ movq -2*8(%rsi, %rdx), %rcx
22061 movq -1*8(%rsi, %rdx), %r11
22062 movq %r8, 0*8(%rdi)
22063 movq %r9, 1*8(%rdi)
22064- movq %r10, -2*8(%rdi, %rdx)
22065+ movq %rcx, -2*8(%rdi, %rdx)
15a11c5b 22066 movq %r11, -1*8(%rdi, %rdx)
6e9df6a3 22067+ pax_force_retaddr
15a11c5b
MT
22068 retq
22069 .p2align 4
22070 .Lless_16bytes:
c6e2a6c8 22071@@ -148,6 +151,7 @@ ENTRY(memcpy)
15a11c5b
MT
22072 movq -1*8(%rsi, %rdx), %r9
22073 movq %r8, 0*8(%rdi)
22074 movq %r9, -1*8(%rdi, %rdx)
6e9df6a3 22075+ pax_force_retaddr
15a11c5b
MT
22076 retq
22077 .p2align 4
22078 .Lless_8bytes:
c6e2a6c8 22079@@ -161,6 +165,7 @@ ENTRY(memcpy)
15a11c5b
MT
22080 movl -4(%rsi, %rdx), %r8d
22081 movl %ecx, (%rdi)
22082 movl %r8d, -4(%rdi, %rdx)
6e9df6a3 22083+ pax_force_retaddr
15a11c5b
MT
22084 retq
22085 .p2align 4
22086 .Lless_3bytes:
c6e2a6c8
MT
22087@@ -179,6 +184,7 @@ ENTRY(memcpy)
22088 movb %cl, (%rdi)
15a11c5b
MT
22089
22090 .Lend:
6e9df6a3 22091+ pax_force_retaddr
15a11c5b
MT
22092 retq
22093 CFI_ENDPROC
22094 ENDPROC(memcpy)
fe2de317
MT
22095diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
22096index ee16461..c39c199 100644
22097--- a/arch/x86/lib/memmove_64.S
22098+++ b/arch/x86/lib/memmove_64.S
22099@@ -61,13 +61,13 @@ ENTRY(memmove)
22100 5:
22101 sub $0x20, %rdx
22102 movq 0*8(%rsi), %r11
22103- movq 1*8(%rsi), %r10
22104+ movq 1*8(%rsi), %rcx
22105 movq 2*8(%rsi), %r9
22106 movq 3*8(%rsi), %r8
22107 leaq 4*8(%rsi), %rsi
22108
22109 movq %r11, 0*8(%rdi)
22110- movq %r10, 1*8(%rdi)
22111+ movq %rcx, 1*8(%rdi)
22112 movq %r9, 2*8(%rdi)
22113 movq %r8, 3*8(%rdi)
22114 leaq 4*8(%rdi), %rdi
22115@@ -81,10 +81,10 @@ ENTRY(memmove)
22116 4:
22117 movq %rdx, %rcx
22118 movq -8(%rsi, %rdx), %r11
22119- lea -8(%rdi, %rdx), %r10
22120+ lea -8(%rdi, %rdx), %r9
22121 shrq $3, %rcx
22122 rep movsq
22123- movq %r11, (%r10)
22124+ movq %r11, (%r9)
22125 jmp 13f
22126 .Lmemmove_end_forward:
22127
22128@@ -95,14 +95,14 @@ ENTRY(memmove)
22129 7:
22130 movq %rdx, %rcx
22131 movq (%rsi), %r11
22132- movq %rdi, %r10
22133+ movq %rdi, %r9
22134 leaq -8(%rsi, %rdx), %rsi
22135 leaq -8(%rdi, %rdx), %rdi
22136 shrq $3, %rcx
22137 std
22138 rep movsq
22139 cld
22140- movq %r11, (%r10)
22141+ movq %r11, (%r9)
22142 jmp 13f
22143
22144 /*
22145@@ -127,13 +127,13 @@ ENTRY(memmove)
22146 8:
22147 subq $0x20, %rdx
22148 movq -1*8(%rsi), %r11
22149- movq -2*8(%rsi), %r10
22150+ movq -2*8(%rsi), %rcx
22151 movq -3*8(%rsi), %r9
22152 movq -4*8(%rsi), %r8
22153 leaq -4*8(%rsi), %rsi
22154
22155 movq %r11, -1*8(%rdi)
22156- movq %r10, -2*8(%rdi)
22157+ movq %rcx, -2*8(%rdi)
22158 movq %r9, -3*8(%rdi)
22159 movq %r8, -4*8(%rdi)
22160 leaq -4*8(%rdi), %rdi
22161@@ -151,11 +151,11 @@ ENTRY(memmove)
22162 * Move data from 16 bytes to 31 bytes.
22163 */
22164 movq 0*8(%rsi), %r11
22165- movq 1*8(%rsi), %r10
22166+ movq 1*8(%rsi), %rcx
22167 movq -2*8(%rsi, %rdx), %r9
22168 movq -1*8(%rsi, %rdx), %r8
22169 movq %r11, 0*8(%rdi)
22170- movq %r10, 1*8(%rdi)
22171+ movq %rcx, 1*8(%rdi)
22172 movq %r9, -2*8(%rdi, %rdx)
22173 movq %r8, -1*8(%rdi, %rdx)
22174 jmp 13f
22175@@ -167,9 +167,9 @@ ENTRY(memmove)
22176 * Move data from 8 bytes to 15 bytes.
22177 */
22178 movq 0*8(%rsi), %r11
22179- movq -1*8(%rsi, %rdx), %r10
22180+ movq -1*8(%rsi, %rdx), %r9
22181 movq %r11, 0*8(%rdi)
22182- movq %r10, -1*8(%rdi, %rdx)
22183+ movq %r9, -1*8(%rdi, %rdx)
22184 jmp 13f
22185 10:
22186 cmpq $4, %rdx
22187@@ -178,9 +178,9 @@ ENTRY(memmove)
22188 * Move data from 4 bytes to 7 bytes.
22189 */
22190 movl (%rsi), %r11d
22191- movl -4(%rsi, %rdx), %r10d
22192+ movl -4(%rsi, %rdx), %r9d
22193 movl %r11d, (%rdi)
22194- movl %r10d, -4(%rdi, %rdx)
22195+ movl %r9d, -4(%rdi, %rdx)
22196 jmp 13f
22197 11:
22198 cmp $2, %rdx
22199@@ -189,9 +189,9 @@ ENTRY(memmove)
22200 * Move data from 2 bytes to 3 bytes.
22201 */
22202 movw (%rsi), %r11w
22203- movw -2(%rsi, %rdx), %r10w
22204+ movw -2(%rsi, %rdx), %r9w
22205 movw %r11w, (%rdi)
22206- movw %r10w, -2(%rdi, %rdx)
22207+ movw %r9w, -2(%rdi, %rdx)
22208 jmp 13f
22209 12:
22210 cmp $1, %rdx
6e9df6a3 22211@@ -202,6 +202,7 @@ ENTRY(memmove)
15a11c5b
MT
22212 movb (%rsi), %r11b
22213 movb %r11b, (%rdi)
22214 13:
6e9df6a3 22215+ pax_force_retaddr
15a11c5b
MT
22216 retq
22217 CFI_ENDPROC
22218
6e9df6a3 22219@@ -210,6 +211,7 @@ ENTRY(memmove)
15a11c5b
MT
22220 /* Forward moving data. */
22221 movq %rdx, %rcx
22222 rep movsb
6e9df6a3 22223+ pax_force_retaddr
15a11c5b
MT
22224 retq
22225 .Lmemmove_end_forward_efs:
22226 .previous
fe2de317 22227diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
c6e2a6c8 22228index 2dcb380..963660a 100644
fe2de317
MT
22229--- a/arch/x86/lib/memset_64.S
22230+++ b/arch/x86/lib/memset_64.S
c6e2a6c8
MT
22231@@ -30,6 +30,7 @@
22232 movl %edx,%ecx
15a11c5b
MT
22233 rep stosb
22234 movq %r9,%rax
6e9df6a3 22235+ pax_force_retaddr
15a11c5b
MT
22236 ret
22237 .Lmemset_e:
22238 .previous
c6e2a6c8
MT
22239@@ -52,6 +53,7 @@
22240 movq %rdx,%rcx
15a11c5b
MT
22241 rep stosb
22242 movq %r9,%rax
6e9df6a3 22243+ pax_force_retaddr
15a11c5b
MT
22244 ret
22245 .Lmemset_e_e:
22246 .previous
c6e2a6c8 22247@@ -59,7 +61,7 @@
fe2de317
MT
22248 ENTRY(memset)
22249 ENTRY(__memset)
22250 CFI_STARTPROC
22251- movq %rdi,%r10
c6e2a6c8 22252+ movq %rdi,%r11
fe2de317
MT
22253
22254 /* expand byte value */
22255 movzbl %sil,%ecx
c6e2a6c8 22256@@ -117,7 +119,8 @@ ENTRY(__memset)
fe2de317 22257 jnz .Lloop_1
15a11c5b
MT
22258
22259 .Lende:
fe2de317 22260- movq %r10,%rax
c6e2a6c8 22261+ movq %r11,%rax
6e9df6a3 22262+ pax_force_retaddr
15a11c5b
MT
22263 ret
22264
22265 CFI_RESTORE_STATE
fe2de317
MT
22266diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22267index c9f2d9b..e7fd2c0 100644
22268--- a/arch/x86/lib/mmx_32.c
22269+++ b/arch/x86/lib/mmx_32.c
22270@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
58c5fc13
MT
22271 {
22272 void *p;
22273 int i;
22274+ unsigned long cr0;
22275
22276 if (unlikely(in_interrupt()))
22277 return __memcpy(to, from, len);
fe2de317 22278@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
58c5fc13
MT
22279 kernel_fpu_begin();
22280
22281 __asm__ __volatile__ (
22282- "1: prefetch (%0)\n" /* This set is 28 bytes */
22283- " prefetch 64(%0)\n"
22284- " prefetch 128(%0)\n"
22285- " prefetch 192(%0)\n"
22286- " prefetch 256(%0)\n"
22287+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22288+ " prefetch 64(%1)\n"
22289+ " prefetch 128(%1)\n"
22290+ " prefetch 192(%1)\n"
22291+ " prefetch 256(%1)\n"
22292 "2: \n"
22293 ".section .fixup, \"ax\"\n"
22294- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22295+ "3: \n"
22296+
22297+#ifdef CONFIG_PAX_KERNEXEC
22298+ " movl %%cr0, %0\n"
22299+ " movl %0, %%eax\n"
22300+ " andl $0xFFFEFFFF, %%eax\n"
22301+ " movl %%eax, %%cr0\n"
22302+#endif
22303+
22304+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22305+
22306+#ifdef CONFIG_PAX_KERNEXEC
22307+ " movl %0, %%cr0\n"
22308+#endif
22309+
22310 " jmp 2b\n"
22311 ".previous\n"
22312 _ASM_EXTABLE(1b, 3b)
22313- : : "r" (from));
22314+ : "=&r" (cr0) : "r" (from) : "ax");
22315
22316 for ( ; i > 5; i--) {
22317 __asm__ __volatile__ (
22318- "1: prefetch 320(%0)\n"
22319- "2: movq (%0), %%mm0\n"
22320- " movq 8(%0), %%mm1\n"
22321- " movq 16(%0), %%mm2\n"
22322- " movq 24(%0), %%mm3\n"
22323- " movq %%mm0, (%1)\n"
22324- " movq %%mm1, 8(%1)\n"
22325- " movq %%mm2, 16(%1)\n"
22326- " movq %%mm3, 24(%1)\n"
22327- " movq 32(%0), %%mm0\n"
22328- " movq 40(%0), %%mm1\n"
22329- " movq 48(%0), %%mm2\n"
22330- " movq 56(%0), %%mm3\n"
22331- " movq %%mm0, 32(%1)\n"
22332- " movq %%mm1, 40(%1)\n"
22333- " movq %%mm2, 48(%1)\n"
22334- " movq %%mm3, 56(%1)\n"
22335+ "1: prefetch 320(%1)\n"
22336+ "2: movq (%1), %%mm0\n"
22337+ " movq 8(%1), %%mm1\n"
22338+ " movq 16(%1), %%mm2\n"
22339+ " movq 24(%1), %%mm3\n"
22340+ " movq %%mm0, (%2)\n"
22341+ " movq %%mm1, 8(%2)\n"
22342+ " movq %%mm2, 16(%2)\n"
22343+ " movq %%mm3, 24(%2)\n"
22344+ " movq 32(%1), %%mm0\n"
22345+ " movq 40(%1), %%mm1\n"
22346+ " movq 48(%1), %%mm2\n"
22347+ " movq 56(%1), %%mm3\n"
22348+ " movq %%mm0, 32(%2)\n"
22349+ " movq %%mm1, 40(%2)\n"
22350+ " movq %%mm2, 48(%2)\n"
22351+ " movq %%mm3, 56(%2)\n"
22352 ".section .fixup, \"ax\"\n"
22353- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22354+ "3:\n"
22355+
22356+#ifdef CONFIG_PAX_KERNEXEC
22357+ " movl %%cr0, %0\n"
22358+ " movl %0, %%eax\n"
22359+ " andl $0xFFFEFFFF, %%eax\n"
22360+ " movl %%eax, %%cr0\n"
22361+#endif
22362+
22363+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22364+
22365+#ifdef CONFIG_PAX_KERNEXEC
22366+ " movl %0, %%cr0\n"
22367+#endif
22368+
22369 " jmp 2b\n"
22370 ".previous\n"
22371 _ASM_EXTABLE(1b, 3b)
22372- : : "r" (from), "r" (to) : "memory");
22373+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22374
22375 from += 64;
22376 to += 64;
22377@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22378 static void fast_copy_page(void *to, void *from)
22379 {
22380 int i;
22381+ unsigned long cr0;
22382
22383 kernel_fpu_begin();
22384
fe2de317 22385@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
58c5fc13
MT
22386 * but that is for later. -AV
22387 */
22388 __asm__ __volatile__(
22389- "1: prefetch (%0)\n"
22390- " prefetch 64(%0)\n"
22391- " prefetch 128(%0)\n"
22392- " prefetch 192(%0)\n"
22393- " prefetch 256(%0)\n"
22394+ "1: prefetch (%1)\n"
22395+ " prefetch 64(%1)\n"
22396+ " prefetch 128(%1)\n"
22397+ " prefetch 192(%1)\n"
22398+ " prefetch 256(%1)\n"
22399 "2: \n"
22400 ".section .fixup, \"ax\"\n"
22401- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22402+ "3: \n"
22403+
22404+#ifdef CONFIG_PAX_KERNEXEC
22405+ " movl %%cr0, %0\n"
22406+ " movl %0, %%eax\n"
22407+ " andl $0xFFFEFFFF, %%eax\n"
22408+ " movl %%eax, %%cr0\n"
22409+#endif
22410+
22411+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22412+
22413+#ifdef CONFIG_PAX_KERNEXEC
22414+ " movl %0, %%cr0\n"
22415+#endif
22416+
22417 " jmp 2b\n"
22418 ".previous\n"
22419- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22420+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22421
22422 for (i = 0; i < (4096-320)/64; i++) {
22423 __asm__ __volatile__ (
22424- "1: prefetch 320(%0)\n"
22425- "2: movq (%0), %%mm0\n"
22426- " movntq %%mm0, (%1)\n"
22427- " movq 8(%0), %%mm1\n"
22428- " movntq %%mm1, 8(%1)\n"
22429- " movq 16(%0), %%mm2\n"
22430- " movntq %%mm2, 16(%1)\n"
22431- " movq 24(%0), %%mm3\n"
22432- " movntq %%mm3, 24(%1)\n"
22433- " movq 32(%0), %%mm4\n"
22434- " movntq %%mm4, 32(%1)\n"
22435- " movq 40(%0), %%mm5\n"
22436- " movntq %%mm5, 40(%1)\n"
22437- " movq 48(%0), %%mm6\n"
22438- " movntq %%mm6, 48(%1)\n"
22439- " movq 56(%0), %%mm7\n"
22440- " movntq %%mm7, 56(%1)\n"
22441+ "1: prefetch 320(%1)\n"
22442+ "2: movq (%1), %%mm0\n"
22443+ " movntq %%mm0, (%2)\n"
22444+ " movq 8(%1), %%mm1\n"
22445+ " movntq %%mm1, 8(%2)\n"
22446+ " movq 16(%1), %%mm2\n"
22447+ " movntq %%mm2, 16(%2)\n"
22448+ " movq 24(%1), %%mm3\n"
22449+ " movntq %%mm3, 24(%2)\n"
22450+ " movq 32(%1), %%mm4\n"
22451+ " movntq %%mm4, 32(%2)\n"
22452+ " movq 40(%1), %%mm5\n"
22453+ " movntq %%mm5, 40(%2)\n"
22454+ " movq 48(%1), %%mm6\n"
22455+ " movntq %%mm6, 48(%2)\n"
22456+ " movq 56(%1), %%mm7\n"
22457+ " movntq %%mm7, 56(%2)\n"
22458 ".section .fixup, \"ax\"\n"
22459- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22460+ "3:\n"
22461+
22462+#ifdef CONFIG_PAX_KERNEXEC
22463+ " movl %%cr0, %0\n"
22464+ " movl %0, %%eax\n"
22465+ " andl $0xFFFEFFFF, %%eax\n"
22466+ " movl %%eax, %%cr0\n"
22467+#endif
22468+
22469+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22470+
22471+#ifdef CONFIG_PAX_KERNEXEC
22472+ " movl %0, %%cr0\n"
22473+#endif
22474+
22475 " jmp 2b\n"
22476 ".previous\n"
22477- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22478+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22479
22480 from += 64;
22481 to += 64;
22482@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22483 static void fast_copy_page(void *to, void *from)
22484 {
22485 int i;
22486+ unsigned long cr0;
22487
22488 kernel_fpu_begin();
22489
22490 __asm__ __volatile__ (
22491- "1: prefetch (%0)\n"
22492- " prefetch 64(%0)\n"
22493- " prefetch 128(%0)\n"
22494- " prefetch 192(%0)\n"
22495- " prefetch 256(%0)\n"
22496+ "1: prefetch (%1)\n"
22497+ " prefetch 64(%1)\n"
22498+ " prefetch 128(%1)\n"
22499+ " prefetch 192(%1)\n"
22500+ " prefetch 256(%1)\n"
22501 "2: \n"
22502 ".section .fixup, \"ax\"\n"
22503- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22504+ "3: \n"
22505+
22506+#ifdef CONFIG_PAX_KERNEXEC
22507+ " movl %%cr0, %0\n"
22508+ " movl %0, %%eax\n"
22509+ " andl $0xFFFEFFFF, %%eax\n"
22510+ " movl %%eax, %%cr0\n"
22511+#endif
22512+
22513+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22514+
22515+#ifdef CONFIG_PAX_KERNEXEC
22516+ " movl %0, %%cr0\n"
22517+#endif
22518+
22519 " jmp 2b\n"
22520 ".previous\n"
22521- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22522+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22523
22524 for (i = 0; i < 4096/64; i++) {
22525 __asm__ __volatile__ (
22526- "1: prefetch 320(%0)\n"
22527- "2: movq (%0), %%mm0\n"
22528- " movq 8(%0), %%mm1\n"
22529- " movq 16(%0), %%mm2\n"
22530- " movq 24(%0), %%mm3\n"
22531- " movq %%mm0, (%1)\n"
22532- " movq %%mm1, 8(%1)\n"
22533- " movq %%mm2, 16(%1)\n"
22534- " movq %%mm3, 24(%1)\n"
22535- " movq 32(%0), %%mm0\n"
22536- " movq 40(%0), %%mm1\n"
22537- " movq 48(%0), %%mm2\n"
22538- " movq 56(%0), %%mm3\n"
22539- " movq %%mm0, 32(%1)\n"
22540- " movq %%mm1, 40(%1)\n"
22541- " movq %%mm2, 48(%1)\n"
22542- " movq %%mm3, 56(%1)\n"
22543+ "1: prefetch 320(%1)\n"
22544+ "2: movq (%1), %%mm0\n"
22545+ " movq 8(%1), %%mm1\n"
22546+ " movq 16(%1), %%mm2\n"
22547+ " movq 24(%1), %%mm3\n"
22548+ " movq %%mm0, (%2)\n"
22549+ " movq %%mm1, 8(%2)\n"
22550+ " movq %%mm2, 16(%2)\n"
22551+ " movq %%mm3, 24(%2)\n"
22552+ " movq 32(%1), %%mm0\n"
22553+ " movq 40(%1), %%mm1\n"
22554+ " movq 48(%1), %%mm2\n"
22555+ " movq 56(%1), %%mm3\n"
22556+ " movq %%mm0, 32(%2)\n"
22557+ " movq %%mm1, 40(%2)\n"
22558+ " movq %%mm2, 48(%2)\n"
22559+ " movq %%mm3, 56(%2)\n"
22560 ".section .fixup, \"ax\"\n"
22561- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22562+ "3:\n"
22563+
22564+#ifdef CONFIG_PAX_KERNEXEC
22565+ " movl %%cr0, %0\n"
22566+ " movl %0, %%eax\n"
22567+ " andl $0xFFFEFFFF, %%eax\n"
22568+ " movl %%eax, %%cr0\n"
22569+#endif
22570+
22571+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22572+
22573+#ifdef CONFIG_PAX_KERNEXEC
22574+ " movl %0, %%cr0\n"
22575+#endif
22576+
22577 " jmp 2b\n"
22578 ".previous\n"
22579 _ASM_EXTABLE(1b, 3b)
22580- : : "r" (from), "r" (to) : "memory");
22581+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22582
22583 from += 64;
22584 to += 64;
fe2de317
MT
22585diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22586index 69fa106..adda88b 100644
22587--- a/arch/x86/lib/msr-reg.S
22588+++ b/arch/x86/lib/msr-reg.S
6e9df6a3
MT
22589@@ -3,6 +3,7 @@
22590 #include <asm/dwarf2.h>
22591 #include <asm/asm.h>
22592 #include <asm/msr.h>
22593+#include <asm/alternative-asm.h>
22594
22595 #ifdef CONFIG_X86_64
22596 /*
fe2de317
MT
22597@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22598 CFI_STARTPROC
22599 pushq_cfi %rbx
22600 pushq_cfi %rbp
22601- movq %rdi, %r10 /* Save pointer */
22602+ movq %rdi, %r9 /* Save pointer */
22603 xorl %r11d, %r11d /* Return value */
22604 movl (%rdi), %eax
22605 movl 4(%rdi), %ecx
22606@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22607 movl 28(%rdi), %edi
22608 CFI_REMEMBER_STATE
22609 1: \op
22610-2: movl %eax, (%r10)
22611+2: movl %eax, (%r9)
22612 movl %r11d, %eax /* Return value */
22613- movl %ecx, 4(%r10)
22614- movl %edx, 8(%r10)
22615- movl %ebx, 12(%r10)
22616- movl %ebp, 20(%r10)
22617- movl %esi, 24(%r10)
22618- movl %edi, 28(%r10)
22619+ movl %ecx, 4(%r9)
22620+ movl %edx, 8(%r9)
22621+ movl %ebx, 12(%r9)
22622+ movl %ebp, 20(%r9)
22623+ movl %esi, 24(%r9)
22624+ movl %edi, 28(%r9)
6e9df6a3
MT
22625 popq_cfi %rbp
22626 popq_cfi %rbx
22627+ pax_force_retaddr
22628 ret
22629 3:
22630 CFI_RESTORE_STATE
fe2de317
MT
22631diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22632index 36b0d15..d381858 100644
22633--- a/arch/x86/lib/putuser.S
22634+++ b/arch/x86/lib/putuser.S
6e9df6a3 22635@@ -15,7 +15,9 @@
58c5fc13
MT
22636 #include <asm/thread_info.h>
22637 #include <asm/errno.h>
22638 #include <asm/asm.h>
df50ba0c 22639-
58c5fc13 22640+#include <asm/segment.h>
df50ba0c 22641+#include <asm/pgtable.h>
6e9df6a3 22642+#include <asm/alternative-asm.h>
58c5fc13
MT
22643
22644 /*
df50ba0c 22645 * __put_user_X
6e9df6a3 22646@@ -29,52 +31,119 @@
ae4e228f
MT
22647 * as they get called from within inline assembly.
22648 */
22649
22650-#define ENTER CFI_STARTPROC ; \
22651- GET_THREAD_INFO(%_ASM_BX)
6e9df6a3 22652-#define EXIT ret ; \
ae4e228f 22653+#define ENTER CFI_STARTPROC
6e9df6a3 22654+#define EXIT pax_force_retaddr; ret ; \
ae4e228f
MT
22655 CFI_ENDPROC
22656
57199397
MT
22657+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22658+#define _DEST %_ASM_CX,%_ASM_BX
22659+#else
22660+#define _DEST %_ASM_CX
22661+#endif
bc901d79
MT
22662+
22663+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16454cff 22664+#define __copyuser_seg gs;
bc901d79
MT
22665+#else
22666+#define __copyuser_seg
22667+#endif
57199397 22668+
ae4e228f
MT
22669 .text
22670 ENTRY(__put_user_1)
58c5fc13 22671 ENTER
58c5fc13 22672+
bc901d79 22673+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
ae4e228f
MT
22674+ GET_THREAD_INFO(%_ASM_BX)
22675 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22676 jae bad_put_user
57199397 22677-1: movb %al,(%_ASM_CX)
df50ba0c
MT
22678+
22679+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22680+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22681+ cmp %_ASM_BX,%_ASM_CX
57199397
MT
22682+ jb 1234f
22683+ xor %ebx,%ebx
df50ba0c
MT
22684+1234:
22685+#endif
22686+
58c5fc13
MT
22687+#endif
22688+
16454cff 22689+1: __copyuser_seg movb %al,(_DEST)
58c5fc13
MT
22690 xor %eax,%eax
22691 EXIT
22692 ENDPROC(__put_user_1)
ae4e228f
MT
22693
22694 ENTRY(__put_user_2)
22695 ENTER
58c5fc13 22696+
bc901d79 22697+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
ae4e228f
MT
22698+ GET_THREAD_INFO(%_ASM_BX)
22699 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22700 sub $1,%_ASM_BX
22701 cmp %_ASM_BX,%_ASM_CX
22702 jae bad_put_user
57199397 22703-2: movw %ax,(%_ASM_CX)
df50ba0c
MT
22704+
22705+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22706+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22707+ cmp %_ASM_BX,%_ASM_CX
57199397
MT
22708+ jb 1234f
22709+ xor %ebx,%ebx
df50ba0c
MT
22710+1234:
22711+#endif
22712+
58c5fc13
MT
22713+#endif
22714+
16454cff 22715+2: __copyuser_seg movw %ax,(_DEST)
58c5fc13
MT
22716 xor %eax,%eax
22717 EXIT
22718 ENDPROC(__put_user_2)
ae4e228f
MT
22719
22720 ENTRY(__put_user_4)
22721 ENTER
58c5fc13 22722+
bc901d79 22723+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
ae4e228f
MT
22724+ GET_THREAD_INFO(%_ASM_BX)
22725 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22726 sub $3,%_ASM_BX
22727 cmp %_ASM_BX,%_ASM_CX
22728 jae bad_put_user
57199397 22729-3: movl %eax,(%_ASM_CX)
df50ba0c
MT
22730+
22731+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22732+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22733+ cmp %_ASM_BX,%_ASM_CX
57199397
MT
22734+ jb 1234f
22735+ xor %ebx,%ebx
df50ba0c
MT
22736+1234:
22737+#endif
22738+
58c5fc13
MT
22739+#endif
22740+
16454cff 22741+3: __copyuser_seg movl %eax,(_DEST)
58c5fc13
MT
22742 xor %eax,%eax
22743 EXIT
22744 ENDPROC(__put_user_4)
ae4e228f
MT
22745
22746 ENTRY(__put_user_8)
22747 ENTER
58c5fc13 22748+
bc901d79 22749+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
ae4e228f
MT
22750+ GET_THREAD_INFO(%_ASM_BX)
22751 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22752 sub $7,%_ASM_BX
22753 cmp %_ASM_BX,%_ASM_CX
22754 jae bad_put_user
57199397 22755-4: mov %_ASM_AX,(%_ASM_CX)
df50ba0c
MT
22756+
22757+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22758+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22759+ cmp %_ASM_BX,%_ASM_CX
57199397
MT
22760+ jb 1234f
22761+ xor %ebx,%ebx
df50ba0c
MT
22762+1234:
22763+#endif
22764+
58c5fc13
MT
22765+#endif
22766+
16454cff 22767+4: __copyuser_seg mov %_ASM_AX,(_DEST)
58c5fc13 22768 #ifdef CONFIG_X86_32
57199397 22769-5: movl %edx,4(%_ASM_CX)
16454cff 22770+5: __copyuser_seg movl %edx,4(_DEST)
58c5fc13 22771 #endif
58c5fc13
MT
22772 xor %eax,%eax
22773 EXIT
fe2de317
MT
22774diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
22775index 1cad221..de671ee 100644
22776--- a/arch/x86/lib/rwlock.S
22777+++ b/arch/x86/lib/rwlock.S
22778@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
22779 FRAME
22780 0: LOCK_PREFIX
22781 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22782+
22783+#ifdef CONFIG_PAX_REFCOUNT
22784+ jno 1234f
22785+ LOCK_PREFIX
22786+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
22787+ int $4
22788+1234:
22789+ _ASM_EXTABLE(1234b, 1234b)
22790+#endif
22791+
22792 1: rep; nop
22793 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
22794 jne 1b
22795 LOCK_PREFIX
6e9df6a3 22796 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
fe2de317
MT
22797+
22798+#ifdef CONFIG_PAX_REFCOUNT
22799+ jno 1234f
22800+ LOCK_PREFIX
22801+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
22802+ int $4
22803+1234:
22804+ _ASM_EXTABLE(1234b, 1234b)
22805+#endif
22806+
6e9df6a3
MT
22807 jnz 0b
22808 ENDFRAME
22809+ pax_force_retaddr
15a11c5b
MT
22810 ret
22811 CFI_ENDPROC
22812 END(__write_lock_failed)
fe2de317
MT
22813@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
22814 FRAME
22815 0: LOCK_PREFIX
22816 READ_LOCK_SIZE(inc) (%__lock_ptr)
22817+
22818+#ifdef CONFIG_PAX_REFCOUNT
22819+ jno 1234f
22820+ LOCK_PREFIX
22821+ READ_LOCK_SIZE(dec) (%__lock_ptr)
22822+ int $4
22823+1234:
22824+ _ASM_EXTABLE(1234b, 1234b)
22825+#endif
22826+
22827 1: rep; nop
22828 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
22829 js 1b
22830 LOCK_PREFIX
6e9df6a3 22831 READ_LOCK_SIZE(dec) (%__lock_ptr)
fe2de317
MT
22832+
22833+#ifdef CONFIG_PAX_REFCOUNT
22834+ jno 1234f
22835+ LOCK_PREFIX
22836+ READ_LOCK_SIZE(inc) (%__lock_ptr)
22837+ int $4
22838+1234:
22839+ _ASM_EXTABLE(1234b, 1234b)
22840+#endif
22841+
6e9df6a3
MT
22842 js 0b
22843 ENDFRAME
22844+ pax_force_retaddr
15a11c5b
MT
22845 ret
22846 CFI_ENDPROC
22847 END(__read_lock_failed)
fe2de317
MT
22848diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
22849index 5dff5f0..cadebf4 100644
22850--- a/arch/x86/lib/rwsem.S
22851+++ b/arch/x86/lib/rwsem.S
6e9df6a3
MT
22852@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
22853 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22854 CFI_RESTORE __ASM_REG(dx)
15a11c5b 22855 restore_common_regs
6e9df6a3 22856+ pax_force_retaddr
15a11c5b
MT
22857 ret
22858 CFI_ENDPROC
22859 ENDPROC(call_rwsem_down_read_failed)
6e9df6a3 22860@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
15a11c5b
MT
22861 movq %rax,%rdi
22862 call rwsem_down_write_failed
22863 restore_common_regs
6e9df6a3 22864+ pax_force_retaddr
15a11c5b
MT
22865 ret
22866 CFI_ENDPROC
22867 ENDPROC(call_rwsem_down_write_failed)
6e9df6a3 22868@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
15a11c5b
MT
22869 movq %rax,%rdi
22870 call rwsem_wake
22871 restore_common_regs
6e9df6a3
MT
22872-1: ret
22873+1: pax_force_retaddr
22874+ ret
15a11c5b
MT
22875 CFI_ENDPROC
22876 ENDPROC(call_rwsem_wake)
6e9df6a3
MT
22877
22878@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
22879 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
22880 CFI_RESTORE __ASM_REG(dx)
15a11c5b 22881 restore_common_regs
6e9df6a3 22882+ pax_force_retaddr
15a11c5b
MT
22883 ret
22884 CFI_ENDPROC
22885 ENDPROC(call_rwsem_downgrade_wake)
fe2de317
MT
22886diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
22887index a63efd6..ccecad8 100644
22888--- a/arch/x86/lib/thunk_64.S
22889+++ b/arch/x86/lib/thunk_64.S
6e9df6a3
MT
22890@@ -8,6 +8,7 @@
22891 #include <linux/linkage.h>
22892 #include <asm/dwarf2.h>
22893 #include <asm/calling.h>
22894+#include <asm/alternative-asm.h>
22895
22896 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
22897 .macro THUNK name, func, put_ret_addr_in_rdi=0
22898@@ -41,5 +42,6 @@
15a11c5b
MT
22899 SAVE_ARGS
22900 restore:
22901 RESTORE_ARGS
6e9df6a3
MT
22902+ pax_force_retaddr
22903 ret
15a11c5b 22904 CFI_ENDPROC
fe2de317 22905diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
c6e2a6c8 22906index ef2a6a5..3b28862 100644
fe2de317
MT
22907--- a/arch/x86/lib/usercopy_32.c
22908+++ b/arch/x86/lib/usercopy_32.c
c6e2a6c8 22909@@ -41,10 +41,12 @@ do { \
bc901d79
MT
22910 int __d0; \
22911 might_fault(); \
22912 __asm__ __volatile__( \
22913+ __COPYUSER_SET_ES \
22914 "0: rep; stosl\n" \
22915 " movl %2,%0\n" \
22916 "1: rep; stosb\n" \
22917 "2:\n" \
22918+ __COPYUSER_RESTORE_ES \
22919 ".section .fixup,\"ax\"\n" \
22920 "3: lea 0(%2,%0,4),%0\n" \
22921 " jmp 2b\n" \
c6e2a6c8 22922@@ -113,6 +115,7 @@ long strnlen_user(const char __user *s, long n)
58c5fc13
MT
22923 might_fault();
22924
22925 __asm__ __volatile__(
bc901d79 22926+ __COPYUSER_SET_ES
58c5fc13
MT
22927 " testl %0, %0\n"
22928 " jz 3f\n"
bc901d79 22929 " andl %0,%%ecx\n"
c6e2a6c8 22930@@ -121,6 +124,7 @@ long strnlen_user(const char __user *s, long n)
58c5fc13
MT
22931 " subl %%ecx,%0\n"
22932 " addl %0,%%eax\n"
22933 "1:\n"
bc901d79 22934+ __COPYUSER_RESTORE_ES
58c5fc13
MT
22935 ".section .fixup,\"ax\"\n"
22936 "2: xorl %%eax,%%eax\n"
22937 " jmp 1b\n"
c6e2a6c8 22938@@ -140,7 +144,7 @@ EXPORT_SYMBOL(strnlen_user);
58c5fc13
MT
22939
22940 #ifdef CONFIG_X86_INTEL_USERCOPY
22941 static unsigned long
22942-__copy_user_intel(void __user *to, const void *from, unsigned long size)
22943+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
bc901d79
MT
22944 {
22945 int d0, d1;
22946 __asm__ __volatile__(
c6e2a6c8 22947@@ -152,36 +156,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
bc901d79
MT
22948 " .align 2,0x90\n"
22949 "3: movl 0(%4), %%eax\n"
22950 "4: movl 4(%4), %%edx\n"
22951- "5: movl %%eax, 0(%3)\n"
22952- "6: movl %%edx, 4(%3)\n"
16454cff
MT
22953+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
22954+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
bc901d79
MT
22955 "7: movl 8(%4), %%eax\n"
22956 "8: movl 12(%4),%%edx\n"
22957- "9: movl %%eax, 8(%3)\n"
22958- "10: movl %%edx, 12(%3)\n"
16454cff
MT
22959+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
22960+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
bc901d79
MT
22961 "11: movl 16(%4), %%eax\n"
22962 "12: movl 20(%4), %%edx\n"
22963- "13: movl %%eax, 16(%3)\n"
22964- "14: movl %%edx, 20(%3)\n"
16454cff
MT
22965+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
22966+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
bc901d79
MT
22967 "15: movl 24(%4), %%eax\n"
22968 "16: movl 28(%4), %%edx\n"
22969- "17: movl %%eax, 24(%3)\n"
22970- "18: movl %%edx, 28(%3)\n"
16454cff
MT
22971+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
22972+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
bc901d79
MT
22973 "19: movl 32(%4), %%eax\n"
22974 "20: movl 36(%4), %%edx\n"
22975- "21: movl %%eax, 32(%3)\n"
22976- "22: movl %%edx, 36(%3)\n"
16454cff
MT
22977+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
22978+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
bc901d79
MT
22979 "23: movl 40(%4), %%eax\n"
22980 "24: movl 44(%4), %%edx\n"
22981- "25: movl %%eax, 40(%3)\n"
22982- "26: movl %%edx, 44(%3)\n"
16454cff
MT
22983+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
22984+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
bc901d79
MT
22985 "27: movl 48(%4), %%eax\n"
22986 "28: movl 52(%4), %%edx\n"
22987- "29: movl %%eax, 48(%3)\n"
22988- "30: movl %%edx, 52(%3)\n"
16454cff
MT
22989+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
22990+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
bc901d79
MT
22991 "31: movl 56(%4), %%eax\n"
22992 "32: movl 60(%4), %%edx\n"
22993- "33: movl %%eax, 56(%3)\n"
22994- "34: movl %%edx, 60(%3)\n"
16454cff
MT
22995+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
22996+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
bc901d79
MT
22997 " addl $-64, %0\n"
22998 " addl $64, %4\n"
22999 " addl $64, %3\n"
c6e2a6c8 23000@@ -191,10 +195,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
bc901d79
MT
23001 " shrl $2, %0\n"
23002 " andl $3, %%eax\n"
23003 " cld\n"
23004+ __COPYUSER_SET_ES
23005 "99: rep; movsl\n"
23006 "36: movl %%eax, %0\n"
23007 "37: rep; movsb\n"
23008 "100:\n"
23009+ __COPYUSER_RESTORE_ES
4c928ab7
MT
23010 ".section .fixup,\"ax\"\n"
23011 "101: lea 0(%%eax,%0,4),%0\n"
23012 " jmp 100b\n"
c6e2a6c8 23013@@ -247,46 +253,155 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
4c928ab7
MT
23014 }
23015
23016 static unsigned long
58c5fc13 23017+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
bc901d79
MT
23018+{
23019+ int d0, d1;
23020+ __asm__ __volatile__(
23021+ " .align 2,0x90\n"
16454cff 23022+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
bc901d79
MT
23023+ " cmpl $67, %0\n"
23024+ " jbe 3f\n"
16454cff 23025+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
bc901d79 23026+ " .align 2,0x90\n"
16454cff
MT
23027+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23028+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
bc901d79
MT
23029+ "5: movl %%eax, 0(%3)\n"
23030+ "6: movl %%edx, 4(%3)\n"
16454cff
MT
23031+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23032+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
bc901d79
MT
23033+ "9: movl %%eax, 8(%3)\n"
23034+ "10: movl %%edx, 12(%3)\n"
16454cff
MT
23035+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23036+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
bc901d79
MT
23037+ "13: movl %%eax, 16(%3)\n"
23038+ "14: movl %%edx, 20(%3)\n"
16454cff
MT
23039+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23040+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
bc901d79
MT
23041+ "17: movl %%eax, 24(%3)\n"
23042+ "18: movl %%edx, 28(%3)\n"
16454cff
MT
23043+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23044+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
bc901d79
MT
23045+ "21: movl %%eax, 32(%3)\n"
23046+ "22: movl %%edx, 36(%3)\n"
16454cff
MT
23047+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23048+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
bc901d79
MT
23049+ "25: movl %%eax, 40(%3)\n"
23050+ "26: movl %%edx, 44(%3)\n"
16454cff
MT
23051+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23052+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
bc901d79
MT
23053+ "29: movl %%eax, 48(%3)\n"
23054+ "30: movl %%edx, 52(%3)\n"
16454cff
MT
23055+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23056+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
23057+ "33: movl %%eax, 56(%3)\n"
23058+ "34: movl %%edx, 60(%3)\n"
23059+ " addl $-64, %0\n"
23060+ " addl $64, %4\n"
23061+ " addl $64, %3\n"
23062+ " cmpl $63, %0\n"
23063+ " ja 1b\n"
23064+ "35: movl %0, %%eax\n"
23065+ " shrl $2, %0\n"
23066+ " andl $3, %%eax\n"
23067+ " cld\n"
16454cff 23068+ "99: rep; "__copyuser_seg" movsl\n"
bc901d79 23069+ "36: movl %%eax, %0\n"
16454cff 23070+ "37: rep; "__copyuser_seg" movsb\n"
bc901d79 23071+ "100:\n"
4c928ab7
MT
23072+ ".section .fixup,\"ax\"\n"
23073+ "101: lea 0(%%eax,%0,4),%0\n"
23074+ " jmp 100b\n"
23075+ ".previous\n"
23076+ ".section __ex_table,\"a\"\n"
23077+ " .align 4\n"
23078+ " .long 1b,100b\n"
23079+ " .long 2b,100b\n"
23080+ " .long 3b,100b\n"
23081+ " .long 4b,100b\n"
23082+ " .long 5b,100b\n"
23083+ " .long 6b,100b\n"
23084+ " .long 7b,100b\n"
23085+ " .long 8b,100b\n"
23086+ " .long 9b,100b\n"
23087+ " .long 10b,100b\n"
23088+ " .long 11b,100b\n"
23089+ " .long 12b,100b\n"
23090+ " .long 13b,100b\n"
23091+ " .long 14b,100b\n"
23092+ " .long 15b,100b\n"
23093+ " .long 16b,100b\n"
23094+ " .long 17b,100b\n"
23095+ " .long 18b,100b\n"
23096+ " .long 19b,100b\n"
23097+ " .long 20b,100b\n"
23098+ " .long 21b,100b\n"
23099+ " .long 22b,100b\n"
23100+ " .long 23b,100b\n"
23101+ " .long 24b,100b\n"
23102+ " .long 25b,100b\n"
23103+ " .long 26b,100b\n"
23104+ " .long 27b,100b\n"
23105+ " .long 28b,100b\n"
23106+ " .long 29b,100b\n"
23107+ " .long 30b,100b\n"
23108+ " .long 31b,100b\n"
23109+ " .long 32b,100b\n"
23110+ " .long 33b,100b\n"
23111+ " .long 34b,100b\n"
23112+ " .long 35b,100b\n"
23113+ " .long 36b,100b\n"
23114+ " .long 37b,100b\n"
23115+ " .long 99b,101b\n"
23116+ ".previous"
23117+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23118+ : "1"(to), "2"(from), "0"(size)
23119+ : "eax", "edx", "memory");
23120+ return size;
23121+}
23122+
23123+static unsigned long
23124+__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) __size_overflow(3);
23125+static unsigned long
23126 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23127 {
58c5fc13
MT
23128 int d0, d1;
23129 __asm__ __volatile__(
58c5fc13 23130 " .align 2,0x90\n"
bc901d79 23131- "0: movl 32(%4), %%eax\n"
16454cff 23132+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
58c5fc13 23133 " cmpl $67, %0\n"
bc901d79
MT
23134 " jbe 2f\n"
23135- "1: movl 64(%4), %%eax\n"
16454cff 23136+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
58c5fc13 23137 " .align 2,0x90\n"
bc901d79
MT
23138- "2: movl 0(%4), %%eax\n"
23139- "21: movl 4(%4), %%edx\n"
16454cff
MT
23140+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23141+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
bc901d79
MT
23142 " movl %%eax, 0(%3)\n"
23143 " movl %%edx, 4(%3)\n"
23144- "3: movl 8(%4), %%eax\n"
23145- "31: movl 12(%4),%%edx\n"
16454cff
MT
23146+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23147+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
bc901d79
MT
23148 " movl %%eax, 8(%3)\n"
23149 " movl %%edx, 12(%3)\n"
23150- "4: movl 16(%4), %%eax\n"
23151- "41: movl 20(%4), %%edx\n"
16454cff
MT
23152+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23153+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
bc901d79
MT
23154 " movl %%eax, 16(%3)\n"
23155 " movl %%edx, 20(%3)\n"
23156- "10: movl 24(%4), %%eax\n"
23157- "51: movl 28(%4), %%edx\n"
16454cff
MT
23158+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23159+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
bc901d79
MT
23160 " movl %%eax, 24(%3)\n"
23161 " movl %%edx, 28(%3)\n"
23162- "11: movl 32(%4), %%eax\n"
23163- "61: movl 36(%4), %%edx\n"
16454cff
MT
23164+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23165+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
bc901d79
MT
23166 " movl %%eax, 32(%3)\n"
23167 " movl %%edx, 36(%3)\n"
23168- "12: movl 40(%4), %%eax\n"
23169- "71: movl 44(%4), %%edx\n"
16454cff
MT
23170+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23171+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
bc901d79
MT
23172 " movl %%eax, 40(%3)\n"
23173 " movl %%edx, 44(%3)\n"
23174- "13: movl 48(%4), %%eax\n"
23175- "81: movl 52(%4), %%edx\n"
16454cff
MT
23176+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23177+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
bc901d79
MT
23178 " movl %%eax, 48(%3)\n"
23179 " movl %%edx, 52(%3)\n"
23180- "14: movl 56(%4), %%eax\n"
23181- "91: movl 60(%4), %%edx\n"
16454cff
MT
23182+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23183+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
23184 " movl %%eax, 56(%3)\n"
23185 " movl %%edx, 60(%3)\n"
58c5fc13 23186 " addl $-64, %0\n"
c6e2a6c8 23187@@ -298,9 +413,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
bc901d79
MT
23188 " shrl $2, %0\n"
23189 " andl $3, %%eax\n"
23190 " cld\n"
23191- "6: rep; movsl\n"
16454cff 23192+ "6: rep; "__copyuser_seg" movsl\n"
58c5fc13 23193 " movl %%eax,%0\n"
bc901d79 23194- "7: rep; movsb\n"
16454cff 23195+ "7: rep; "__copyuser_seg" movsb\n"
58c5fc13 23196 "8:\n"
58c5fc13
MT
23197 ".section .fixup,\"ax\"\n"
23198 "9: lea 0(%%eax,%0,4),%0\n"
c6e2a6c8 23199@@ -347,47 +462,49 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
4c928ab7
MT
23200 */
23201
23202 static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23203+ const void __user *from, unsigned long size) __size_overflow(3);
23204+static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23205 const void __user *from, unsigned long size)
23206 {
23207 int d0, d1;
58c5fc13
MT
23208
23209 __asm__ __volatile__(
58c5fc13 23210 " .align 2,0x90\n"
bc901d79 23211- "0: movl 32(%4), %%eax\n"
16454cff 23212+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
58c5fc13 23213 " cmpl $67, %0\n"
bc901d79
MT
23214 " jbe 2f\n"
23215- "1: movl 64(%4), %%eax\n"
16454cff 23216+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
58c5fc13 23217 " .align 2,0x90\n"
bc901d79
MT
23218- "2: movl 0(%4), %%eax\n"
23219- "21: movl 4(%4), %%edx\n"
16454cff
MT
23220+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23221+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
bc901d79
MT
23222 " movnti %%eax, 0(%3)\n"
23223 " movnti %%edx, 4(%3)\n"
23224- "3: movl 8(%4), %%eax\n"
23225- "31: movl 12(%4),%%edx\n"
16454cff
MT
23226+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23227+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
bc901d79
MT
23228 " movnti %%eax, 8(%3)\n"
23229 " movnti %%edx, 12(%3)\n"
23230- "4: movl 16(%4), %%eax\n"
23231- "41: movl 20(%4), %%edx\n"
16454cff
MT
23232+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23233+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
bc901d79
MT
23234 " movnti %%eax, 16(%3)\n"
23235 " movnti %%edx, 20(%3)\n"
23236- "10: movl 24(%4), %%eax\n"
23237- "51: movl 28(%4), %%edx\n"
16454cff
MT
23238+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23239+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
bc901d79
MT
23240 " movnti %%eax, 24(%3)\n"
23241 " movnti %%edx, 28(%3)\n"
23242- "11: movl 32(%4), %%eax\n"
23243- "61: movl 36(%4), %%edx\n"
16454cff
MT
23244+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23245+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
bc901d79
MT
23246 " movnti %%eax, 32(%3)\n"
23247 " movnti %%edx, 36(%3)\n"
23248- "12: movl 40(%4), %%eax\n"
23249- "71: movl 44(%4), %%edx\n"
16454cff
MT
23250+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23251+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
bc901d79
MT
23252 " movnti %%eax, 40(%3)\n"
23253 " movnti %%edx, 44(%3)\n"
23254- "13: movl 48(%4), %%eax\n"
23255- "81: movl 52(%4), %%edx\n"
16454cff
MT
23256+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23257+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
bc901d79
MT
23258 " movnti %%eax, 48(%3)\n"
23259 " movnti %%edx, 52(%3)\n"
23260- "14: movl 56(%4), %%eax\n"
23261- "91: movl 60(%4), %%edx\n"
16454cff
MT
23262+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23263+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
23264 " movnti %%eax, 56(%3)\n"
23265 " movnti %%edx, 60(%3)\n"
58c5fc13 23266 " addl $-64, %0\n"
c6e2a6c8 23267@@ -400,9 +517,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
bc901d79
MT
23268 " shrl $2, %0\n"
23269 " andl $3, %%eax\n"
23270 " cld\n"
23271- "6: rep; movsl\n"
16454cff 23272+ "6: rep; "__copyuser_seg" movsl\n"
58c5fc13 23273 " movl %%eax,%0\n"
bc901d79 23274- "7: rep; movsb\n"
16454cff 23275+ "7: rep; "__copyuser_seg" movsb\n"
58c5fc13 23276 "8:\n"
58c5fc13
MT
23277 ".section .fixup,\"ax\"\n"
23278 "9: lea 0(%%eax,%0,4),%0\n"
c6e2a6c8 23279@@ -444,47 +561,49 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
4c928ab7
MT
23280 }
23281
23282 static unsigned long __copy_user_intel_nocache(void *to,
23283+ const void __user *from, unsigned long size) __size_overflow(3);
23284+static unsigned long __copy_user_intel_nocache(void *to,
23285 const void __user *from, unsigned long size)
23286 {
23287 int d0, d1;
58c5fc13
MT
23288
23289 __asm__ __volatile__(
58c5fc13 23290 " .align 2,0x90\n"
bc901d79 23291- "0: movl 32(%4), %%eax\n"
16454cff 23292+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
58c5fc13 23293 " cmpl $67, %0\n"
bc901d79
MT
23294 " jbe 2f\n"
23295- "1: movl 64(%4), %%eax\n"
16454cff 23296+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
58c5fc13 23297 " .align 2,0x90\n"
bc901d79
MT
23298- "2: movl 0(%4), %%eax\n"
23299- "21: movl 4(%4), %%edx\n"
16454cff
MT
23300+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23301+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
bc901d79
MT
23302 " movnti %%eax, 0(%3)\n"
23303 " movnti %%edx, 4(%3)\n"
23304- "3: movl 8(%4), %%eax\n"
23305- "31: movl 12(%4),%%edx\n"
16454cff
MT
23306+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23307+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
bc901d79
MT
23308 " movnti %%eax, 8(%3)\n"
23309 " movnti %%edx, 12(%3)\n"
23310- "4: movl 16(%4), %%eax\n"
23311- "41: movl 20(%4), %%edx\n"
16454cff
MT
23312+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23313+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
bc901d79
MT
23314 " movnti %%eax, 16(%3)\n"
23315 " movnti %%edx, 20(%3)\n"
23316- "10: movl 24(%4), %%eax\n"
23317- "51: movl 28(%4), %%edx\n"
16454cff
MT
23318+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23319+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
bc901d79
MT
23320 " movnti %%eax, 24(%3)\n"
23321 " movnti %%edx, 28(%3)\n"
23322- "11: movl 32(%4), %%eax\n"
23323- "61: movl 36(%4), %%edx\n"
16454cff
MT
23324+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23325+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
bc901d79
MT
23326 " movnti %%eax, 32(%3)\n"
23327 " movnti %%edx, 36(%3)\n"
23328- "12: movl 40(%4), %%eax\n"
23329- "71: movl 44(%4), %%edx\n"
16454cff
MT
23330+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23331+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
bc901d79
MT
23332 " movnti %%eax, 40(%3)\n"
23333 " movnti %%edx, 44(%3)\n"
23334- "13: movl 48(%4), %%eax\n"
23335- "81: movl 52(%4), %%edx\n"
16454cff
MT
23336+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23337+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
bc901d79
MT
23338 " movnti %%eax, 48(%3)\n"
23339 " movnti %%edx, 52(%3)\n"
23340- "14: movl 56(%4), %%eax\n"
23341- "91: movl 60(%4), %%edx\n"
16454cff
MT
23342+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23343+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
bc901d79
MT
23344 " movnti %%eax, 56(%3)\n"
23345 " movnti %%edx, 60(%3)\n"
58c5fc13 23346 " addl $-64, %0\n"
c6e2a6c8 23347@@ -497,9 +616,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
bc901d79
MT
23348 " shrl $2, %0\n"
23349 " andl $3, %%eax\n"
23350 " cld\n"
23351- "6: rep; movsl\n"
16454cff 23352+ "6: rep; "__copyuser_seg" movsl\n"
58c5fc13 23353 " movl %%eax,%0\n"
bc901d79 23354- "7: rep; movsb\n"
16454cff 23355+ "7: rep; "__copyuser_seg" movsb\n"
58c5fc13 23356 "8:\n"
58c5fc13
MT
23357 ".section .fixup,\"ax\"\n"
23358 "9: lea 0(%%eax,%0,4),%0\n"
c6e2a6c8 23359@@ -542,32 +661,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
58c5fc13
MT
23360 */
23361 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23362 unsigned long size);
23363-unsigned long __copy_user_intel(void __user *to, const void *from,
23364+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23365+ unsigned long size);
23366+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23367 unsigned long size);
23368 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23369 const void __user *from, unsigned long size);
23370 #endif /* CONFIG_X86_INTEL_USERCOPY */
23371
23372 /* Generic arbitrary sized copy. */
23373-#define __copy_user(to, from, size) \
bc901d79
MT
23374+#define __copy_user(to, from, size, prefix, set, restore) \
23375 do { \
23376 int __d0, __d1, __d2; \
23377 __asm__ __volatile__( \
23378+ set \
23379 " cmp $7,%0\n" \
23380 " jbe 1f\n" \
23381 " movl %1,%0\n" \
23382 " negl %0\n" \
23383 " andl $7,%0\n" \
23384 " subl %0,%3\n" \
58c5fc13 23385- "4: rep; movsb\n" \
16454cff 23386+ "4: rep; "prefix"movsb\n" \
bc901d79
MT
23387 " movl %3,%0\n" \
23388 " shrl $2,%0\n" \
23389 " andl $3,%3\n" \
23390 " .align 2,0x90\n" \
58c5fc13 23391- "0: rep; movsl\n" \
16454cff 23392+ "0: rep; "prefix"movsl\n" \
bc901d79 23393 " movl %3,%0\n" \
58c5fc13 23394- "1: rep; movsb\n" \
16454cff 23395+ "1: rep; "prefix"movsb\n" \
bc901d79
MT
23396 "2:\n" \
23397+ restore \
23398 ".section .fixup,\"ax\"\n" \
23399 "5: addl %3,%0\n" \
23400 " jmp 2b\n" \
c6e2a6c8 23401@@ -595,14 +718,14 @@ do { \
bc901d79
MT
23402 " negl %0\n" \
23403 " andl $7,%0\n" \
23404 " subl %0,%3\n" \
58c5fc13 23405- "4: rep; movsb\n" \
16454cff 23406+ "4: rep; "__copyuser_seg"movsb\n" \
bc901d79
MT
23407 " movl %3,%0\n" \
23408 " shrl $2,%0\n" \
23409 " andl $3,%3\n" \
23410 " .align 2,0x90\n" \
58c5fc13 23411- "0: rep; movsl\n" \
16454cff 23412+ "0: rep; "__copyuser_seg"movsl\n" \
bc901d79 23413 " movl %3,%0\n" \
58c5fc13 23414- "1: rep; movsb\n" \
16454cff 23415+ "1: rep; "__copyuser_seg"movsb\n" \
bc901d79
MT
23416 "2:\n" \
23417 ".section .fixup,\"ax\"\n" \
23418 "5: addl %3,%0\n" \
c6e2a6c8 23419@@ -688,9 +811,9 @@ survive:
58c5fc13
MT
23420 }
23421 #endif
23422 if (movsl_is_ok(to, from, n))
23423- __copy_user(to, from, n);
bc901d79 23424+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
58c5fc13
MT
23425 else
23426- n = __copy_user_intel(to, from, n);
23427+ n = __generic_copy_to_user_intel(to, from, n);
23428 return n;
23429 }
23430 EXPORT_SYMBOL(__copy_to_user_ll);
c6e2a6c8 23431@@ -710,10 +833,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
58c5fc13
MT
23432 unsigned long n)
23433 {
23434 if (movsl_is_ok(to, from, n))
23435- __copy_user(to, from, n);
bc901d79 23436+ __copy_user(to, from, n, __copyuser_seg, "", "");
58c5fc13
MT
23437 else
23438- n = __copy_user_intel((void __user *)to,
23439- (const void *)from, n);
23440+ n = __generic_copy_from_user_intel(to, from, n);
23441 return n;
23442 }
23443 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
c6e2a6c8 23444@@ -740,65 +862,50 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
58c5fc13
MT
23445 if (n > 64 && cpu_has_xmm2)
23446 n = __copy_user_intel_nocache(to, from, n);
23447 else
23448- __copy_user(to, from, n);
bc901d79 23449+ __copy_user(to, from, n, __copyuser_seg, "", "");
58c5fc13
MT
23450 #else
23451- __copy_user(to, from, n);
bc901d79 23452+ __copy_user(to, from, n, __copyuser_seg, "", "");
58c5fc13
MT
23453 #endif
23454 return n;
23455 }
23456 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23457
23458-/**
23459- * copy_to_user: - Copy a block of data into user space.
23460- * @to: Destination address, in user space.
23461- * @from: Source address, in kernel space.
23462- * @n: Number of bytes to copy.
23463- *
23464- * Context: User context only. This function may sleep.
23465- *
23466- * Copy data from kernel space to user space.
23467- *
23468- * Returns number of bytes that could not be copied.
23469- * On success, this will be zero.
23470- */
23471-unsigned long
23472-copy_to_user(void __user *to, const void *from, unsigned long n)
fe2de317 23473-{
58c5fc13
MT
23474- if (access_ok(VERIFY_WRITE, to, n))
23475- n = __copy_to_user(to, from, n);
23476- return n;
fe2de317 23477-}
58c5fc13 23478-EXPORT_SYMBOL(copy_to_user);
fe2de317 23479-
58c5fc13
MT
23480-/**
23481- * copy_from_user: - Copy a block of data from user space.
23482- * @to: Destination address, in kernel space.
23483- * @from: Source address, in user space.
23484- * @n: Number of bytes to copy.
23485- *
23486- * Context: User context only. This function may sleep.
23487- *
23488- * Copy data from user space to kernel space.
23489- *
23490- * Returns number of bytes that could not be copied.
23491- * On success, this will be zero.
23492- *
23493- * If some data could not be copied, this function will pad the copied
23494- * data to the requested size using zero bytes.
23495- */
23496-unsigned long
ae4e228f 23497-_copy_from_user(void *to, const void __user *from, unsigned long n)
fe2de317 23498-{
58c5fc13
MT
23499- if (access_ok(VERIFY_READ, from, n))
23500- n = __copy_from_user(to, from, n);
23501- else
23502- memset(to, 0, n);
23503- return n;
fe2de317 23504-}
ae4e228f 23505-EXPORT_SYMBOL(_copy_from_user);
fe2de317
MT
23506-
23507 void copy_from_user_overflow(void)
23508 {
23509 WARN(1, "Buffer overflow detected!\n");
23510 }
23511 EXPORT_SYMBOL(copy_from_user_overflow);
23512+
23513+void copy_to_user_overflow(void)
23514+{
23515+ WARN(1, "Buffer overflow detected!\n");
23516+}
ae4e228f 23517+EXPORT_SYMBOL(copy_to_user_overflow);
fe2de317 23518+
ae4e228f 23519+#ifdef CONFIG_PAX_MEMORY_UDEREF
bc901d79 23520+void __set_fs(mm_segment_t x)
fe2de317 23521+{
bc901d79
MT
23522+ switch (x.seg) {
23523+ case 0:
23524+ loadsegment(gs, 0);
23525+ break;
23526+ case TASK_SIZE_MAX:
23527+ loadsegment(gs, __USER_DS);
23528+ break;
23529+ case -1UL:
23530+ loadsegment(gs, __KERNEL_DS);
23531+ break;
23532+ default:
23533+ BUG();
23534+ }
23535+ return;
fe2de317 23536+}
71d190be 23537+EXPORT_SYMBOL(__set_fs);
ae4e228f
MT
23538+
23539+void set_fs(mm_segment_t x)
23540+{
58c5fc13 23541+ current_thread_info()->addr_limit = x;
bc901d79 23542+ __set_fs(x);
58c5fc13 23543+}
58c5fc13 23544+EXPORT_SYMBOL(set_fs);
bc901d79 23545+#endif
fe2de317 23546diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
c6e2a6c8 23547index 0d0326f..6a6155b 100644
fe2de317
MT
23548--- a/arch/x86/lib/usercopy_64.c
23549+++ b/arch/x86/lib/usercopy_64.c
c6e2a6c8 23550@@ -16,6 +16,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
df50ba0c
MT
23551 {
23552 long __d0;
23553 might_fault();
8308f9c9
MT
23554+
23555+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
23556+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23557+ addr += PAX_USER_SHADOW_BASE;
8308f9c9
MT
23558+#endif
23559+
df50ba0c
MT
23560 /* no memory constraint because it doesn't change any memory gcc knows
23561 about */
23562 asm volatile(
c6e2a6c8 23563@@ -100,12 +106,20 @@ long strlen_user(const char __user *s)
fe2de317
MT
23564 }
23565 EXPORT_SYMBOL(strlen_user);
df50ba0c 23566
fe2de317
MT
23567-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23568+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
df50ba0c
MT
23569 {
23570- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
6e9df6a3
MT
23571- return copy_user_generic((__force void *)to, (__force void *)from, len);
23572- }
23573- return len;
df50ba0c 23574+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
8308f9c9
MT
23575+
23576+#ifdef CONFIG_PAX_MEMORY_UDEREF
df50ba0c
MT
23577+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23578+ to += PAX_USER_SHADOW_BASE;
23579+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23580+ from += PAX_USER_SHADOW_BASE;
8308f9c9
MT
23581+#endif
23582+
6e9df6a3 23583+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
df50ba0c
MT
23584+ }
23585+ return len;
23586 }
23587 EXPORT_SYMBOL(copy_in_user);
23588
c6e2a6c8 23589@@ -115,7 +129,7 @@ EXPORT_SYMBOL(copy_in_user);
6e9df6a3
MT
23590 * it is not necessary to optimize tail handling.
23591 */
23592 unsigned long
23593-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
fe2de317 23594+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
6e9df6a3
MT
23595 {
23596 char c;
23597 unsigned zero_len;
c6e2a6c8
MT
23598@@ -132,3 +146,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23599 break;
23600 return len;
23601 }
23602+
23603+void copy_from_user_overflow(void)
23604+{
23605+ WARN(1, "Buffer overflow detected!\n");
23606+}
23607+EXPORT_SYMBOL(copy_from_user_overflow);
23608+
23609+void copy_to_user_overflow(void)
23610+{
23611+ WARN(1, "Buffer overflow detected!\n");
23612+}
23613+EXPORT_SYMBOL(copy_to_user_overflow);
fe2de317 23614diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
5e856224 23615index 1fb85db..8b3540b 100644
fe2de317
MT
23616--- a/arch/x86/mm/extable.c
23617+++ b/arch/x86/mm/extable.c
23618@@ -8,7 +8,7 @@ int fixup_exception(struct pt_regs *regs)
58c5fc13
MT
23619 const struct exception_table_entry *fixup;
23620
23621 #ifdef CONFIG_PNPBIOS
23622- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23623+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23624 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23625 extern u32 pnp_bios_is_utter_crap;
23626 pnp_bios_is_utter_crap = 1;
fe2de317 23627diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
c6e2a6c8 23628index 3ecfd1a..304d554 100644
fe2de317
MT
23629--- a/arch/x86/mm/fault.c
23630+++ b/arch/x86/mm/fault.c
6e9df6a3 23631@@ -13,11 +13,18 @@
ae4e228f 23632 #include <linux/perf_event.h> /* perf_sw_event */
bc901d79 23633 #include <linux/hugetlb.h> /* hstate_index_to_shift */
15a11c5b 23634 #include <linux/prefetch.h> /* prefetchw */
58c5fc13
MT
23635+#include <linux/unistd.h>
23636+#include <linux/compiler.h>
23637
23638 #include <asm/traps.h> /* dotraplinkage, ... */
23639 #include <asm/pgalloc.h> /* pgd_*(), ... */
23640 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
4c928ab7 23641 #include <asm/fixmap.h> /* VSYSCALL_START */
58c5fc13 23642+#include <asm/tlbflush.h>
df50ba0c
MT
23643+
23644+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23645+#include <asm/stacktrace.h>
df50ba0c 23646+#endif
58c5fc13
MT
23647
23648 /*
23649 * Page fault error code bits:
fe2de317 23650@@ -55,7 +62,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
58c5fc13
MT
23651 int ret = 0;
23652
23653 /* kprobe_running() needs smp_processor_id() */
23654- if (kprobes_built_in() && !user_mode_vm(regs)) {
23655+ if (kprobes_built_in() && !user_mode(regs)) {
23656 preempt_disable();
23657 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23658 ret = 1;
fe2de317 23659@@ -116,7 +123,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
bc901d79
MT
23660 return !instr_lo || (instr_lo>>1) == 1;
23661 case 0x00:
23662 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23663- if (probe_kernel_address(instr, opcode))
23664+ if (user_mode(regs)) {
6e9df6a3 23665+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
bc901d79
MT
23666+ return 0;
23667+ } else if (probe_kernel_address(instr, opcode))
23668 return 0;
23669
23670 *prefetch = (instr_lo == 0xF) &&
fe2de317 23671@@ -150,7 +160,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
bc901d79
MT
23672 while (instr < max_instr) {
23673 unsigned char opcode;
23674
23675- if (probe_kernel_address(instr, opcode))
23676+ if (user_mode(regs)) {
6e9df6a3 23677+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
bc901d79
MT
23678+ break;
23679+ } else if (probe_kernel_address(instr, opcode))
23680 break;
23681
23682 instr++;
fe2de317 23683@@ -181,6 +194,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
58c5fc13
MT
23684 force_sig_info(si_signo, &info, tsk);
23685 }
23686
6e9df6a3
MT
23687+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23688+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23689+#endif
23690+
58c5fc13
MT
23691+#ifdef CONFIG_PAX_EMUTRAMP
23692+static int pax_handle_fetch_fault(struct pt_regs *regs);
23693+#endif
23694+
23695+#ifdef CONFIG_PAX_PAGEEXEC
23696+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23697+{
23698+ pgd_t *pgd;
23699+ pud_t *pud;
23700+ pmd_t *pmd;
23701+
23702+ pgd = pgd_offset(mm, address);
23703+ if (!pgd_present(*pgd))
23704+ return NULL;
23705+ pud = pud_offset(pgd, address);
23706+ if (!pud_present(*pud))
23707+ return NULL;
23708+ pmd = pmd_offset(pud, address);
23709+ if (!pmd_present(*pmd))
23710+ return NULL;
23711+ return pmd;
23712+}
23713+#endif
23714+
23715 DEFINE_SPINLOCK(pgd_lock);
23716 LIST_HEAD(pgd_list);
23717
6e9df6a3 23718@@ -231,10 +272,22 @@ void vmalloc_sync_all(void)
16454cff
MT
23719 for (address = VMALLOC_START & PMD_MASK;
23720 address >= TASK_SIZE && address < FIXADDR_TOP;
df50ba0c 23721 address += PMD_SIZE) {
df50ba0c
MT
23722+
23723+#ifdef CONFIG_PAX_PER_CPU_PGD
23724+ unsigned long cpu;
23725+#else
23726 struct page *page;
23727+#endif
23728
16454cff 23729 spin_lock(&pgd_lock);
df50ba0c
MT
23730+
23731+#ifdef CONFIG_PAX_PER_CPU_PGD
4c928ab7 23732+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
df50ba0c 23733+ pgd_t *pgd = get_cpu_pgd(cpu);
bc901d79 23734+ pmd_t *ret;
df50ba0c
MT
23735+#else
23736 list_for_each_entry(page, &pgd_list, lru) {
df50ba0c 23737+ pgd_t *pgd = page_address(page);
bc901d79
MT
23738 spinlock_t *pgt_lock;
23739 pmd_t *ret;
23740
6e9df6a3 23741@@ -242,8 +295,13 @@ void vmalloc_sync_all(void)
bc901d79
MT
23742 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
23743
23744 spin_lock(pgt_lock);
23745- ret = vmalloc_sync_one(page_address(page), address);
df50ba0c
MT
23746+#endif
23747+
bc901d79
MT
23748+ ret = vmalloc_sync_one(pgd, address);
23749+
23750+#ifndef CONFIG_PAX_PER_CPU_PGD
23751 spin_unlock(pgt_lock);
23752+#endif
23753
23754 if (!ret)
df50ba0c 23755 break;
fe2de317 23756@@ -277,6 +335,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
df50ba0c
MT
23757 * an interrupt in the middle of a task switch..
23758 */
23759 pgd_paddr = read_cr3();
23760+
23761+#ifdef CONFIG_PAX_PER_CPU_PGD
23762+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23763+#endif
23764+
23765 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23766 if (!pmd_k)
23767 return -1;
fe2de317 23768@@ -372,7 +435,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
df50ba0c
MT
23769 * happen within a race in page table update. In the later
23770 * case just flush:
23771 */
23772+
23773+#ifdef CONFIG_PAX_PER_CPU_PGD
23774+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23775+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23776+#else
23777 pgd = pgd_offset(current->active_mm, address);
23778+#endif
23779+
23780 pgd_ref = pgd_offset_k(address);
23781 if (pgd_none(*pgd_ref))
23782 return -1;
4c928ab7 23783@@ -540,7 +610,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
58c5fc13
MT
23784 static int is_errata100(struct pt_regs *regs, unsigned long address)
23785 {
23786 #ifdef CONFIG_X86_64
23787- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
23788+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
23789 return 1;
23790 #endif
23791 return 0;
4c928ab7 23792@@ -567,7 +637,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
58c5fc13
MT
23793 }
23794
23795 static const char nx_warning[] = KERN_CRIT
23796-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
23797+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
23798
23799 static void
23800 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
4c928ab7 23801@@ -576,15 +646,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
58c5fc13
MT
23802 if (!oops_may_print())
23803 return;
23804
23805- if (error_code & PF_INSTR) {
ae4e228f 23806+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
58c5fc13
MT
23807 unsigned int level;
23808
23809 pte_t *pte = lookup_address(address, &level);
23810
23811 if (pte && pte_present(*pte) && !pte_exec(*pte))
23812- printk(nx_warning, current_uid());
23813+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
fe2de317
MT
23814 }
23815
58c5fc13 23816+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f 23817+ if (init_mm.start_code <= address && address < init_mm.end_code) {
58c5fc13 23818+ if (current->signal->curr_ip)
ae4e228f
MT
23819+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23820+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
58c5fc13
MT
23821+ else
23822+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
23823+ current->comm, task_pid_nr(current), current_uid(), current_euid());
fe2de317 23824+ }
58c5fc13 23825+#endif
fe2de317 23826+
58c5fc13
MT
23827 printk(KERN_ALERT "BUG: unable to handle kernel ");
23828 if (address < PAGE_SIZE)
fe2de317 23829 printk(KERN_CONT "NULL pointer dereference");
5e856224 23830@@ -748,6 +829,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
6e9df6a3
MT
23831 }
23832 #endif
23833
58c5fc13 23834+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
6e9df6a3 23835+ if (pax_is_fetch_fault(regs, error_code, address)) {
58c5fc13
MT
23836+
23837+#ifdef CONFIG_PAX_EMUTRAMP
23838+ switch (pax_handle_fetch_fault(regs)) {
23839+ case 2:
23840+ return;
23841+ }
23842+#endif
23843+
6e9df6a3 23844+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
58c5fc13
MT
23845+ do_group_exit(SIGKILL);
23846+ }
23847+#endif
23848+
6e9df6a3
MT
23849 if (unlikely(show_unhandled_signals))
23850 show_signal_msg(regs, error_code, address, tsk);
58c5fc13 23851
5e856224 23852@@ -844,7 +940,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
6e9df6a3
MT
23853 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
23854 printk(KERN_ERR
23855 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
23856- tsk->comm, tsk->pid, address);
23857+ tsk->comm, task_pid_nr(tsk), address);
23858 code = BUS_MCEERR_AR;
23859 }
23860 #endif
5e856224 23861@@ -900,6 +996,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
58c5fc13
MT
23862 return 1;
23863 }
23864
23865+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23866+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
23867+{
23868+ pte_t *pte;
23869+ pmd_t *pmd;
23870+ spinlock_t *ptl;
23871+ unsigned char pte_mask;
23872+
ae4e228f 23873+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
58c5fc13
MT
23874+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
23875+ return 0;
23876+
23877+ /* PaX: it's our fault, let's handle it if we can */
23878+
23879+ /* PaX: take a look at read faults before acquiring any locks */
23880+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
23881+ /* instruction fetch attempt from a protected page in user mode */
23882+ up_read(&mm->mmap_sem);
23883+
23884+#ifdef CONFIG_PAX_EMUTRAMP
23885+ switch (pax_handle_fetch_fault(regs)) {
23886+ case 2:
23887+ return 1;
23888+ }
23889+#endif
23890+
23891+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
23892+ do_group_exit(SIGKILL);
23893+ }
23894+
23895+ pmd = pax_get_pmd(mm, address);
23896+ if (unlikely(!pmd))
23897+ return 0;
23898+
23899+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
23900+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
23901+ pte_unmap_unlock(pte, ptl);
23902+ return 0;
23903+ }
23904+
23905+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
23906+ /* write attempt to a protected page in user mode */
23907+ pte_unmap_unlock(pte, ptl);
23908+ return 0;
23909+ }
23910+
23911+#ifdef CONFIG_SMP
23912+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
23913+#else
23914+ if (likely(address > get_limit(regs->cs)))
23915+#endif
23916+ {
23917+ set_pte(pte, pte_mkread(*pte));
23918+ __flush_tlb_one(address);
23919+ pte_unmap_unlock(pte, ptl);
23920+ up_read(&mm->mmap_sem);
23921+ return 1;
23922+ }
23923+
23924+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
23925+
23926+ /*
23927+ * PaX: fill DTLB with user rights and retry
23928+ */
23929+ __asm__ __volatile__ (
58c5fc13
MT
23930+ "orb %2,(%1)\n"
23931+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
23932+/*
23933+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
23934+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
23935+ * page fault when examined during a TLB load attempt. this is true not only
23936+ * for PTEs holding a non-present entry but also present entries that will
23937+ * raise a page fault (such as those set up by PaX, or the copy-on-write
23938+ * mechanism). in effect it means that we do *not* need to flush the TLBs
23939+ * for our target pages since their PTEs are simply not in the TLBs at all.
23940+
23941+ * the best thing in omitting it is that we gain around 15-20% speed in the
23942+ * fast path of the page fault handler and can get rid of tracing since we
23943+ * can no longer flush unintended entries.
23944+ */
23945+ "invlpg (%0)\n"
23946+#endif
16454cff 23947+ __copyuser_seg"testb $0,(%0)\n"
58c5fc13 23948+ "xorb %3,(%1)\n"
58c5fc13 23949+ :
bc901d79 23950+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
58c5fc13
MT
23951+ : "memory", "cc");
23952+ pte_unmap_unlock(pte, ptl);
23953+ up_read(&mm->mmap_sem);
23954+ return 1;
23955+}
23956+#endif
23957+
23958 /*
23959 * Handle a spurious fault caused by a stale TLB entry.
23960 *
5e856224 23961@@ -972,6 +1161,9 @@ int show_unhandled_signals = 1;
58c5fc13 23962 static inline int
bc901d79 23963 access_error(unsigned long error_code, struct vm_area_struct *vma)
58c5fc13 23964 {
ae4e228f 23965+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
58c5fc13
MT
23966+ return 1;
23967+
bc901d79 23968 if (error_code & PF_WRITE) {
58c5fc13
MT
23969 /* write, present and write, not present: */
23970 if (unlikely(!(vma->vm_flags & VM_WRITE)))
c6e2a6c8 23971@@ -1005,18 +1197,33 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
58c5fc13
MT
23972 {
23973 struct vm_area_struct *vma;
23974 struct task_struct *tsk;
23975- unsigned long address;
23976 struct mm_struct *mm;
58c5fc13 23977 int fault;
bc901d79 23978 int write = error_code & PF_WRITE;
15a11c5b 23979 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
bc901d79 23980 (write ? FAULT_FLAG_WRITE : 0);
58c5fc13 23981
fe2de317
MT
23982- tsk = current;
23983- mm = tsk->mm;
23984-
23985 /* Get the faulting address: */
23986- address = read_cr2();
df50ba0c
MT
23987+ unsigned long address = read_cr2();
23988+
23989+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23990+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
23991+ if (!search_exception_tables(regs->ip)) {
c6e2a6c8 23992+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
df50ba0c
MT
23993+ bad_area_nosemaphore(regs, error_code, address);
23994+ return;
23995+ }
23996+ if (address < PAX_USER_SHADOW_BASE) {
23997+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
6e9df6a3 23998+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
66a7e928 23999+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
df50ba0c
MT
24000+ } else
24001+ address -= PAX_USER_SHADOW_BASE;
24002+ }
24003+#endif
58c5fc13 24004+
fe2de317
MT
24005+ tsk = current;
24006+ mm = tsk->mm;
58c5fc13 24007
58c5fc13
MT
24008 /*
24009 * Detect and handle instructions that would cause a page fault for
c6e2a6c8 24010@@ -1077,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
58c5fc13
MT
24011 * User-mode registers count as a user access even for any
24012 * potential system fault or CPU buglet:
24013 */
24014- if (user_mode_vm(regs)) {
24015+ if (user_mode(regs)) {
24016 local_irq_enable();
24017 error_code |= PF_USER;
24018 } else {
c6e2a6c8 24019@@ -1132,6 +1339,11 @@ retry:
58c5fc13
MT
24020 might_sleep();
24021 }
24022
24023+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24024+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24025+ return;
24026+#endif
24027+
24028 vma = find_vma(mm, address);
24029 if (unlikely(!vma)) {
24030 bad_area(regs, error_code, address);
c6e2a6c8 24031@@ -1143,18 +1355,24 @@ retry:
58c5fc13
MT
24032 bad_area(regs, error_code, address);
24033 return;
24034 }
24035- if (error_code & PF_USER) {
24036- /*
24037- * Accessing the stack below %sp is always a bug.
24038- * The large cushion allows instructions like enter
24039- * and pusha to work. ("enter $65535, $31" pushes
24040- * 32 pointers and then decrements %sp by 65535.)
24041- */
24042- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24043- bad_area(regs, error_code, address);
24044- return;
24045- }
24046+ /*
24047+ * Accessing the stack below %sp is always a bug.
24048+ * The large cushion allows instructions like enter
24049+ * and pusha to work. ("enter $65535, $31" pushes
24050+ * 32 pointers and then decrements %sp by 65535.)
24051+ */
24052+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24053+ bad_area(regs, error_code, address);
24054+ return;
df50ba0c 24055 }
58c5fc13
MT
24056+
24057+#ifdef CONFIG_PAX_SEGMEXEC
24058+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24059+ bad_area(regs, error_code, address);
24060+ return;
df50ba0c 24061+ }
58c5fc13
MT
24062+#endif
24063+
24064 if (unlikely(expand_stack(vma, address))) {
24065 bad_area(regs, error_code, address);
24066 return;
c6e2a6c8 24067@@ -1209,3 +1427,292 @@ good_area:
58c5fc13
MT
24068
24069 up_read(&mm->mmap_sem);
24070 }
24071+
6e9df6a3
MT
24072+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24073+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24074+{
24075+ struct mm_struct *mm = current->mm;
24076+ unsigned long ip = regs->ip;
24077+
24078+ if (v8086_mode(regs))
24079+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24080+
24081+#ifdef CONFIG_PAX_PAGEEXEC
24082+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24083+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24084+ return true;
24085+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24086+ return true;
24087+ return false;
24088+ }
24089+#endif
24090+
24091+#ifdef CONFIG_PAX_SEGMEXEC
24092+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24093+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24094+ return true;
24095+ return false;
24096+ }
24097+#endif
24098+
24099+ return false;
24100+}
24101+#endif
24102+
58c5fc13
MT
24103+#ifdef CONFIG_PAX_EMUTRAMP
24104+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24105+{
24106+ int err;
24107+
4c928ab7
MT
24108+ do { /* PaX: libffi trampoline emulation */
24109+ unsigned char mov, jmp;
24110+ unsigned int addr1, addr2;
24111+
24112+#ifdef CONFIG_X86_64
24113+ if ((regs->ip + 9) >> 32)
24114+ break;
24115+#endif
24116+
24117+ err = get_user(mov, (unsigned char __user *)regs->ip);
24118+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24119+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24120+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24121+
24122+ if (err)
24123+ break;
24124+
24125+ if (mov == 0xB8 && jmp == 0xE9) {
24126+ regs->ax = addr1;
24127+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24128+ return 2;
24129+ }
24130+ } while (0);
24131+
58c5fc13
MT
24132+ do { /* PaX: gcc trampoline emulation #1 */
24133+ unsigned char mov1, mov2;
24134+ unsigned short jmp;
24135+ unsigned int addr1, addr2;
24136+
24137+#ifdef CONFIG_X86_64
24138+ if ((regs->ip + 11) >> 32)
24139+ break;
24140+#endif
24141+
24142+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24143+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24144+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24145+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24146+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24147+
24148+ if (err)
24149+ break;
24150+
24151+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24152+ regs->cx = addr1;
24153+ regs->ax = addr2;
24154+ regs->ip = addr2;
24155+ return 2;
24156+ }
24157+ } while (0);
24158+
24159+ do { /* PaX: gcc trampoline emulation #2 */
24160+ unsigned char mov, jmp;
24161+ unsigned int addr1, addr2;
24162+
24163+#ifdef CONFIG_X86_64
24164+ if ((regs->ip + 9) >> 32)
24165+ break;
24166+#endif
24167+
24168+ err = get_user(mov, (unsigned char __user *)regs->ip);
24169+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24170+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24171+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24172+
24173+ if (err)
24174+ break;
24175+
24176+ if (mov == 0xB9 && jmp == 0xE9) {
24177+ regs->cx = addr1;
24178+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24179+ return 2;
24180+ }
24181+ } while (0);
24182+
24183+ return 1; /* PaX in action */
24184+}
24185+
24186+#ifdef CONFIG_X86_64
24187+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24188+{
24189+ int err;
24190+
4c928ab7
MT
24191+ do { /* PaX: libffi trampoline emulation */
24192+ unsigned short mov1, mov2, jmp1;
24193+ unsigned char stcclc, jmp2;
24194+ unsigned long addr1, addr2;
24195+
24196+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24197+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24198+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24199+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24200+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24201+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24202+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24203+
24204+ if (err)
24205+ break;
24206+
24207+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24208+ regs->r11 = addr1;
24209+ regs->r10 = addr2;
24210+ if (stcclc == 0xF8)
24211+ regs->flags &= ~X86_EFLAGS_CF;
24212+ else
24213+ regs->flags |= X86_EFLAGS_CF;
24214+ regs->ip = addr1;
24215+ return 2;
24216+ }
24217+ } while (0);
24218+
58c5fc13
MT
24219+ do { /* PaX: gcc trampoline emulation #1 */
24220+ unsigned short mov1, mov2, jmp1;
24221+ unsigned char jmp2;
24222+ unsigned int addr1;
24223+ unsigned long addr2;
24224+
24225+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24226+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24227+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24228+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24229+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24230+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24231+
24232+ if (err)
24233+ break;
24234+
24235+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24236+ regs->r11 = addr1;
24237+ regs->r10 = addr2;
24238+ regs->ip = addr1;
24239+ return 2;
24240+ }
24241+ } while (0);
24242+
24243+ do { /* PaX: gcc trampoline emulation #2 */
24244+ unsigned short mov1, mov2, jmp1;
24245+ unsigned char jmp2;
24246+ unsigned long addr1, addr2;
24247+
24248+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24249+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24250+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24251+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24252+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24253+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24254+
24255+ if (err)
24256+ break;
24257+
24258+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24259+ regs->r11 = addr1;
24260+ regs->r10 = addr2;
24261+ regs->ip = addr1;
24262+ return 2;
24263+ }
24264+ } while (0);
24265+
24266+ return 1; /* PaX in action */
24267+}
24268+#endif
24269+
24270+/*
24271+ * PaX: decide what to do with offenders (regs->ip = fault address)
24272+ *
24273+ * returns 1 when task should be killed
24274+ * 2 when gcc trampoline was detected
24275+ */
24276+static int pax_handle_fetch_fault(struct pt_regs *regs)
24277+{
24278+ if (v8086_mode(regs))
24279+ return 1;
24280+
24281+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24282+ return 1;
24283+
24284+#ifdef CONFIG_X86_32
24285+ return pax_handle_fetch_fault_32(regs);
24286+#else
24287+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24288+ return pax_handle_fetch_fault_32(regs);
24289+ else
24290+ return pax_handle_fetch_fault_64(regs);
24291+#endif
24292+}
24293+#endif
24294+
24295+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
6e9df6a3 24296+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
58c5fc13
MT
24297+{
24298+ long i;
24299+
24300+ printk(KERN_ERR "PAX: bytes at PC: ");
24301+ for (i = 0; i < 20; i++) {
24302+ unsigned char c;
6e9df6a3 24303+ if (get_user(c, (unsigned char __force_user *)pc+i))
58c5fc13
MT
24304+ printk(KERN_CONT "?? ");
24305+ else
24306+ printk(KERN_CONT "%02x ", c);
24307+ }
24308+ printk("\n");
24309+
24310+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
ae4e228f 24311+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
58c5fc13 24312+ unsigned long c;
6e9df6a3 24313+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
58c5fc13
MT
24314+#ifdef CONFIG_X86_32
24315+ printk(KERN_CONT "???????? ");
24316+#else
6e9df6a3
MT
24317+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24318+ printk(KERN_CONT "???????? ???????? ");
24319+ else
24320+ printk(KERN_CONT "???????????????? ");
58c5fc13 24321+#endif
6e9df6a3
MT
24322+ } else {
24323+#ifdef CONFIG_X86_64
24324+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24325+ printk(KERN_CONT "%08x ", (unsigned int)c);
24326+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24327+ } else
24328+#endif
24329+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24330+ }
58c5fc13
MT
24331+ }
24332+ printk("\n");
24333+}
24334+#endif
58c5fc13 24335+
ae4e228f
MT
24336+/**
24337+ * probe_kernel_write(): safely attempt to write to a location
24338+ * @dst: address to write to
24339+ * @src: pointer to the data that shall be written
24340+ * @size: size of the data chunk
24341+ *
24342+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24343+ * happens, handle that and return -EFAULT.
24344+ */
24345+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24346+{
24347+ long ret;
24348+ mm_segment_t old_fs = get_fs();
24349+
24350+ set_fs(KERNEL_DS);
24351+ pagefault_disable();
24352+ pax_open_kernel();
6e9df6a3 24353+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
ae4e228f
MT
24354+ pax_close_kernel();
24355+ pagefault_enable();
24356+ set_fs(old_fs);
24357+
24358+ return ret ? -EFAULT : 0;
24359+}
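For context on the EMUTRAMP hunks above: pax_handle_fetch_fault_32() refuses to execute the trampolines that gcc (and libffi) write at run time into non-executable memory for nested functions, and instead emulates them from the fault handler. The "emulation #2" branch matches a 10-byte mov/jmp pair; the struct below is purely illustrative (it is not part of the patch) and only documents the byte layout implied by the get_user() offsets in that branch.

/* Illustrative layout of the i386 gcc nested-function trampoline matched by
 * "emulation #2" (mov == 0xB9, jmp == 0xE9); offsets mirror the get_user()
 * reads in pax_handle_fetch_fault_32(). */
struct demo_i386_trampoline {
	unsigned char mov;	/* 0xB9: movl $chain, %ecx        (offset 0) */
	unsigned int  chain;	/* static chain value             (offset 1) */
	unsigned char jmp;	/* 0xE9: jmp rel32                (offset 5) */
	unsigned int  rel;	/* target - (trampoline + 10)     (offset 6) */
} __attribute__((packed));

/* The emulation mirrors what the CPU would have done:
 *	regs->cx = chain;
 *	regs->ip = trampoline_address + rel + 10;
 * and returns 2 so the task is resumed rather than killed. */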
fe2de317 24360diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
4c928ab7 24361index dd74e46..7d26398 100644
fe2de317
MT
24362--- a/arch/x86/mm/gup.c
24363+++ b/arch/x86/mm/gup.c
4c928ab7 24364@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
ae4e228f
MT
24365 addr = start;
24366 len = (unsigned long) nr_pages << PAGE_SHIFT;
24367 end = start + len;
24368- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24369+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24370 (void __user *)start, len)))
24371 return 0;
58c5fc13 24372
fe2de317 24373diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
c6e2a6c8 24374index 6f31ee5..8ee4164 100644
fe2de317
MT
24375--- a/arch/x86/mm/highmem_32.c
24376+++ b/arch/x86/mm/highmem_32.c
24377@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
58c5fc13
MT
24378 idx = type + KM_TYPE_NR*smp_processor_id();
24379 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24380 BUG_ON(!pte_none(*(kmap_pte-idx)));
24381+
ae4e228f 24382+ pax_open_kernel();
58c5fc13 24383 set_pte(kmap_pte-idx, mk_pte(page, prot));
ae4e228f 24384+ pax_close_kernel();
fe2de317
MT
24385+
24386 arch_flush_lazy_mmu_mode();
58c5fc13 24387
58c5fc13 24388 return (void *)vaddr;
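pax_open_kernel() and pax_close_kernel(), which bracket set_pte() in the kmap_atomic_prot() hunk above (and again in iomap_32.c further down), are defined elsewhere in this patch. Conceptually they make otherwise read-only kernel page tables writable for the duration of a single update. One common way to do that on x86, shown here only as a hedged sketch with invented names, is to clear CR0.WP with preemption disabled:

#include <linux/preempt.h>
#include <asm/processor-flags.h>	/* X86_CR0_WP */
#include <asm/special_insns.h>		/* read_cr0(), write_cr0() */

/* Sketch only: bracket one privileged write to data that KERNEXEC keeps RO. */
static inline unsigned long demo_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);	/* allow kernel writes through RO PTEs */
	return cr0;
}

static inline void demo_close_kernel(unsigned long cr0)
{
	write_cr0(cr0);			/* restore write protection */
	preempt_enable();
}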
fe2de317 24389diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
c6e2a6c8 24390index f6679a7..8f795a3 100644
fe2de317
MT
24391--- a/arch/x86/mm/hugetlbpage.c
24392+++ b/arch/x86/mm/hugetlbpage.c
24393@@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
58c5fc13
MT
24394 struct hstate *h = hstate_file(file);
24395 struct mm_struct *mm = current->mm;
24396 struct vm_area_struct *vma;
24397- unsigned long start_addr;
24398+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24399+
24400+#ifdef CONFIG_PAX_SEGMEXEC
24401+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24402+ pax_task_size = SEGMEXEC_TASK_SIZE;
24403+#endif
6892158b
MT
24404+
24405+ pax_task_size -= PAGE_SIZE;
58c5fc13
MT
24406
24407 if (len > mm->cached_hole_size) {
24408- start_addr = mm->free_area_cache;
24409+ start_addr = mm->free_area_cache;
24410 } else {
24411- start_addr = TASK_UNMAPPED_BASE;
24412- mm->cached_hole_size = 0;
24413+ start_addr = mm->mmap_base;
24414+ mm->cached_hole_size = 0;
24415 }
24416
24417 full_search:
6892158b 24418@@ -280,26 +287,27 @@ full_search:
58c5fc13
MT
24419
24420 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24421 /* At this point: (!vma || addr < vma->vm_end). */
24422- if (TASK_SIZE - len < addr) {
24423+ if (pax_task_size - len < addr) {
24424 /*
24425 * Start a new search - just in case we missed
24426 * some holes.
24427 */
24428- if (start_addr != TASK_UNMAPPED_BASE) {
24429- start_addr = TASK_UNMAPPED_BASE;
24430+ if (start_addr != mm->mmap_base) {
24431+ start_addr = mm->mmap_base;
24432 mm->cached_hole_size = 0;
24433 goto full_search;
24434 }
57199397
MT
24435 return -ENOMEM;
24436 }
24437- if (!vma || addr + len <= vma->vm_start) {
24438- mm->free_area_cache = addr + len;
24439- return addr;
24440- }
24441+ if (check_heap_stack_gap(vma, addr, len))
24442+ break;
24443 if (addr + mm->cached_hole_size < vma->vm_start)
24444 mm->cached_hole_size = vma->vm_start - addr;
24445 addr = ALIGN(vma->vm_end, huge_page_size(h));
24446 }
24447+
24448+ mm->free_area_cache = addr + len;
24449+ return addr;
24450 }
24451
24452 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
c6e2a6c8 24453@@ -310,9 +318,8 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
58c5fc13 24454 struct mm_struct *mm = current->mm;
c6e2a6c8
MT
24455 struct vm_area_struct *vma;
24456 unsigned long base = mm->mmap_base;
24457- unsigned long addr = addr0;
24458+ unsigned long addr;
58c5fc13 24459 unsigned long largest_hole = mm->cached_hole_size;
c6e2a6c8 24460- unsigned long start_addr;
58c5fc13
MT
24461
24462 /* don't allow allocations above current base */
24463 if (mm->free_area_cache > base)
c6e2a6c8 24464@@ -322,16 +329,15 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
58c5fc13
MT
24465 largest_hole = 0;
24466 mm->free_area_cache = base;
24467 }
24468-try_again:
c6e2a6c8
MT
24469- start_addr = mm->free_area_cache;
24470
58c5fc13
MT
24471 /* make sure it can fit in the remaining address space */
24472 if (mm->free_area_cache < len)
24473 goto fail;
16454cff 24474
66a7e928 24475 /* either no address requested or can't fit in requested address hole */
16454cff 24476- addr = (mm->free_area_cache - len) & huge_page_mask(h);
c6e2a6c8 24477+ addr = mm->free_area_cache - len;
57199397 24478 do {
16454cff 24479+ addr &= huge_page_mask(h);
57199397
MT
24480 /*
24481 * Lookup failure means no vma is above this address,
24482 * i.e. return with success:
c6e2a6c8
MT
24483@@ -340,10 +346,10 @@ try_again:
24484 if (!vma)
24485 return addr;
24486
24487- if (addr + len <= vma->vm_start) {
57199397
MT
24488+ if (check_heap_stack_gap(vma, addr, len)) {
24489 /* remember the address as a hint for next time */
24490- mm->cached_hole_size = largest_hole;
24491- return (mm->free_area_cache = addr);
57199397
MT
24492+ mm->cached_hole_size = largest_hole;
24493+ return (mm->free_area_cache = addr);
c6e2a6c8
MT
24494 } else if (mm->free_area_cache == vma->vm_end) {
24495 /* pull free_area_cache down to the first hole */
24496 mm->free_area_cache = vma->vm_start;
24497@@ -352,29 +358,34 @@ try_again:
57199397
MT
24498
24499 /* remember the largest hole we saw so far */
24500 if (addr + largest_hole < vma->vm_start)
24501- largest_hole = vma->vm_start - addr;
24502+ largest_hole = vma->vm_start - addr;
24503
24504 /* try just below the current vma->vm_start */
16454cff
MT
24505- addr = (vma->vm_start - len) & huge_page_mask(h);
24506- } while (len <= vma->vm_start);
24507+ addr = skip_heap_stack_gap(vma, len);
24508+ } while (!IS_ERR_VALUE(addr));
58c5fc13
MT
24509
24510 fail:
24511 /*
24512- * if hint left us with no space for the requested
24513- * mapping then try again:
24514- */
c6e2a6c8 24515- if (start_addr != base) {
58c5fc13
MT
24516- mm->free_area_cache = base;
24517- largest_hole = 0;
58c5fc13
MT
24518- goto try_again;
24519- }
24520- /*
24521 * A failed mmap() very likely causes application failure,
24522 * so fall back to the bottom-up function here. This scenario
24523 * can happen with large stack limits and large mmap()
24524 * allocations.
24525 */
24526- mm->free_area_cache = TASK_UNMAPPED_BASE;
24527+
24528+#ifdef CONFIG_PAX_SEGMEXEC
24529+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24530+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24531+ else
24532+#endif
24533+
24534+ mm->mmap_base = TASK_UNMAPPED_BASE;
24535+
24536+#ifdef CONFIG_PAX_RANDMMAP
24537+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24538+ mm->mmap_base += mm->delta_mmap;
24539+#endif
24540+
24541+ mm->free_area_cache = mm->mmap_base;
24542 mm->cached_hole_size = ~0UL;
24543 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24544 len, pgoff, flags);
c6e2a6c8 24545@@ -382,6 +393,7 @@ fail:
58c5fc13
MT
24546 /*
24547 * Restore the topdown base:
24548 */
24549+ mm->mmap_base = base;
24550 mm->free_area_cache = base;
24551 mm->cached_hole_size = ~0UL;
24552
c6e2a6c8 24553@@ -395,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
58c5fc13
MT
24554 struct hstate *h = hstate_file(file);
24555 struct mm_struct *mm = current->mm;
24556 struct vm_area_struct *vma;
24557+ unsigned long pax_task_size = TASK_SIZE;
24558
24559 if (len & ~huge_page_mask(h))
24560 return -EINVAL;
24561- if (len > TASK_SIZE)
24562+
24563+#ifdef CONFIG_PAX_SEGMEXEC
24564+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24565+ pax_task_size = SEGMEXEC_TASK_SIZE;
24566+#endif
24567+
6892158b
MT
24568+ pax_task_size -= PAGE_SIZE;
24569+
58c5fc13
MT
24570+ if (len > pax_task_size)
24571 return -ENOMEM;
24572
24573 if (flags & MAP_FIXED) {
c6e2a6c8 24574@@ -410,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
58c5fc13
MT
24575 if (addr) {
24576 addr = ALIGN(addr, huge_page_size(h));
24577 vma = find_vma(mm, addr);
24578- if (TASK_SIZE - len >= addr &&
57199397
MT
24579- (!vma || addr + len <= vma->vm_start))
24580+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
58c5fc13
MT
24581 return addr;
24582 }
57199397 24583 if (mm->get_unmapped_area == arch_get_unmapped_area)
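check_heap_stack_gap() and skip_heap_stack_gap(), which replace the bare "addr + len <= vma->vm_start" tests throughout the hugetlb allocator above, are introduced elsewhere in this patch. The intent is that a candidate mapping must not only fit below the next vma but also keep a configurable guard distance from a stack that grows down towards it. A rough restatement under that assumption (the helper name and guard parameter here are illustrative, not the patch's):

/* Rough sketch of the gap check; guard_pages stands in for the sysctl the
 * real helper consults. */
static bool demo_gap_ok(const struct vm_area_struct *vma,
			unsigned long addr, unsigned long len,
			unsigned long guard_pages)
{
	if (!vma)
		return true;				/* nothing above us */
	if (vma->vm_flags & VM_GROWSDOWN)		/* next vma is a stack */
		return addr + len + (guard_pages << PAGE_SHIFT) <= vma->vm_start;
	return addr + len <= vma->vm_start;		/* ordinary neighbour */
}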
fe2de317 24584diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
c6e2a6c8 24585index 4f0cec7..00976ce 100644
fe2de317
MT
24586--- a/arch/x86/mm/init.c
24587+++ b/arch/x86/mm/init.c
c6e2a6c8 24588@@ -16,6 +16,8 @@
4c928ab7
MT
24589 #include <asm/tlb.h>
24590 #include <asm/proto.h>
5e856224 24591 #include <asm/dma.h> /* for MAX_DMA_PFN */
4c928ab7 24592+#include <asm/desc.h>
c6e2a6c8 24593+#include <asm/bios_ebda.h>
4c928ab7
MT
24594
24595 unsigned long __initdata pgt_buf_start;
24596 unsigned long __meminitdata pgt_buf_end;
c6e2a6c8 24597@@ -32,7 +34,7 @@ int direct_gbpages
fe2de317
MT
24598 static void __init find_early_table_space(unsigned long end, int use_pse,
24599 int use_gbpages)
24600 {
24601- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
24602+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
24603 phys_addr_t base;
24604
24605 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
c6e2a6c8
MT
24606@@ -311,10 +313,37 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24607 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24608 * mmio resources as well as potential bios/acpi data regions.
fe2de317 24609 */
c6e2a6c8
MT
24610+
24611+#ifdef CONFIG_GRKERNSEC_KMEM
24612+static unsigned int ebda_start __read_only;
24613+static unsigned int ebda_end __read_only;
24614+#endif
24615+
fe2de317
MT
24616 int devmem_is_allowed(unsigned long pagenr)
24617 {
24618+#ifdef CONFIG_GRKERNSEC_KMEM
24619+ /* allow BDA */
24620+ if (!pagenr)
24621+ return 1;
24622+ /* allow EBDA */
c6e2a6c8 24623+ if (pagenr >= ebda_start && pagenr < ebda_end)
fe2de317
MT
24624+ return 1;
24625+#else
24626+ if (!pagenr)
24627+ return 1;
24628+#ifdef CONFIG_VM86
24629+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
24630+ return 1;
24631+#endif
24632+#endif
24633+
24634+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24635+ return 1;
24636+#ifdef CONFIG_GRKERNSEC_KMEM
24637+ /* throw out everything else below 1MB */
24638 if (pagenr <= 256)
24639- return 1;
24640+ return 0;
24641+#endif
24642 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24643 return 0;
24644 if (!page_is_ram(pagenr))
c6e2a6c8
MT
24645@@ -371,8 +400,116 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24646 #endif
24647 }
fe2de317 24648
c6e2a6c8
MT
24649+#ifdef CONFIG_GRKERNSEC_KMEM
24650+static inline void gr_init_ebda(void)
24651+{
24652+ unsigned int ebda_addr;
24653+ unsigned int ebda_size = 0;
24654+
24655+ ebda_addr = get_bios_ebda();
24656+ if (ebda_addr) {
24657+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
24658+ ebda_size <<= 10;
24659+ }
24660+ if (ebda_addr && ebda_size) {
24661+ ebda_start = ebda_addr >> PAGE_SHIFT;
24662+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
24663+ } else {
24664+ ebda_start = 0x9f000 >> PAGE_SHIFT;
24665+ ebda_end = 0xa0000 >> PAGE_SHIFT;
24666+ }
24667+}
24668+#else
24669+static inline void gr_init_ebda(void) { }
24670+#endif
24671+
fe2de317
MT
24672 void free_initmem(void)
24673 {
fe2de317
MT
24674+#ifdef CONFIG_PAX_KERNEXEC
24675+#ifdef CONFIG_X86_32
24676+ /* PaX: limit KERNEL_CS to actual size */
24677+ unsigned long addr, limit;
24678+ struct desc_struct d;
24679+ int cpu;
c6e2a6c8
MT
24680+#else
24681+ pgd_t *pgd;
24682+ pud_t *pud;
24683+ pmd_t *pmd;
24684+ unsigned long addr, end;
24685+#endif
24686+#endif
24687+
24688+ gr_init_ebda();
fe2de317 24689+
c6e2a6c8
MT
24690+#ifdef CONFIG_PAX_KERNEXEC
24691+#ifdef CONFIG_X86_32
fe2de317
MT
24692+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24693+ limit = (limit - 1UL) >> PAGE_SHIFT;
24694+
24695+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
4c928ab7 24696+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
fe2de317
MT
24697+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24698+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24699+ }
24700+
24701+ /* PaX: make KERNEL_CS read-only */
24702+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24703+ if (!paravirt_enabled())
24704+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24705+/*
24706+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24707+ pgd = pgd_offset_k(addr);
24708+ pud = pud_offset(pgd, addr);
24709+ pmd = pmd_offset(pud, addr);
24710+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24711+ }
24712+*/
24713+#ifdef CONFIG_X86_PAE
24714+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24715+/*
24716+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24717+ pgd = pgd_offset_k(addr);
24718+ pud = pud_offset(pgd, addr);
24719+ pmd = pmd_offset(pud, addr);
24720+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24721+ }
24722+*/
24723+#endif
24724+
24725+#ifdef CONFIG_MODULES
24726+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24727+#endif
24728+
24729+#else
fe2de317
MT
24730+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24731+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24732+ pgd = pgd_offset_k(addr);
24733+ pud = pud_offset(pgd, addr);
24734+ pmd = pmd_offset(pud, addr);
24735+ if (!pmd_present(*pmd))
24736+ continue;
24737+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24738+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24739+ else
24740+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24741+ }
24742+
24743+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24744+ end = addr + KERNEL_IMAGE_SIZE;
24745+ for (; addr < end; addr += PMD_SIZE) {
24746+ pgd = pgd_offset_k(addr);
24747+ pud = pud_offset(pgd, addr);
24748+ pmd = pmd_offset(pud, addr);
24749+ if (!pmd_present(*pmd))
24750+ continue;
24751+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24752+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24753+ }
24754+#endif
24755+
24756+ flush_tlb_all();
24757+#endif
24758+
24759 free_init_pages("unused kernel memory",
24760 (unsigned long)(&__init_begin),
24761 (unsigned long)(&__init_end));
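To make the gr_init_ebda()/devmem_is_allowed() interplay above concrete, here is a worked example with made-up but typical firmware values:

/* Example values (illustrative only):
 *   get_bios_ebda()        -> 0x9fc00
 *   first byte of the EBDA -> 1           (EBDA size in KiB)
 * gives
 *   ebda_size  = 1 << 10                  = 0x400
 *   ebda_start = 0x9fc00 >> PAGE_SHIFT    = 0x9f
 *   ebda_end   = min(PAGE_ALIGN(0x9fc00 + 0x400), 0xa0000) >> PAGE_SHIFT
 *              = 0xa0000 >> PAGE_SHIFT    = 0xa0
 * so with GRKERNSEC_KMEM, devmem_is_allowed() lets /dev/mem read page 0
 * (the BDA), page 0x9f (the EBDA) and the ISA hole 0xa0..0xff, while every
 * other page below 1 MiB is rejected by the "pagenr <= 256" branch that now
 * returns 0. */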
24762diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
c6e2a6c8 24763index 575d86f..4987469 100644
fe2de317
MT
24764--- a/arch/x86/mm/init_32.c
24765+++ b/arch/x86/mm/init_32.c
c6e2a6c8 24766@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
58c5fc13
MT
24767 }
24768
24769 /*
24770- * Creates a middle page table and puts a pointer to it in the
24771- * given global directory entry. This only returns the gd entry
24772- * in non-PAE compilation mode, since the middle layer is folded.
24773- */
24774-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24775-{
24776- pud_t *pud;
24777- pmd_t *pmd_table;
24778-
24779-#ifdef CONFIG_X86_PAE
24780- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24781- if (after_bootmem)
ae4e228f 24782- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
58c5fc13
MT
24783- else
24784- pmd_table = (pmd_t *)alloc_low_page();
24785- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24786- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24787- pud = pud_offset(pgd, 0);
24788- BUG_ON(pmd_table != pmd_offset(pud, 0));
24789-
24790- return pmd_table;
24791- }
24792-#endif
24793- pud = pud_offset(pgd, 0);
24794- pmd_table = pmd_offset(pud, 0);
24795-
24796- return pmd_table;
24797-}
24798-
24799-/*
24800 * Create a page table and place a pointer to it in a middle page
24801 * directory entry:
24802 */
c6e2a6c8 24803@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
58c5fc13
MT
24804 page_table = (pte_t *)alloc_low_page();
24805
24806 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
24807+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24808+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
24809+#else
24810 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
24811+#endif
24812 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
24813 }
24814
24815 return pte_offset_kernel(pmd, 0);
24816 }
24817
24818+static pmd_t * __init one_md_table_init(pgd_t *pgd)
24819+{
24820+ pud_t *pud;
24821+ pmd_t *pmd_table;
24822+
24823+ pud = pud_offset(pgd, 0);
24824+ pmd_table = pmd_offset(pud, 0);
24825+
24826+ return pmd_table;
24827+}
24828+
24829 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
24830 {
24831 int pgd_idx = pgd_index(vaddr);
c6e2a6c8 24832@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
58c5fc13
MT
24833 int pgd_idx, pmd_idx;
24834 unsigned long vaddr;
24835 pgd_t *pgd;
24836+ pud_t *pud;
24837 pmd_t *pmd;
24838 pte_t *pte = NULL;
24839
c6e2a6c8 24840@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
58c5fc13
MT
24841 pgd = pgd_base + pgd_idx;
24842
24843 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
24844- pmd = one_md_table_init(pgd);
24845- pmd = pmd + pmd_index(vaddr);
24846+ pud = pud_offset(pgd, vaddr);
24847+ pmd = pmd_offset(pud, vaddr);
24848+
24849+#ifdef CONFIG_X86_PAE
24850+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24851+#endif
24852+
24853 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
24854 pmd++, pmd_idx++) {
24855 pte = page_table_kmap_check(one_page_table_init(pmd),
c6e2a6c8 24856@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
58c5fc13
MT
24857 }
24858 }
24859
24860-static inline int is_kernel_text(unsigned long addr)
24861+static inline int is_kernel_text(unsigned long start, unsigned long end)
24862 {
16454cff 24863- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
58c5fc13
MT
24864- return 1;
24865- return 0;
ae4e228f 24866+ if ((start > ktla_ktva((unsigned long)_etext) ||
58c5fc13
MT
24867+ end <= ktla_ktva((unsigned long)_stext)) &&
24868+ (start > ktla_ktva((unsigned long)_einittext) ||
24869+ end <= ktla_ktva((unsigned long)_sinittext)) &&
ae4e228f
MT
24870+
24871+#ifdef CONFIG_ACPI_SLEEP
24872+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
24873+#endif
24874+
58c5fc13
MT
24875+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
24876+ return 0;
24877+ return 1;
24878 }
24879
24880 /*
c6e2a6c8 24881@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
df50ba0c 24882 unsigned long last_map_addr = end;
58c5fc13
MT
24883 unsigned long start_pfn, end_pfn;
24884 pgd_t *pgd_base = swapper_pg_dir;
24885- int pgd_idx, pmd_idx, pte_ofs;
24886+ unsigned int pgd_idx, pmd_idx, pte_ofs;
24887 unsigned long pfn;
24888 pgd_t *pgd;
24889+ pud_t *pud;
24890 pmd_t *pmd;
24891 pte_t *pte;
24892 unsigned pages_2m, pages_4k;
c6e2a6c8 24893@@ -280,8 +281,13 @@ repeat:
58c5fc13
MT
24894 pfn = start_pfn;
24895 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24896 pgd = pgd_base + pgd_idx;
24897- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
24898- pmd = one_md_table_init(pgd);
24899+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
24900+ pud = pud_offset(pgd, 0);
24901+ pmd = pmd_offset(pud, 0);
24902+
24903+#ifdef CONFIG_X86_PAE
24904+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
24905+#endif
24906
24907 if (pfn >= end_pfn)
24908 continue;
c6e2a6c8 24909@@ -293,14 +299,13 @@ repeat:
58c5fc13
MT
24910 #endif
24911 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
24912 pmd++, pmd_idx++) {
24913- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
24914+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
24915
24916 /*
24917 * Map with big pages if possible, otherwise
24918 * create normal page tables:
24919 */
24920 if (use_pse) {
24921- unsigned int addr2;
24922 pgprot_t prot = PAGE_KERNEL_LARGE;
24923 /*
24924 * first pass will use the same initial
c6e2a6c8 24925@@ -310,11 +315,7 @@ repeat:
58c5fc13
MT
24926 __pgprot(PTE_IDENT_ATTR |
24927 _PAGE_PSE);
24928
24929- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
24930- PAGE_OFFSET + PAGE_SIZE-1;
24931-
24932- if (is_kernel_text(addr) ||
24933- is_kernel_text(addr2))
24934+ if (is_kernel_text(address, address + PMD_SIZE))
24935 prot = PAGE_KERNEL_LARGE_EXEC;
24936
24937 pages_2m++;
c6e2a6c8 24938@@ -331,7 +332,7 @@ repeat:
58c5fc13
MT
24939 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
24940 pte += pte_ofs;
24941 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
24942- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
24943+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
24944 pgprot_t prot = PAGE_KERNEL;
24945 /*
24946 * first pass will use the same initial
c6e2a6c8 24947@@ -339,7 +340,7 @@ repeat:
58c5fc13
MT
24948 */
24949 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
24950
24951- if (is_kernel_text(addr))
24952+ if (is_kernel_text(address, address + PAGE_SIZE))
24953 prot = PAGE_KERNEL_EXEC;
24954
24955 pages_4k++;
c6e2a6c8 24956@@ -465,7 +466,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
58c5fc13
MT
24957
24958 pud = pud_offset(pgd, va);
24959 pmd = pmd_offset(pud, va);
24960- if (!pmd_present(*pmd))
24961+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
24962 break;
24963
24964 pte = pte_offset_kernel(pmd, va);
c6e2a6c8 24965@@ -517,12 +518,10 @@ void __init early_ioremap_page_table_range_init(void)
58c5fc13
MT
24966
24967 static void __init pagetable_init(void)
24968 {
24969- pgd_t *pgd_base = swapper_pg_dir;
24970-
24971- permanent_kmaps_init(pgd_base);
24972+ permanent_kmaps_init(swapper_pg_dir);
24973 }
24974
58c5fc13
MT
24975-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24976+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
24977 EXPORT_SYMBOL_GPL(__supported_pte_mask);
24978
24979 /* user-defined highmem size */
c6e2a6c8 24980@@ -734,6 +733,12 @@ void __init mem_init(void)
df50ba0c
MT
24981
24982 pci_iommu_alloc();
24983
24984+#ifdef CONFIG_PAX_PER_CPU_PGD
24985+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
24986+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
24987+ KERNEL_PGD_PTRS);
24988+#endif
24989+
24990 #ifdef CONFIG_FLATMEM
24991 BUG_ON(!mem_map);
24992 #endif
c6e2a6c8 24993@@ -760,7 +765,7 @@ void __init mem_init(void)
5e856224 24994 reservedpages++;
58c5fc13
MT
24995
24996 codesize = (unsigned long) &_etext - (unsigned long) &_text;
24997- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
24998+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
24999 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25000
ae4e228f 25001 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
c6e2a6c8 25002@@ -801,10 +806,10 @@ void __init mem_init(void)
58c5fc13
MT
25003 ((unsigned long)&__init_end -
25004 (unsigned long)&__init_begin) >> 10,
25005
25006- (unsigned long)&_etext, (unsigned long)&_edata,
25007- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25008+ (unsigned long)&_sdata, (unsigned long)&_edata,
25009+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25010
25011- (unsigned long)&_text, (unsigned long)&_etext,
25012+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25013 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25014
25015 /*
c6e2a6c8 25016@@ -882,6 +887,7 @@ void set_kernel_text_rw(void)
ae4e228f
MT
25017 if (!kernel_set_to_readonly)
25018 return;
58c5fc13 25019
ae4e228f
MT
25020+ start = ktla_ktva(start);
25021 pr_debug("Set kernel text: %lx - %lx for read write\n",
25022 start, start+size);
25023
c6e2a6c8 25024@@ -896,6 +902,7 @@ void set_kernel_text_ro(void)
ae4e228f
MT
25025 if (!kernel_set_to_readonly)
25026 return;
25027
25028+ start = ktla_ktva(start);
25029 pr_debug("Set kernel text: %lx - %lx for read only\n",
25030 start, start+size);
25031
c6e2a6c8 25032@@ -924,6 +931,7 @@ void mark_rodata_ro(void)
ae4e228f
MT
25033 unsigned long start = PFN_ALIGN(_text);
25034 unsigned long size = PFN_ALIGN(_etext) - start;
25035
25036+ start = ktla_ktva(start);
25037 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25038 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25039 size >> 10);
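The rewritten is_kernel_text() in the init_32.c hunks above takes a [start, end) range and asks whether it overlaps any region that must stay executable (kernel text, init text, the ACPI wakeup code and the low BIOS area, each wrapped in ktla_ktva(), which compensates for the shifted kernel text mapping KERNEXEC sets up on i386), but it spells this as the negated conjunction of per-range disjointness tests. Written directly, each per-range test is just an overlap predicate:

/* One range's worth of the condition above: is_kernel_text() returns 1 as
 * soon as [start, end) overlaps the closed range [lo, hi]. */
static inline int demo_overlaps(unsigned long start, unsigned long end,
				unsigned long lo, unsigned long hi)
{
	return !(start > hi || end <= lo);
}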
fe2de317 25040diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
c6e2a6c8 25041index fc18be0..e539653 100644
fe2de317
MT
25042--- a/arch/x86/mm/init_64.c
25043+++ b/arch/x86/mm/init_64.c
c6e2a6c8 25044@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
ae4e228f
MT
25045 * around without checking the pgd every time.
25046 */
25047
25048-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
25049+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
25050 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25051
25052 int force_personality32;
c6e2a6c8 25053@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
16454cff 25054
bc901d79
MT
25055 for (address = start; address <= end; address += PGDIR_SIZE) {
25056 const pgd_t *pgd_ref = pgd_offset_k(address);
bc901d79
MT
25057+
25058+#ifdef CONFIG_PAX_PER_CPU_PGD
25059+ unsigned long cpu;
25060+#else
25061 struct page *page;
25062+#endif
25063
25064 if (pgd_none(*pgd_ref))
25065 continue;
25066
16454cff 25067 spin_lock(&pgd_lock);
bc901d79
MT
25068+
25069+#ifdef CONFIG_PAX_PER_CPU_PGD
4c928ab7 25070+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
bc901d79
MT
25071+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
25072+#else
25073 list_for_each_entry(page, &pgd_list, lru) {
25074 pgd_t *pgd;
25075 spinlock_t *pgt_lock;
c6e2a6c8 25076@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
16454cff 25077 /* the pgt_lock only for Xen */
bc901d79
MT
25078 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
25079 spin_lock(pgt_lock);
25080+#endif
25081
25082 if (pgd_none(*pgd))
25083 set_pgd(pgd, *pgd_ref);
c6e2a6c8 25084@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
bc901d79
MT
25085 BUG_ON(pgd_page_vaddr(*pgd)
25086 != pgd_page_vaddr(*pgd_ref));
25087
25088+#ifndef CONFIG_PAX_PER_CPU_PGD
25089 spin_unlock(pgt_lock);
25090+#endif
25091+
25092 }
16454cff 25093 spin_unlock(&pgd_lock);
bc901d79 25094 }
c6e2a6c8 25095@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
5e856224
MT
25096 {
25097 if (pgd_none(*pgd)) {
25098 pud_t *pud = (pud_t *)spp_getpage();
25099- pgd_populate(&init_mm, pgd, pud);
25100+ pgd_populate_kernel(&init_mm, pgd, pud);
25101 if (pud != pud_offset(pgd, 0))
25102 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
25103 pud, pud_offset(pgd, 0));
c6e2a6c8 25104@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
5e856224
MT
25105 {
25106 if (pud_none(*pud)) {
25107 pmd_t *pmd = (pmd_t *) spp_getpage();
25108- pud_populate(&init_mm, pud, pmd);
25109+ pud_populate_kernel(&init_mm, pud, pmd);
25110 if (pmd != pmd_offset(pud, 0))
25111 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
25112 pmd, pmd_offset(pud, 0));
c6e2a6c8 25113@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
58c5fc13
MT
25114 pmd = fill_pmd(pud, vaddr);
25115 pte = fill_pte(pmd, vaddr);
25116
ae4e228f 25117+ pax_open_kernel();
58c5fc13 25118 set_pte(pte, new_pte);
ae4e228f 25119+ pax_close_kernel();
58c5fc13 25120
58c5fc13
MT
25121 /*
25122 * It's enough to flush this one mapping.
c6e2a6c8 25123@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
58c5fc13
MT
25124 pgd = pgd_offset_k((unsigned long)__va(phys));
25125 if (pgd_none(*pgd)) {
25126 pud = (pud_t *) spp_getpage();
25127- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25128- _PAGE_USER));
25129+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25130 }
25131 pud = pud_offset(pgd, (unsigned long)__va(phys));
25132 if (pud_none(*pud)) {
25133 pmd = (pmd_t *) spp_getpage();
25134- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25135- _PAGE_USER));
25136+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25137 }
25138 pmd = pmd_offset(pud, phys);
25139 BUG_ON(!pmd_none(*pmd));
c6e2a6c8 25140@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
6e9df6a3
MT
25141 if (pfn >= pgt_buf_top)
25142 panic("alloc_low_page: ran out of memory");
25143
25144- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25145+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
25146 clear_page(adr);
25147 *phys = pfn * PAGE_SIZE;
25148 return adr;
c6e2a6c8 25149@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
6e9df6a3
MT
25150
25151 phys = __pa(virt);
25152 left = phys & (PAGE_SIZE - 1);
25153- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25154+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
25155 adr = (void *)(((unsigned long)adr) | left);
25156
25157 return adr;
c6e2a6c8 25158@@ -545,7 +559,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
5e856224
MT
25159 unmap_low_page(pmd);
25160
25161 spin_lock(&init_mm.page_table_lock);
25162- pud_populate(&init_mm, pud, __va(pmd_phys));
25163+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
25164 spin_unlock(&init_mm.page_table_lock);
25165 }
25166 __flush_tlb_all();
c6e2a6c8 25167@@ -591,7 +605,7 @@ kernel_physical_mapping_init(unsigned long start,
5e856224
MT
25168 unmap_low_page(pud);
25169
25170 spin_lock(&init_mm.page_table_lock);
25171- pgd_populate(&init_mm, pgd, __va(pud_phys));
25172+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
25173 spin_unlock(&init_mm.page_table_lock);
25174 pgd_changed = true;
25175 }
c6e2a6c8 25176@@ -683,6 +697,12 @@ void __init mem_init(void)
df50ba0c
MT
25177
25178 pci_iommu_alloc();
25179
25180+#ifdef CONFIG_PAX_PER_CPU_PGD
25181+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25182+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25183+ KERNEL_PGD_PTRS);
25184+#endif
25185+
25186 /* clear_bss() already clear the empty_zero_page */
25187
25188 reservedpages = 0;
c6e2a6c8 25189@@ -843,8 +863,8 @@ int kern_addr_valid(unsigned long addr)
58c5fc13
MT
25190 static struct vm_area_struct gate_vma = {
25191 .vm_start = VSYSCALL_START,
25192 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25193- .vm_page_prot = PAGE_READONLY_EXEC,
25194- .vm_flags = VM_READ | VM_EXEC
25195+ .vm_page_prot = PAGE_READONLY,
25196+ .vm_flags = VM_READ
25197 };
25198
66a7e928 25199 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
c6e2a6c8 25200@@ -878,7 +898,7 @@ int in_gate_area_no_mm(unsigned long addr)
58c5fc13
MT
25201
25202 const char *arch_vma_name(struct vm_area_struct *vma)
25203 {
25204- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25205+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25206 return "[vdso]";
25207 if (vma == &gate_vma)
25208 return "[vsyscall]";
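The mem_init() hunk above (and its init_32.c counterpart earlier) seeds the kernel half of the boot CPU's private page-global directory under PAX_PER_CPU_PGD, while the sync_global_pgds() change keeps every per-CPU PGD in step instead of walking pgd_list. clone_pgd_range() is an existing helper in asm/pgtable.h; the call amounts to the following (the wrapper name is invented for illustration):

#include <linux/string.h>
#include <asm/pgtable.h>

/* What clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, ...) boils
 * down to: copy the kernel-space pgd slots from the reference page table. */
static inline void demo_seed_kernel_pgd(pgd_t *cpu_pgd)
{
	memcpy(cpu_pgd + KERNEL_PGD_BOUNDARY,
	       swapper_pg_dir + KERNEL_PGD_BOUNDARY,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));
}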
fe2de317
MT
25209diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25210index 7b179b4..6bd1777 100644
25211--- a/arch/x86/mm/iomap_32.c
25212+++ b/arch/x86/mm/iomap_32.c
25213@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
bc901d79 25214 type = kmap_atomic_idx_push();
58c5fc13
MT
25215 idx = type + KM_TYPE_NR * smp_processor_id();
25216 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25217+
ae4e228f 25218+ pax_open_kernel();
58c5fc13 25219 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
ae4e228f 25220+ pax_close_kernel();
58c5fc13
MT
25221+
25222 arch_flush_lazy_mmu_mode();
25223
25224 return (void *)vaddr;
fe2de317 25225diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
4c928ab7 25226index be1ef57..55f0160 100644
fe2de317
MT
25227--- a/arch/x86/mm/ioremap.c
25228+++ b/arch/x86/mm/ioremap.c
25229@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
6892158b 25230 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
58c5fc13
MT
25231 int is_ram = page_is_ram(pfn);
25232
ae4e228f
MT
25233- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25234+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25235 return NULL;
25236 WARN_ON_ONCE(is_ram);
58c5fc13 25237 }
4c928ab7
MT
25238@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25239
25240 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25241 if (page_is_ram(start >> PAGE_SHIFT))
25242+#ifdef CONFIG_HIGHMEM
25243+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25244+#endif
25245 return __va(phys);
25246
25247 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
25248@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
58c5fc13
MT
25249 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25250
25251 static __initdata int after_paging_init;
25252-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25253+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25254
25255 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25256 {
4c928ab7 25257@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
58c5fc13
MT
25258 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25259
25260 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25261- memset(bm_pte, 0, sizeof(bm_pte));
25262- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25263+ pmd_populate_user(&init_mm, pmd, bm_pte);
25264
25265 /*
25266 * The boot-ioremap range spans multiple pmds, for which
fe2de317
MT
25267diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25268index d87dd6d..bf3fa66 100644
25269--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25270+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25271@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
ae4e228f
MT
25272 * memory (e.g. tracked pages)? For now, we need this to avoid
25273 * invoking kmemcheck for PnP BIOS calls.
25274 */
25275- if (regs->flags & X86_VM_MASK)
25276+ if (v8086_mode(regs))
25277 return false;
25278- if (regs->cs != __KERNEL_CS)
25279+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25280 return false;
25281
25282 pte = kmemcheck_pte_lookup(address);
fe2de317 25283diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
4c928ab7 25284index 845df68..1d8d29f 100644
fe2de317
MT
25285--- a/arch/x86/mm/mmap.c
25286+++ b/arch/x86/mm/mmap.c
4c928ab7 25287@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
ae4e228f 25288 * Leave an at least ~128 MB hole with possible stack randomization.
58c5fc13 25289 */
ae4e228f 25290 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
58c5fc13
MT
25291-#define MAX_GAP (TASK_SIZE/6*5)
25292+#define MAX_GAP (pax_task_size/6*5)
25293
4c928ab7
MT
25294 static int mmap_is_legacy(void)
25295 {
25296@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
58c5fc13
MT
25297 return rnd << PAGE_SHIFT;
25298 }
25299
25300-static unsigned long mmap_base(void)
25301+static unsigned long mmap_base(struct mm_struct *mm)
25302 {
df50ba0c 25303 unsigned long gap = rlimit(RLIMIT_STACK);
58c5fc13
MT
25304+ unsigned long pax_task_size = TASK_SIZE;
25305+
25306+#ifdef CONFIG_PAX_SEGMEXEC
25307+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25308+ pax_task_size = SEGMEXEC_TASK_SIZE;
25309+#endif
25310
25311 if (gap < MIN_GAP)
25312 gap = MIN_GAP;
25313 else if (gap > MAX_GAP)
25314 gap = MAX_GAP;
25315
25316- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25317+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25318 }
25319
25320 /*
25321 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25322 * does, but not when emulating X86_32
25323 */
25324-static unsigned long mmap_legacy_base(void)
25325+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25326 {
25327- if (mmap_is_ia32())
25328+ if (mmap_is_ia32()) {
25329+
25330+#ifdef CONFIG_PAX_SEGMEXEC
25331+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25332+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25333+ else
25334+#endif
25335+
25336 return TASK_UNMAPPED_BASE;
25337- else
25338+ } else
25339 return TASK_UNMAPPED_BASE + mmap_rnd();
25340 }
25341
4c928ab7 25342@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
58c5fc13
MT
25343 void arch_pick_mmap_layout(struct mm_struct *mm)
25344 {
25345 if (mmap_is_legacy()) {
25346- mm->mmap_base = mmap_legacy_base();
25347+ mm->mmap_base = mmap_legacy_base(mm);
25348+
25349+#ifdef CONFIG_PAX_RANDMMAP
25350+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25351+ mm->mmap_base += mm->delta_mmap;
25352+#endif
25353+
25354 mm->get_unmapped_area = arch_get_unmapped_area;
25355 mm->unmap_area = arch_unmap_area;
25356 } else {
25357- mm->mmap_base = mmap_base();
25358+ mm->mmap_base = mmap_base(mm);
25359+
25360+#ifdef CONFIG_PAX_RANDMMAP
25361+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25362+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25363+#endif
25364+
25365 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25366 mm->unmap_area = arch_unmap_area_topdown;
25367 }
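A condensed restatement of the top-down layout chosen in arch_pick_mmap_layout() above, folding in the SEGMEXEC and RANDMMAP adjustments; this is illustrative only (mmap_rnd(), MIN_GAP and MAX_GAP are the ones defined in this file, and delta_mmap/delta_stack are chosen elsewhere in the patch):

/* Condensed sketch of the top-down branch; not a drop-in replacement. */
static unsigned long demo_topdown_base(struct mm_struct *mm)
{
	unsigned long task_size = TASK_SIZE;	/* SEGMEXEC_TASK_SIZE under MF_PAX_SEGMEXEC */
	unsigned long gap = rlimit(RLIMIT_STACK);
	unsigned long base;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	base = PAGE_ALIGN(task_size - gap - mmap_rnd());

#ifdef CONFIG_PAX_RANDMMAP
	if (mm->pax_flags & MF_PAX_RANDMMAP)	/* extra slide added by this patch */
		base -= mm->delta_mmap + mm->delta_stack;
#endif
	return base;
}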
fe2de317 25368diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
5e856224 25369index dc0b727..dc9d71a 100644
fe2de317
MT
25370--- a/arch/x86/mm/mmio-mod.c
25371+++ b/arch/x86/mm/mmio-mod.c
4c928ab7 25372@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
15a11c5b
MT
25373 break;
25374 default:
25375 {
25376- unsigned char *ip = (unsigned char *)instptr;
25377+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25378 my_trace->opcode = MMIO_UNKNOWN_OP;
25379 my_trace->width = 0;
25380 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
4c928ab7 25381@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
8308f9c9
MT
25382 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25383 void __iomem *addr)
25384 {
25385- static atomic_t next_id;
25386+ static atomic_unchecked_t next_id;
25387 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25388 /* These are page-unaligned. */
25389 struct mmiotrace_map map = {
4c928ab7 25390@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
8308f9c9
MT
25391 .private = trace
25392 },
25393 .phys = offset,
25394- .id = atomic_inc_return(&next_id)
25395+ .id = atomic_inc_return_unchecked(&next_id)
25396 };
25397 map.map_id = trace->id;
25398
fe2de317
MT
25399diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25400index b008656..773eac2 100644
25401--- a/arch/x86/mm/pageattr-test.c
25402+++ b/arch/x86/mm/pageattr-test.c
25403@@ -36,7 +36,7 @@ enum {
25404
25405 static int pte_testbit(pte_t pte)
25406 {
25407- return pte_flags(pte) & _PAGE_UNUSED1;
25408+ return pte_flags(pte) & _PAGE_CPA_TEST;
25409 }
25410
25411 struct split_state {
25412diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
5e856224 25413index e1ebde3..b1e1db38 100644
fe2de317
MT
25414--- a/arch/x86/mm/pageattr.c
25415+++ b/arch/x86/mm/pageattr.c
25416@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
df50ba0c 25417 */
16454cff
MT
25418 #ifdef CONFIG_PCI_BIOS
25419 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
df50ba0c
MT
25420- pgprot_val(forbidden) |= _PAGE_NX;
25421+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
16454cff 25422 #endif
df50ba0c
MT
25423
25424 /*
fe2de317 25425@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
58c5fc13
MT
25426 * Does not cover __inittext since that is gone later on. On
25427 * 64bit we do not enforce !NX on the low mapping
25428 */
25429- if (within(address, (unsigned long)_text, (unsigned long)_etext))
df50ba0c 25430- pgprot_val(forbidden) |= _PAGE_NX;
58c5fc13 25431+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
df50ba0c 25432+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
58c5fc13
MT
25433
25434+#ifdef CONFIG_DEBUG_RODATA
25435 /*
25436 * The .rodata section needs to be read-only. Using the pfn
25437 * catches all aliases.
fe2de317 25438@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
58c5fc13
MT
25439 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25440 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25441 pgprot_val(forbidden) |= _PAGE_RW;
25442+#endif
25443
ae4e228f
MT
25444 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
25445 /*
fe2de317 25446@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
df50ba0c
MT
25447 }
25448 #endif
25449
25450+#ifdef CONFIG_PAX_KERNEXEC
25451+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25452+ pgprot_val(forbidden) |= _PAGE_RW;
25453+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25454+ }
25455+#endif
25456+
25457 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25458
25459 return prot;
16454cff 25460@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
58c5fc13
MT
25461 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25462 {
58c5fc13 25463 /* change init_mm */
ae4e228f 25464+ pax_open_kernel();
58c5fc13 25465 set_pte_atomic(kpte, pte);
58c5fc13
MT
25466+
25467 #ifdef CONFIG_X86_32
25468 if (!SHARED_KERNEL_PMD) {
df50ba0c
MT
25469+
25470+#ifdef CONFIG_PAX_PER_CPU_PGD
25471+ unsigned long cpu;
25472+#else
58c5fc13 25473 struct page *page;
df50ba0c
MT
25474+#endif
25475
25476+#ifdef CONFIG_PAX_PER_CPU_PGD
4c928ab7 25477+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
df50ba0c
MT
25478+ pgd_t *pgd = get_cpu_pgd(cpu);
25479+#else
25480 list_for_each_entry(page, &pgd_list, lru) {
25481- pgd_t *pgd;
25482+ pgd_t *pgd = (pgd_t *)page_address(page);
25483+#endif
25484+
25485 pud_t *pud;
25486 pmd_t *pmd;
25487
25488- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25489+ pgd += pgd_index(address);
25490 pud = pud_offset(pgd, address);
25491 pmd = pmd_offset(pud, address);
25492 set_pte_atomic((pte_t *)pmd, pte);
25493 }
25494 }
25495 #endif
25496+ pax_close_kernel();
25497 }
25498
25499 static int
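To read the KERNEXEC addition to static_protections() above: bits accumulated in 'forbidden' are stripped from the caller's requested protection at the end of the function, so marking _PAGE_RW and _PAGE_NX as forbidden for pfns inside [_text, _sdata) means kernel text and rodata can never be remapped writable or non-executable through the CPA paths. For that range the function effectively reduces to the following sketch:

/* Condensed effect for a pfn inside the kernel image (not a drop-in): */
static pgprot_t demo_text_prot(pgprot_t requested)
{
	pgprotval_t forbidden = _PAGE_RW | (_PAGE_NX & __supported_pte_mask);

	return __pgprot(pgprot_val(requested) & ~forbidden);
}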
fe2de317
MT
25500diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25501index f6ff57b..481690f 100644
25502--- a/arch/x86/mm/pat.c
25503+++ b/arch/x86/mm/pat.c
57199397
MT
25504@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
25505
25506 if (!entry) {
58c5fc13
MT
25507 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25508- current->comm, current->pid, start, end);
25509+ current->comm, task_pid_nr(current), start, end);
57199397 25510 return -EINVAL;
58c5fc13
MT
25511 }
25512
fe2de317 25513@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
57199397
MT
25514 while (cursor < to) {
25515 if (!devmem_is_allowed(pfn)) {
25516 printk(KERN_INFO
25517- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25518- current->comm, from, to);
25519+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25520+ current->comm, from, to, cursor);
25521 return 0;
25522 }
25523 cursor += PAGE_SIZE;
fe2de317 25524@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
58c5fc13
MT
25525 printk(KERN_INFO
25526 "%s:%d ioremap_change_attr failed %s "
25527 "for %Lx-%Lx\n",
25528- current->comm, current->pid,
25529+ current->comm, task_pid_nr(current),
25530 cattr_name(flags),
25531 base, (unsigned long long)(base + size));
25532 return -EINVAL;
fe2de317 25533@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
57199397
MT
25534 if (want_flags != flags) {
25535 printk(KERN_WARNING
25536 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
25537- current->comm, current->pid,
25538+ current->comm, task_pid_nr(current),
25539 cattr_name(want_flags),
25540 (unsigned long long)paddr,
25541 (unsigned long long)(paddr + size),
fe2de317 25542@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
58c5fc13
MT
25543 free_memtype(paddr, paddr + size);
25544 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25545 " for %Lx-%Lx, got %s\n",
25546- current->comm, current->pid,
25547+ current->comm, task_pid_nr(current),
25548 cattr_name(want_flags),
25549 (unsigned long long)paddr,
25550 (unsigned long long)(paddr + size),
fe2de317
MT
25551diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25552index 9f0614d..92ae64a 100644
25553--- a/arch/x86/mm/pf_in.c
25554+++ b/arch/x86/mm/pf_in.c
25555@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
15a11c5b
MT
25556 int i;
25557 enum reason_type rv = OTHERS;
25558
25559- p = (unsigned char *)ins_addr;
25560+ p = (unsigned char *)ktla_ktva(ins_addr);
25561 p += skip_prefix(p, &prf);
25562 p += get_opcode(p, &opcode);
25563
fe2de317 25564@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
15a11c5b
MT
25565 struct prefix_bits prf;
25566 int i;
25567
25568- p = (unsigned char *)ins_addr;
25569+ p = (unsigned char *)ktla_ktva(ins_addr);
25570 p += skip_prefix(p, &prf);
25571 p += get_opcode(p, &opcode);
25572
fe2de317 25573@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
15a11c5b
MT
25574 struct prefix_bits prf;
25575 int i;
25576
25577- p = (unsigned char *)ins_addr;
25578+ p = (unsigned char *)ktla_ktva(ins_addr);
25579 p += skip_prefix(p, &prf);
25580 p += get_opcode(p, &opcode);
25581
fe2de317 25582@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
15a11c5b
MT
25583 struct prefix_bits prf;
25584 int i;
25585
25586- p = (unsigned char *)ins_addr;
25587+ p = (unsigned char *)ktla_ktva(ins_addr);
25588 p += skip_prefix(p, &prf);
25589 p += get_opcode(p, &opcode);
25590 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
fe2de317 25591@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
15a11c5b
MT
25592 struct prefix_bits prf;
25593 int i;
25594
25595- p = (unsigned char *)ins_addr;
25596+ p = (unsigned char *)ktla_ktva(ins_addr);
25597 p += skip_prefix(p, &prf);
25598 p += get_opcode(p, &opcode);
25599 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
fe2de317 25600diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
c6e2a6c8 25601index 8573b83..4f3ed7e 100644
fe2de317
MT
25602--- a/arch/x86/mm/pgtable.c
25603+++ b/arch/x86/mm/pgtable.c
c6e2a6c8 25604@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
df50ba0c
MT
25605 list_del(&page->lru);
25606 }
25607
25608-#define UNSHARED_PTRS_PER_PGD \
25609- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25610+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25611+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
bc901d79 25612
c6e2a6c8 25613+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
df50ba0c 25614+{
c6e2a6c8
MT
25615+ unsigned int count = USER_PGD_PTRS;
25616
df50ba0c 25617+ while (count--)
bc901d79 25618+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
df50ba0c
MT
25619+}
25620+#endif
c6e2a6c8 25621+
df50ba0c 25622+#ifdef CONFIG_PAX_PER_CPU_PGD
c6e2a6c8 25623+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
df50ba0c 25624+{
c6e2a6c8
MT
25625+ unsigned int count = USER_PGD_PTRS;
25626+
5e856224
MT
25627+ while (count--) {
25628+ pgd_t pgd;
df50ba0c 25629+
5e856224
MT
25630+#ifdef CONFIG_X86_64
25631+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
df50ba0c 25632+#else
5e856224
MT
25633+ pgd = *src++;
25634+#endif
25635+
25636+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25637+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
df50ba0c 25638+#endif
fe2de317 25639+
5e856224
MT
25640+ *dst++ = pgd;
25641+ }
25642+
df50ba0c
MT
25643+}
25644+#endif
25645+
df50ba0c
MT
25646+#ifdef CONFIG_X86_64
25647+#define pxd_t pud_t
25648+#define pyd_t pgd_t
25649+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25650+#define pxd_free(mm, pud) pud_free((mm), (pud))
25651+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
4c928ab7 25652+#define pyd_offset(mm, address) pgd_offset((mm), (address))
df50ba0c
MT
25653+#define PYD_SIZE PGDIR_SIZE
25654+#else
25655+#define pxd_t pmd_t
25656+#define pyd_t pud_t
25657+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25658+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25659+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
4c928ab7 25660+#define pyd_offset(mm, address) pud_offset((mm), (address))
df50ba0c
MT
25661+#define PYD_SIZE PUD_SIZE
25662+#endif
66a7e928
MT
25663+
25664+#ifdef CONFIG_PAX_PER_CPU_PGD
25665+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
25666+static inline void pgd_dtor(pgd_t *pgd) {}
df50ba0c 25667+#else
bc901d79 25668 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
df50ba0c 25669 {
66a7e928 25670 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
c6e2a6c8 25671@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
df50ba0c 25672 pgd_list_del(pgd);
16454cff 25673 spin_unlock(&pgd_lock);
df50ba0c
MT
25674 }
25675+#endif
25676
25677 /*
25678 * List of all pgd's needed for non-PAE so it can invalidate entries
c6e2a6c8 25679@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
df50ba0c
MT
25680 * -- wli
25681 */
25682
25683-#ifdef CONFIG_X86_PAE
25684+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25685 /*
25686 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25687 * updating the top-level pagetable entries to guarantee the
c6e2a6c8 25688@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
df50ba0c
MT
25689 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25690 * and initialize the kernel pmds here.
25691 */
25692-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25693+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25694
25695 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25696 {
c6e2a6c8 25697@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
16454cff
MT
25698 */
25699 flush_tlb_mm(mm);
df50ba0c
MT
25700 }
25701+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25702+#define PREALLOCATED_PXDS USER_PGD_PTRS
25703 #else /* !CONFIG_X86_PAE */
25704
25705 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25706-#define PREALLOCATED_PMDS 0
25707+#define PREALLOCATED_PXDS 0
25708
25709 #endif /* CONFIG_X86_PAE */
25710
25711-static void free_pmds(pmd_t *pmds[])
25712+static void free_pxds(pxd_t *pxds[])
25713 {
25714 int i;
25715
25716- for(i = 0; i < PREALLOCATED_PMDS; i++)
25717- if (pmds[i])
25718- free_page((unsigned long)pmds[i]);
25719+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25720+ if (pxds[i])
25721+ free_page((unsigned long)pxds[i]);
25722 }
25723
25724-static int preallocate_pmds(pmd_t *pmds[])
25725+static int preallocate_pxds(pxd_t *pxds[])
25726 {
25727 int i;
25728 bool failed = false;
25729
25730- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25731- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25732- if (pmd == NULL)
25733+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25734+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25735+ if (pxd == NULL)
25736 failed = true;
25737- pmds[i] = pmd;
25738+ pxds[i] = pxd;
25739 }
25740
25741 if (failed) {
25742- free_pmds(pmds);
25743+ free_pxds(pxds);
25744 return -ENOMEM;
25745 }
25746
c6e2a6c8 25747@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
df50ba0c
MT
25748 * preallocate which never got a corresponding vma will need to be
25749 * freed manually.
25750 */
25751-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25752+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25753 {
25754 int i;
25755
25756- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25757+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25758 pgd_t pgd = pgdp[i];
25759
25760 if (pgd_val(pgd) != 0) {
25761- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25762+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25763
25764- pgdp[i] = native_make_pgd(0);
25765+ set_pgd(pgdp + i, native_make_pgd(0));
25766
25767- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25768- pmd_free(mm, pmd);
25769+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25770+ pxd_free(mm, pxd);
25771 }
25772 }
25773 }
25774
25775-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25776+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25777 {
25778- pud_t *pud;
25779+ pyd_t *pyd;
25780 unsigned long addr;
25781 int i;
25782
25783- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25784+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25785 return;
25786
25787- pud = pud_offset(pgd, 0);
25788+#ifdef CONFIG_X86_64
25789+ pyd = pyd_offset(mm, 0L);
25790+#else
25791+ pyd = pyd_offset(pgd, 0L);
25792+#endif
25793
25794- for (addr = i = 0; i < PREALLOCATED_PMDS;
25795- i++, pud++, addr += PUD_SIZE) {
25796- pmd_t *pmd = pmds[i];
25797+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25798+ i++, pyd++, addr += PYD_SIZE) {
25799+ pxd_t *pxd = pxds[i];
25800
25801 if (i >= KERNEL_PGD_BOUNDARY)
25802- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25803- sizeof(pmd_t) * PTRS_PER_PMD);
25804+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25805+ sizeof(pxd_t) * PTRS_PER_PMD);
25806
25807- pud_populate(mm, pud, pmd);
25808+ pyd_populate(mm, pyd, pxd);
25809 }
25810 }
25811
25812 pgd_t *pgd_alloc(struct mm_struct *mm)
25813 {
25814 pgd_t *pgd;
25815- pmd_t *pmds[PREALLOCATED_PMDS];
25816+ pxd_t *pxds[PREALLOCATED_PXDS];
df50ba0c
MT
25817
25818 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
16454cff 25819
c6e2a6c8 25820@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
df50ba0c
MT
25821
25822 mm->pgd = pgd;
25823
25824- if (preallocate_pmds(pmds) != 0)
25825+ if (preallocate_pxds(pxds) != 0)
25826 goto out_free_pgd;
25827
25828 if (paravirt_pgd_alloc(mm) != 0)
25829- goto out_free_pmds;
25830+ goto out_free_pxds;
25831
25832 /*
25833 * Make sure that pre-populating the pmds is atomic with
c6e2a6c8 25834@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
16454cff 25835 spin_lock(&pgd_lock);
df50ba0c 25836
bc901d79 25837 pgd_ctor(mm, pgd);
df50ba0c
MT
25838- pgd_prepopulate_pmd(mm, pgd, pmds);
25839+ pgd_prepopulate_pxd(mm, pgd, pxds);
25840
16454cff 25841 spin_unlock(&pgd_lock);
df50ba0c
MT
25842
25843 return pgd;
25844
25845-out_free_pmds:
25846- free_pmds(pmds);
25847+out_free_pxds:
25848+ free_pxds(pxds);
25849 out_free_pgd:
25850 free_page((unsigned long)pgd);
25851 out:
c6e2a6c8 25852@@ -295,7 +356,7 @@ out:
df50ba0c
MT
25853
25854 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
25855 {
25856- pgd_mop_up_pmds(mm, pgd);
25857+ pgd_mop_up_pxds(mm, pgd);
25858 pgd_dtor(pgd);
25859 paravirt_pgd_free(mm, pgd);
25860 free_page((unsigned long)pgd);
fe2de317 25861diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
c6e2a6c8 25862index a69bcb8..19068ab 100644
fe2de317
MT
25863--- a/arch/x86/mm/pgtable_32.c
25864+++ b/arch/x86/mm/pgtable_32.c
c6e2a6c8 25865@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
fe2de317
MT
25866 return;
25867 }
25868 pte = pte_offset_kernel(pmd, vaddr);
25869+
25870+ pax_open_kernel();
25871 if (pte_val(pteval))
25872 set_pte_at(&init_mm, vaddr, pte, pteval);
25873 else
25874 pte_clear(&init_mm, vaddr, pte);
25875+ pax_close_kernel();
25876
25877 /*
25878 * It's enough to flush this one mapping.
25879diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
25880index 410531d..0f16030 100644
25881--- a/arch/x86/mm/setup_nx.c
25882+++ b/arch/x86/mm/setup_nx.c
efbe55a5 25883@@ -5,8 +5,10 @@
df50ba0c
MT
25884 #include <asm/pgtable.h>
25885 #include <asm/proto.h>
25886
25887+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25888 static int disable_nx __cpuinitdata;
df50ba0c 25889
efbe55a5 25890+#ifndef CONFIG_PAX_PAGEEXEC
df50ba0c
MT
25891 /*
25892 * noexec = on|off
25893 *
fe2de317 25894@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
df50ba0c
MT
25895 return 0;
25896 }
25897 early_param("noexec", noexec_setup);
efbe55a5
MT
25898+#endif
25899+
df50ba0c
MT
25900+#endif
25901
25902 void __cpuinit x86_configure_nx(void)
25903 {
25904+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
25905 if (cpu_has_nx && !disable_nx)
25906 __supported_pte_mask |= _PAGE_NX;
25907 else
25908+#endif
25909 __supported_pte_mask &= ~_PAGE_NX;
25910 }
25911
fe2de317
MT
25912diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
25913index d6c0418..06a0ad5 100644
25914--- a/arch/x86/mm/tlb.c
25915+++ b/arch/x86/mm/tlb.c
bc901d79 25916@@ -65,7 +65,11 @@ void leave_mm(int cpu)
df50ba0c
MT
25917 BUG();
25918 cpumask_clear_cpu(cpu,
25919 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
25920+
25921+#ifndef CONFIG_PAX_PER_CPU_PGD
25922 load_cr3(swapper_pg_dir);
25923+#endif
25924+
25925 }
25926 EXPORT_SYMBOL_GPL(leave_mm);
25927
fe2de317 25928diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
c6e2a6c8 25929index 877b9a1..a8ecf42 100644
fe2de317
MT
25930--- a/arch/x86/net/bpf_jit.S
25931+++ b/arch/x86/net/bpf_jit.S
6e9df6a3
MT
25932@@ -9,6 +9,7 @@
25933 */
25934 #include <linux/linkage.h>
25935 #include <asm/dwarf2.h>
25936+#include <asm/alternative-asm.h>
25937
25938 /*
25939 * Calling convention :
c6e2a6c8 25940@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
6e9df6a3
MT
25941 jle bpf_slow_path_word
25942 mov (SKBDATA,%rsi),%eax
25943 bswap %eax /* ntohl() */
25944+ pax_force_retaddr
25945 ret
25946
c6e2a6c8
MT
25947 sk_load_half:
25948@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
6e9df6a3
MT
25949 jle bpf_slow_path_half
25950 movzwl (SKBDATA,%rsi),%eax
25951 rol $8,%ax # ntohs()
25952+ pax_force_retaddr
25953 ret
25954
c6e2a6c8
MT
25955 sk_load_byte:
25956@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
6e9df6a3
MT
25957 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
25958 jle bpf_slow_path_byte
25959 movzbl (SKBDATA,%rsi),%eax
25960+ pax_force_retaddr
25961 ret
25962
25963 /**
c6e2a6c8 25964@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
6e9df6a3
MT
25965 movzbl (SKBDATA,%rsi),%ebx
25966 and $15,%bl
25967 shl $2,%bl
6e9df6a3
MT
25968+ pax_force_retaddr
25969 ret
25970
25971 /* rsi contains offset and can be scratched */
c6e2a6c8 25972@@ -109,6 +114,7 @@ bpf_slow_path_word:
6e9df6a3
MT
25973 js bpf_error
25974 mov -12(%rbp),%eax
25975 bswap %eax
25976+ pax_force_retaddr
25977 ret
25978
25979 bpf_slow_path_half:
c6e2a6c8 25980@@ -117,12 +123,14 @@ bpf_slow_path_half:
6e9df6a3
MT
25981 mov -12(%rbp),%ax
25982 rol $8,%ax
25983 movzwl %ax,%eax
25984+ pax_force_retaddr
25985 ret
25986
25987 bpf_slow_path_byte:
25988 bpf_slow_path_common(1)
25989 js bpf_error
25990 movzbl -12(%rbp),%eax
25991+ pax_force_retaddr
25992 ret
25993
25994 bpf_slow_path_byte_msh:
c6e2a6c8 25995@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
6e9df6a3
MT
25996 and $15,%al
25997 shl $2,%al
25998 xchg %eax,%ebx
c6e2a6c8
MT
25999+ pax_force_retaddr
26000 ret
26001
26002 #define sk_negative_common(SIZE) \
26003@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
26004 sk_negative_common(4)
26005 mov (%rax), %eax
26006 bswap %eax
26007+ pax_force_retaddr
26008 ret
26009
26010 bpf_slow_path_half_neg:
26011@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
26012 mov (%rax),%ax
26013 rol $8,%ax
26014 movzwl %ax,%eax
26015+ pax_force_retaddr
26016 ret
26017
26018 bpf_slow_path_byte_neg:
26019@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
26020 .globl sk_load_byte_negative_offset
26021 sk_negative_common(1)
26022 movzbl (%rax), %eax
26023+ pax_force_retaddr
26024 ret
26025
26026 bpf_slow_path_byte_msh_neg:
26027@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
26028 and $15,%al
26029 shl $2,%al
26030 xchg %eax,%ebx
26031+ pax_force_retaddr
26032 ret
26033
26034 bpf_error:
26035@@ -197,4 +210,5 @@ bpf_error:
26036 xor %eax,%eax
26037 mov -8(%rbp),%rbx
26038 leaveq
6e9df6a3
MT
26039+ pax_force_retaddr
26040 ret
fe2de317 26041diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
c6e2a6c8 26042index 0597f95..a12c36e 100644
fe2de317
MT
26043--- a/arch/x86/net/bpf_jit_comp.c
26044+++ b/arch/x86/net/bpf_jit_comp.c
c6e2a6c8 26045@@ -120,6 +120,11 @@ static inline void bpf_flush_icache(void *start, void *end)
fe2de317
MT
26046 set_fs(old_fs);
26047 }
26048
26049+struct bpf_jit_work {
26050+ struct work_struct work;
26051+ void *image;
26052+};
c6e2a6c8
MT
26053+
26054 #define CHOOSE_LOAD_FUNC(K, func) \
26055 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
fe2de317 26056
c6e2a6c8 26057@@ -146,6 +151,10 @@ void bpf_jit_compile(struct sk_filter *fp)
fe2de317
MT
26058 if (addrs == NULL)
26059 return;
26060
26061+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
26062+ if (!fp->work)
26063+ goto out;
26064+
26065 /* Before first pass, make a rough estimation of addrs[]
26066 * each bpf instruction is translated to less than 64 bytes
26067 */
c6e2a6c8 26068@@ -589,17 +598,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
4c928ab7
MT
26069 break;
26070 default:
26071 /* hmm, too complex filter, give up with jit compiler */
26072- goto out;
26073+ goto error;
26074 }
26075 ilen = prog - temp;
fe2de317
MT
26076 if (image) {
26077 if (unlikely(proglen + ilen > oldproglen)) {
26078 pr_err("bpb_jit_compile fatal error\n");
26079- kfree(addrs);
26080- module_free(NULL, image);
26081- return;
26082+ module_free_exec(NULL, image);
4c928ab7 26083+ goto error;
fe2de317
MT
26084 }
26085+ pax_open_kernel();
26086 memcpy(image + proglen, temp, ilen);
26087+ pax_close_kernel();
26088 }
26089 proglen += ilen;
26090 addrs[i] = proglen;
c6e2a6c8 26091@@ -620,11 +630,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
fe2de317
MT
26092 break;
26093 }
26094 if (proglen == oldproglen) {
26095- image = module_alloc(max_t(unsigned int,
4c928ab7
MT
26096- proglen,
26097- sizeof(struct work_struct)));
26098+ image = module_alloc_exec(proglen);
fe2de317 26099 if (!image)
4c928ab7
MT
26100- goto out;
26101+ goto error;
26102 }
26103 oldproglen = proglen;
fe2de317 26104 }
c6e2a6c8 26105@@ -640,7 +648,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
4c928ab7
MT
26106 bpf_flush_icache(image, image + proglen);
26107
26108 fp->bpf_func = (void *)image;
26109- }
26110+ } else
26111+error:
26112+ kfree(fp->work);
26113+
fe2de317 26114 out:
fe2de317
MT
26115 kfree(addrs);
26116 return;
c6e2a6c8 26117@@ -648,18 +659,20 @@ out:
fe2de317
MT
26118
26119 static void jit_free_defer(struct work_struct *arg)
26120 {
26121- module_free(NULL, arg);
4c928ab7 26122+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
fe2de317
MT
26123+ kfree(arg);
26124 }
26125
26126 /* run from softirq, we must use a work_struct to call
26127- * module_free() from process context
26128+ * module_free_exec() from process context
26129 */
26130 void bpf_jit_free(struct sk_filter *fp)
26131 {
26132 if (fp->bpf_func != sk_run_filter) {
26133- struct work_struct *work = (struct work_struct *)fp->bpf_func;
26134+ struct work_struct *work = &fp->work->work;
26135
26136 INIT_WORK(work, jit_free_defer);
26137+ fp->work->image = fp->bpf_func;
26138 schedule_work(work);
26139 }
26140 }
26141diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
c6e2a6c8 26142index d6aa6e8..266395a 100644
fe2de317
MT
26143--- a/arch/x86/oprofile/backtrace.c
26144+++ b/arch/x86/oprofile/backtrace.c
26145@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
6e9df6a3
MT
26146 struct stack_frame_ia32 *fp;
26147 unsigned long bytes;
26148
26149- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26150+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26151 if (bytes != sizeof(bufhead))
26152 return NULL;
26153
26154- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
26155+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
26156
26157 oprofile_add_trace(bufhead[0].return_address);
26158
fe2de317 26159@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
6e9df6a3
MT
26160 struct stack_frame bufhead[2];
26161 unsigned long bytes;
26162
26163- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
26164+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
26165 if (bytes != sizeof(bufhead))
26166 return NULL;
26167
fe2de317 26168@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
58c5fc13 26169 {
bc901d79 26170 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
58c5fc13
MT
26171
26172- if (!user_mode_vm(regs)) {
26173+ if (!user_mode(regs)) {
26174 unsigned long stack = kernel_stack_pointer(regs);
26175 if (depth)
66a7e928 26176 dump_trace(NULL, regs, (unsigned long *)stack, 0,
fe2de317 26177diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
c6e2a6c8 26178index 140942f..8a5cc55 100644
fe2de317
MT
26179--- a/arch/x86/pci/mrst.c
26180+++ b/arch/x86/pci/mrst.c
c6e2a6c8
MT
26181@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
26182 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
15a11c5b
MT
26183 pci_mmcfg_late_init();
26184 pcibios_enable_irq = mrst_pci_irq_enable;
26185- pci_root_ops = pci_mrst_ops;
26186+ pax_open_kernel();
26187+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
26188+ pax_close_kernel();
c6e2a6c8 26189 pci_soc_mode = 1;
15a11c5b
MT
26190 /* Continue with standard init */
26191 return 1;
fe2de317 26192diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
5e856224 26193index da8fe05..7ee6704 100644
fe2de317
MT
26194--- a/arch/x86/pci/pcbios.c
26195+++ b/arch/x86/pci/pcbios.c
16454cff 26196@@ -79,50 +79,93 @@ union bios32 {
58c5fc13
MT
26197 static struct {
26198 unsigned long address;
26199 unsigned short segment;
26200-} bios32_indirect = { 0, __KERNEL_CS };
26201+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26202
26203 /*
26204 * Returns the entry point for the given service, NULL on error
26205 */
26206
26207-static unsigned long bios32_service(unsigned long service)
26208+static unsigned long __devinit bios32_service(unsigned long service)
26209 {
26210 unsigned char return_code; /* %al */
26211 unsigned long address; /* %ebx */
26212 unsigned long length; /* %ecx */
26213 unsigned long entry; /* %edx */
26214 unsigned long flags;
26215+ struct desc_struct d, *gdt;
58c5fc13
MT
26216
26217 local_irq_save(flags);
26218- __asm__("lcall *(%%edi); cld"
26219+
26220+ gdt = get_cpu_gdt_table(smp_processor_id());
26221+
58c5fc13
MT
26222+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26223+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26224+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26225+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26226+
58c5fc13
MT
26227+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26228 : "=a" (return_code),
26229 "=b" (address),
26230 "=c" (length),
26231 "=d" (entry)
26232 : "0" (service),
26233 "1" (0),
26234- "D" (&bios32_indirect));
26235+ "D" (&bios32_indirect),
26236+ "r"(__PCIBIOS_DS)
26237+ : "memory");
26238+
ae4e228f 26239+ pax_open_kernel();
58c5fc13
MT
26240+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26241+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26242+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26243+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
ae4e228f 26244+ pax_close_kernel();
58c5fc13
MT
26245+
26246 local_irq_restore(flags);
26247
26248 switch (return_code) {
26249- case 0:
26250- return address + entry;
26251- case 0x80: /* Not present */
26252- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26253- return 0;
26254- default: /* Shouldn't happen */
26255- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26256- service, return_code);
26257+ case 0: {
26258+ int cpu;
26259+ unsigned char flags;
26260+
26261+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26262+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26263+ printk(KERN_WARNING "bios32_service: not valid\n");
26264 return 0;
26265+ }
26266+ address = address + PAGE_OFFSET;
26267+ length += 16UL; /* some BIOSs underreport this... */
26268+ flags = 4;
26269+ if (length >= 64*1024*1024) {
26270+ length >>= PAGE_SHIFT;
26271+ flags |= 8;
26272+ }
26273+
4c928ab7 26274+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
58c5fc13
MT
26275+ gdt = get_cpu_gdt_table(cpu);
26276+ pack_descriptor(&d, address, length, 0x9b, flags);
26277+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26278+ pack_descriptor(&d, address, length, 0x93, flags);
26279+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26280+ }
58c5fc13
MT
26281+ return entry;
26282+ }
26283+ case 0x80: /* Not present */
26284+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26285+ return 0;
26286+ default: /* Shouldn't happen */
26287+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26288+ service, return_code);
26289+ return 0;
26290 }
26291 }
26292
26293 static struct {
26294 unsigned long address;
26295 unsigned short segment;
26296-} pci_indirect = { 0, __KERNEL_CS };
26297+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26298
26299-static int pci_bios_present;
26300+static int pci_bios_present __read_only;
26301
26302 static int __devinit check_pcibios(void)
26303 {
16454cff 26304@@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
58c5fc13
MT
26305 unsigned long flags, pcibios_entry;
26306
26307 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26308- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26309+ pci_indirect.address = pcibios_entry;
26310
26311 local_irq_save(flags);
26312- __asm__(
26313- "lcall *(%%edi); cld\n\t"
26314+ __asm__("movw %w6, %%ds\n\t"
26315+ "lcall *%%ss:(%%edi); cld\n\t"
26316+ "push %%ss\n\t"
26317+ "pop %%ds\n\t"
26318 "jc 1f\n\t"
26319 "xor %%ah, %%ah\n"
26320 "1:"
16454cff 26321@@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
58c5fc13
MT
26322 "=b" (ebx),
26323 "=c" (ecx)
26324 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26325- "D" (&pci_indirect)
26326+ "D" (&pci_indirect),
26327+ "r" (__PCIBIOS_DS)
26328 : "memory");
26329 local_irq_restore(flags);
26330
fe2de317 26331@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
26332
26333 switch (len) {
26334 case 1:
26335- __asm__("lcall *(%%esi); cld\n\t"
26336+ __asm__("movw %w6, %%ds\n\t"
26337+ "lcall *%%ss:(%%esi); cld\n\t"
26338+ "push %%ss\n\t"
26339+ "pop %%ds\n\t"
26340 "jc 1f\n\t"
26341 "xor %%ah, %%ah\n"
26342 "1:"
fe2de317 26343@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
26344 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26345 "b" (bx),
26346 "D" ((long)reg),
26347- "S" (&pci_indirect));
26348+ "S" (&pci_indirect),
26349+ "r" (__PCIBIOS_DS));
26350 /*
26351 * Zero-extend the result beyond 8 bits, do not trust the
26352 * BIOS having done it:
fe2de317 26353@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
26354 *value &= 0xff;
26355 break;
26356 case 2:
26357- __asm__("lcall *(%%esi); cld\n\t"
26358+ __asm__("movw %w6, %%ds\n\t"
26359+ "lcall *%%ss:(%%esi); cld\n\t"
26360+ "push %%ss\n\t"
26361+ "pop %%ds\n\t"
26362 "jc 1f\n\t"
26363 "xor %%ah, %%ah\n"
26364 "1:"
fe2de317 26365@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
26366 : "1" (PCIBIOS_READ_CONFIG_WORD),
26367 "b" (bx),
26368 "D" ((long)reg),
26369- "S" (&pci_indirect));
26370+ "S" (&pci_indirect),
26371+ "r" (__PCIBIOS_DS));
26372 /*
26373 * Zero-extend the result beyond 16 bits, do not trust the
26374 * BIOS having done it:
fe2de317 26375@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
26376 *value &= 0xffff;
26377 break;
26378 case 4:
26379- __asm__("lcall *(%%esi); cld\n\t"
26380+ __asm__("movw %w6, %%ds\n\t"
26381+ "lcall *%%ss:(%%esi); cld\n\t"
26382+ "push %%ss\n\t"
26383+ "pop %%ds\n\t"
26384 "jc 1f\n\t"
26385 "xor %%ah, %%ah\n"
26386 "1:"
fe2de317 26387@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
58c5fc13
MT
26388 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26389 "b" (bx),
26390 "D" ((long)reg),
26391- "S" (&pci_indirect));
26392+ "S" (&pci_indirect),
26393+ "r" (__PCIBIOS_DS));
26394 break;
26395 }
26396
fe2de317 26397@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
58c5fc13
MT
26398
26399 switch (len) {
26400 case 1:
26401- __asm__("lcall *(%%esi); cld\n\t"
26402+ __asm__("movw %w6, %%ds\n\t"
26403+ "lcall *%%ss:(%%esi); cld\n\t"
26404+ "push %%ss\n\t"
26405+ "pop %%ds\n\t"
26406 "jc 1f\n\t"
26407 "xor %%ah, %%ah\n"
26408 "1:"
fe2de317 26409@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
58c5fc13
MT
26410 "c" (value),
26411 "b" (bx),
26412 "D" ((long)reg),
26413- "S" (&pci_indirect));
26414+ "S" (&pci_indirect),
26415+ "r" (__PCIBIOS_DS));
26416 break;
26417 case 2:
26418- __asm__("lcall *(%%esi); cld\n\t"
26419+ __asm__("movw %w6, %%ds\n\t"
26420+ "lcall *%%ss:(%%esi); cld\n\t"
26421+ "push %%ss\n\t"
26422+ "pop %%ds\n\t"
26423 "jc 1f\n\t"
26424 "xor %%ah, %%ah\n"
26425 "1:"
fe2de317 26426@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
58c5fc13
MT
26427 "c" (value),
26428 "b" (bx),
26429 "D" ((long)reg),
26430- "S" (&pci_indirect));
26431+ "S" (&pci_indirect),
26432+ "r" (__PCIBIOS_DS));
26433 break;
26434 case 4:
26435- __asm__("lcall *(%%esi); cld\n\t"
26436+ __asm__("movw %w6, %%ds\n\t"
26437+ "lcall *%%ss:(%%esi); cld\n\t"
26438+ "push %%ss\n\t"
26439+ "pop %%ds\n\t"
26440 "jc 1f\n\t"
26441 "xor %%ah, %%ah\n"
26442 "1:"
fe2de317 26443@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
58c5fc13
MT
26444 "c" (value),
26445 "b" (bx),
26446 "D" ((long)reg),
26447- "S" (&pci_indirect));
26448+ "S" (&pci_indirect),
26449+ "r" (__PCIBIOS_DS));
26450 break;
26451 }
26452
fe2de317 26453@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
58c5fc13
MT
26454
26455 DBG("PCI: Fetching IRQ routing table... ");
26456 __asm__("push %%es\n\t"
26457+ "movw %w8, %%ds\n\t"
26458 "push %%ds\n\t"
26459 "pop %%es\n\t"
26460- "lcall *(%%esi); cld\n\t"
26461+ "lcall *%%ss:(%%esi); cld\n\t"
26462 "pop %%es\n\t"
26463+ "push %%ss\n\t"
26464+ "pop %%ds\n"
26465 "jc 1f\n\t"
26466 "xor %%ah, %%ah\n"
26467 "1:"
fe2de317 26468@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
58c5fc13
MT
26469 "1" (0),
26470 "D" ((long) &opt),
26471 "S" (&pci_indirect),
26472- "m" (opt)
26473+ "m" (opt),
26474+ "r" (__PCIBIOS_DS)
26475 : "memory");
26476 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26477 if (ret & 0xff00)
fe2de317 26478@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
58c5fc13
MT
26479 {
26480 int ret;
26481
26482- __asm__("lcall *(%%esi); cld\n\t"
26483+ __asm__("movw %w5, %%ds\n\t"
26484+ "lcall *%%ss:(%%esi); cld\n\t"
26485+ "push %%ss\n\t"
26486+ "pop %%ds\n"
26487 "jc 1f\n\t"
26488 "xor %%ah, %%ah\n"
26489 "1:"
fe2de317 26490@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
58c5fc13
MT
26491 : "0" (PCIBIOS_SET_PCI_HW_INT),
26492 "b" ((dev->bus->number << 8) | dev->devfn),
26493 "c" ((irq << 8) | (pin + 10)),
26494- "S" (&pci_indirect));
26495+ "S" (&pci_indirect),
26496+ "r" (__PCIBIOS_DS));
26497 return !(ret & 0xff00);
26498 }
26499 EXPORT_SYMBOL(pcibios_set_irq_routing);
fe2de317 26500diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
4c928ab7 26501index 40e4469..1ab536e 100644
fe2de317
MT
26502--- a/arch/x86/platform/efi/efi_32.c
26503+++ b/arch/x86/platform/efi/efi_32.c
4c928ab7 26504@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
bc901d79 26505 {
bc901d79
MT
26506 struct desc_ptr gdt_descr;
26507
15a11c5b
MT
26508+#ifdef CONFIG_PAX_KERNEXEC
26509+ struct desc_struct d;
26510+#endif
fe2de317
MT
26511+
26512 local_irq_save(efi_rt_eflags);
bc901d79 26513
4c928ab7 26514 load_cr3(initial_page_table);
bc901d79
MT
26515 __flush_tlb_all();
26516
15a11c5b
MT
26517+#ifdef CONFIG_PAX_KERNEXEC
26518+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
6e9df6a3 26519+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
15a11c5b 26520+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
6e9df6a3 26521+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15a11c5b
MT
26522+#endif
26523+
26524 gdt_descr.address = __pa(get_cpu_gdt_table(0));
bc901d79
MT
26525 gdt_descr.size = GDT_SIZE - 1;
26526 load_gdt(&gdt_descr);
4c928ab7 26527@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
bc901d79 26528 {
bc901d79
MT
26529 struct desc_ptr gdt_descr;
26530
15a11c5b
MT
26531+#ifdef CONFIG_PAX_KERNEXEC
26532+ struct desc_struct d;
26533+
26534+ memset(&d, 0, sizeof d);
6e9df6a3
MT
26535+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
26536+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
15a11c5b
MT
26537+#endif
26538+
26539 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
bc901d79
MT
26540 gdt_descr.size = GDT_SIZE - 1;
26541 load_gdt(&gdt_descr);
fe2de317
MT
26542diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
26543index fbe66e6..c5c0dd2 100644
26544--- a/arch/x86/platform/efi/efi_stub_32.S
26545+++ b/arch/x86/platform/efi/efi_stub_32.S
15a11c5b 26546@@ -6,7 +6,9 @@
bc901d79
MT
26547 */
26548
26549 #include <linux/linkage.h>
26550+#include <linux/init.h>
26551 #include <asm/page_types.h>
15a11c5b 26552+#include <asm/segment.h>
bc901d79
MT
26553
26554 /*
15a11c5b
MT
26555 * efi_call_phys(void *, ...) is a function with variable parameters.
26556@@ -20,7 +22,7 @@
bc901d79
MT
26557 * service functions will comply with gcc calling convention, too.
26558 */
26559
26560-.text
26561+__INIT
26562 ENTRY(efi_call_phys)
26563 /*
26564 * 0. The function can only be called in Linux kernel. So CS has been
15a11c5b 26565@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
bc901d79
MT
26566 * The mapping of lower virtual memory has been created in prelog and
26567 * epilog.
26568 */
26569- movl $1f, %edx
26570- subl $__PAGE_OFFSET, %edx
26571- jmp *%edx
15a11c5b
MT
26572+ movl $(__KERNEXEC_EFI_DS), %edx
26573+ mov %edx, %ds
26574+ mov %edx, %es
26575+ mov %edx, %ss
26576+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
bc901d79
MT
26577 1:
26578
26579 /*
15a11c5b 26580@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
bc901d79
MT
26581 * parameter 2, ..., param n. To make things easy, we save the return
26582 * address of efi_call_phys in a global variable.
26583 */
26584- popl %edx
26585- movl %edx, saved_return_addr
26586- /* get the function pointer into ECX*/
26587- popl %ecx
26588- movl %ecx, efi_rt_function_ptr
26589- movl $2f, %edx
26590- subl $__PAGE_OFFSET, %edx
26591- pushl %edx
26592+ popl (saved_return_addr)
26593+ popl (efi_rt_function_ptr)
26594
26595 /*
26596 * 3. Clear PG bit in %CR0.
15a11c5b 26597@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
bc901d79
MT
26598 /*
26599 * 5. Call the physical function.
26600 */
26601- jmp *%ecx
26602+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
26603
26604-2:
26605 /*
26606 * 6. After EFI runtime service returns, control will return to
26607 * following instruction. We'd better readjust stack pointer first.
15a11c5b 26608@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
bc901d79
MT
26609 movl %cr0, %edx
26610 orl $0x80000000, %edx
26611 movl %edx, %cr0
26612- jmp 1f
26613-1:
26614+
26615 /*
26616 * 8. Now restore the virtual mode from flat mode by
26617 * adding EIP with PAGE_OFFSET.
26618 */
26619- movl $1f, %edx
26620- jmp *%edx
15a11c5b 26621+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
bc901d79 26622 1:
15a11c5b
MT
26623+ movl $(__KERNEL_DS), %edx
26624+ mov %edx, %ds
26625+ mov %edx, %es
26626+ mov %edx, %ss
bc901d79
MT
26627
26628 /*
26629 * 9. Balance the stack. And because EAX contain the return value,
26630 * we'd better not clobber it.
26631 */
26632- leal efi_rt_function_ptr, %edx
26633- movl (%edx), %ecx
26634- pushl %ecx
26635+ pushl (efi_rt_function_ptr)
26636
26637 /*
26638- * 10. Push the saved return address onto the stack and return.
26639+ * 10. Return to the saved return address.
26640 */
26641- leal saved_return_addr, %edx
26642- movl (%edx), %ecx
26643- pushl %ecx
26644- ret
26645+ jmpl *(saved_return_addr)
26646 ENDPROC(efi_call_phys)
26647 .previous
26648
26649-.data
26650+__INITDATA
26651 saved_return_addr:
26652 .long 0
26653 efi_rt_function_ptr:
fe2de317
MT
26654diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
26655index 4c07cca..2c8427d 100644
26656--- a/arch/x86/platform/efi/efi_stub_64.S
26657+++ b/arch/x86/platform/efi/efi_stub_64.S
6e9df6a3
MT
26658@@ -7,6 +7,7 @@
26659 */
26660
26661 #include <linux/linkage.h>
26662+#include <asm/alternative-asm.h>
26663
26664 #define SAVE_XMM \
26665 mov %rsp, %rax; \
26666@@ -40,6 +41,7 @@ ENTRY(efi_call0)
15a11c5b
MT
26667 call *%rdi
26668 addq $32, %rsp
26669 RESTORE_XMM
fe2de317 26670+ pax_force_retaddr 0, 1
15a11c5b
MT
26671 ret
26672 ENDPROC(efi_call0)
26673
6e9df6a3 26674@@ -50,6 +52,7 @@ ENTRY(efi_call1)
15a11c5b
MT
26675 call *%rdi
26676 addq $32, %rsp
26677 RESTORE_XMM
fe2de317 26678+ pax_force_retaddr 0, 1
15a11c5b
MT
26679 ret
26680 ENDPROC(efi_call1)
26681
6e9df6a3 26682@@ -60,6 +63,7 @@ ENTRY(efi_call2)
15a11c5b
MT
26683 call *%rdi
26684 addq $32, %rsp
26685 RESTORE_XMM
fe2de317 26686+ pax_force_retaddr 0, 1
15a11c5b
MT
26687 ret
26688 ENDPROC(efi_call2)
26689
6e9df6a3 26690@@ -71,6 +75,7 @@ ENTRY(efi_call3)
15a11c5b
MT
26691 call *%rdi
26692 addq $32, %rsp
26693 RESTORE_XMM
fe2de317 26694+ pax_force_retaddr 0, 1
15a11c5b
MT
26695 ret
26696 ENDPROC(efi_call3)
26697
6e9df6a3 26698@@ -83,6 +88,7 @@ ENTRY(efi_call4)
15a11c5b
MT
26699 call *%rdi
26700 addq $32, %rsp
26701 RESTORE_XMM
fe2de317 26702+ pax_force_retaddr 0, 1
15a11c5b
MT
26703 ret
26704 ENDPROC(efi_call4)
26705
6e9df6a3 26706@@ -96,6 +102,7 @@ ENTRY(efi_call5)
15a11c5b
MT
26707 call *%rdi
26708 addq $48, %rsp
26709 RESTORE_XMM
fe2de317 26710+ pax_force_retaddr 0, 1
15a11c5b
MT
26711 ret
26712 ENDPROC(efi_call5)
26713
6e9df6a3 26714@@ -112,5 +119,6 @@ ENTRY(efi_call6)
15a11c5b
MT
26715 call *%rdi
26716 addq $48, %rsp
26717 RESTORE_XMM
fe2de317 26718+ pax_force_retaddr 0, 1
15a11c5b
MT
26719 ret
26720 ENDPROC(efi_call6)
fe2de317 26721diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
c6e2a6c8 26722index e31bcd8..f12dc46 100644
fe2de317
MT
26723--- a/arch/x86/platform/mrst/mrst.c
26724+++ b/arch/x86/platform/mrst/mrst.c
c6e2a6c8 26725@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
4c928ab7
MT
26726 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
26727 int sfi_mrtc_num;
15a11c5b 26728
15a11c5b
MT
26729-static void mrst_power_off(void)
26730+static __noreturn void mrst_power_off(void)
26731 {
15a11c5b
MT
26732+ BUG();
26733 }
26734
26735-static void mrst_reboot(void)
26736+static __noreturn void mrst_reboot(void)
26737 {
c6e2a6c8 26738 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
15a11c5b 26739+ BUG();
66a7e928
MT
26740 }
26741
4c928ab7 26742 /* parse all the mtimer info to a static mtimer array */
fe2de317 26743diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
c6e2a6c8 26744index 218cdb1..fd55c08 100644
fe2de317
MT
26745--- a/arch/x86/power/cpu.c
26746+++ b/arch/x86/power/cpu.c
c6e2a6c8 26747@@ -132,7 +132,7 @@ static void do_fpu_end(void)
58c5fc13
MT
26748 static void fix_processor_context(void)
26749 {
26750 int cpu = smp_processor_id();
26751- struct tss_struct *t = &per_cpu(init_tss, cpu);
26752+ struct tss_struct *t = init_tss + cpu;
58c5fc13
MT
26753
26754 set_tss_desc(cpu, t); /*
26755 * This just modifies memory; should not be
c6e2a6c8 26756@@ -142,7 +142,9 @@ static void fix_processor_context(void)
58c5fc13
MT
26757 */
26758
26759 #ifdef CONFIG_X86_64
ae4e228f 26760+ pax_open_kernel();
58c5fc13 26761 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
ae4e228f 26762+ pax_close_kernel();
58c5fc13 26763
58c5fc13
MT
26764 syscall_init(); /* This sets MSR_*STAR and related */
26765 #endif
c6e2a6c8 26766diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
c1e3898a 26767index b685296..e00eb65 100644
c6e2a6c8
MT
26768--- a/arch/x86/tools/relocs.c
26769+++ b/arch/x86/tools/relocs.c
26770@@ -12,10 +12,13 @@
26771 #include <regex.h>
26772 #include <tools/le_byteshift.h>
26773
26774+#include "../../../include/generated/autoconf.h"
26775+
26776 static void die(char *fmt, ...);
26777
26778 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
26779 static Elf32_Ehdr ehdr;
26780+static Elf32_Phdr *phdr;
26781 static unsigned long reloc_count, reloc_idx;
26782 static unsigned long *relocs;
26783 static unsigned long reloc16_count, reloc16_idx;
26784@@ -323,9 +326,39 @@ static void read_ehdr(FILE *fp)
26785 }
26786 }
26787
26788+static void read_phdrs(FILE *fp)
26789+{
26790+ unsigned int i;
26791+
26792+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
26793+ if (!phdr) {
26794+ die("Unable to allocate %d program headers\n",
26795+ ehdr.e_phnum);
26796+ }
26797+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
26798+ die("Seek to %d failed: %s\n",
26799+ ehdr.e_phoff, strerror(errno));
26800+ }
26801+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
26802+ die("Cannot read ELF program headers: %s\n",
26803+ strerror(errno));
26804+ }
26805+ for(i = 0; i < ehdr.e_phnum; i++) {
26806+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
26807+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
26808+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
26809+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
26810+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
26811+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
26812+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
26813+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
26814+ }
26815+
26816+}
26817+
26818 static void read_shdrs(FILE *fp)
26819 {
26820- int i;
26821+ unsigned int i;
26822 Elf32_Shdr shdr;
26823
26824 secs = calloc(ehdr.e_shnum, sizeof(struct section));
26825@@ -360,7 +393,7 @@ static void read_shdrs(FILE *fp)
26826
26827 static void read_strtabs(FILE *fp)
26828 {
26829- int i;
26830+ unsigned int i;
26831 for (i = 0; i < ehdr.e_shnum; i++) {
26832 struct section *sec = &secs[i];
26833 if (sec->shdr.sh_type != SHT_STRTAB) {
26834@@ -385,7 +418,7 @@ static void read_strtabs(FILE *fp)
26835
26836 static void read_symtabs(FILE *fp)
26837 {
26838- int i,j;
26839+ unsigned int i,j;
26840 for (i = 0; i < ehdr.e_shnum; i++) {
26841 struct section *sec = &secs[i];
26842 if (sec->shdr.sh_type != SHT_SYMTAB) {
26843@@ -418,7 +451,9 @@ static void read_symtabs(FILE *fp)
26844
26845 static void read_relocs(FILE *fp)
26846 {
26847- int i,j;
26848+ unsigned int i,j;
26849+ uint32_t base;
26850+
26851 for (i = 0; i < ehdr.e_shnum; i++) {
26852 struct section *sec = &secs[i];
26853 if (sec->shdr.sh_type != SHT_REL) {
26854@@ -438,9 +473,22 @@ static void read_relocs(FILE *fp)
26855 die("Cannot read symbol table: %s\n",
26856 strerror(errno));
26857 }
26858+ base = 0;
26859+
26860+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26861+ for (j = 0; j < ehdr.e_phnum; j++) {
26862+ if (phdr[j].p_type != PT_LOAD )
26863+ continue;
26864+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
26865+ continue;
26866+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
26867+ break;
26868+ }
26869+#endif
26870+
26871 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
26872 Elf32_Rel *rel = &sec->reltab[j];
26873- rel->r_offset = elf32_to_cpu(rel->r_offset);
26874+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
26875 rel->r_info = elf32_to_cpu(rel->r_info);
26876 }
26877 }
26878@@ -449,13 +497,13 @@ static void read_relocs(FILE *fp)
26879
26880 static void print_absolute_symbols(void)
26881 {
26882- int i;
26883+ unsigned int i;
26884 printf("Absolute symbols\n");
26885 printf(" Num: Value Size Type Bind Visibility Name\n");
26886 for (i = 0; i < ehdr.e_shnum; i++) {
26887 struct section *sec = &secs[i];
26888 char *sym_strtab;
26889- int j;
26890+ unsigned int j;
26891
26892 if (sec->shdr.sh_type != SHT_SYMTAB) {
26893 continue;
c1e3898a 26894@@ -482,14 +530,14 @@ static void print_absolute_symbols(void)
c6e2a6c8
MT
26895
26896 static void print_absolute_relocs(void)
26897 {
26898- int i, printed = 0;
26899+ unsigned int i, printed = 0;
26900
26901 for (i = 0; i < ehdr.e_shnum; i++) {
26902 struct section *sec = &secs[i];
c1e3898a
MT
26903 struct section *sec_applies, *sec_symtab;
26904 char *sym_strtab;
26905 Elf32_Sym *sh_symtab;
26906- int j;
26907+ unsigned int j;
26908 if (sec->shdr.sh_type != SHT_REL) {
26909 continue;
26910 }
26911@@ -551,13 +599,13 @@ static void print_absolute_relocs(void)
c6e2a6c8
MT
26912 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26913 int use_real_mode)
26914 {
26915- int i;
26916+ unsigned int i;
26917 /* Walk through the relocations */
26918 for (i = 0; i < ehdr.e_shnum; i++) {
26919 char *sym_strtab;
c1e3898a
MT
26920 Elf32_Sym *sh_symtab;
26921 struct section *sec_applies, *sec_symtab;
26922- int j;
26923+ unsigned int j;
26924 struct section *sec = &secs[i];
26925
26926 if (sec->shdr.sh_type != SHT_REL) {
c6e2a6c8
MT
26927@@ -581,6 +629,22 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
26928 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
26929 r_type = ELF32_R_TYPE(rel->r_info);
26930
26931+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
26932+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
26933+ continue;
26934+
26935+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
26936+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
26937+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
26938+ continue;
26939+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
26940+ continue;
26941+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
26942+ continue;
26943+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
26944+ continue;
26945+#endif
26946+
26947 shn_abs = sym->st_shndx == SHN_ABS;
26948
26949 switch (r_type) {
26950@@ -674,7 +738,7 @@ static int write32(unsigned int v, FILE *f)
26951
26952 static void emit_relocs(int as_text, int use_real_mode)
26953 {
26954- int i;
26955+ unsigned int i;
26956 /* Count how many relocations I have and allocate space for them. */
26957 reloc_count = 0;
26958 walk_relocs(count_reloc, use_real_mode);
26959@@ -801,6 +865,7 @@ int main(int argc, char **argv)
26960 fname, strerror(errno));
26961 }
26962 read_ehdr(fp);
26963+ read_phdrs(fp);
26964 read_shdrs(fp);
26965 read_strtabs(fp);
26966 read_symtabs(fp);
fe2de317 26967diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
c6e2a6c8 26968index fd14be1..e3c79c0 100644
fe2de317
MT
26969--- a/arch/x86/vdso/Makefile
26970+++ b/arch/x86/vdso/Makefile
c6e2a6c8 26971@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
6892158b
MT
26972 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
26973 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
58c5fc13 26974
ae4e228f 26975-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
71d190be 26976+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
58c5fc13
MT
26977 GCOV_PROFILE := n
26978
26979 #
fe2de317 26980diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
c6e2a6c8 26981index 66e6d93..587f435 100644
fe2de317
MT
26982--- a/arch/x86/vdso/vdso32-setup.c
26983+++ b/arch/x86/vdso/vdso32-setup.c
58c5fc13
MT
26984@@ -25,6 +25,7 @@
26985 #include <asm/tlbflush.h>
26986 #include <asm/vdso.h>
26987 #include <asm/proto.h>
26988+#include <asm/mman.h>
26989
26990 enum {
26991 VDSO_DISABLED = 0,
fe2de317 26992@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
58c5fc13
MT
26993 void enable_sep_cpu(void)
26994 {
26995 int cpu = get_cpu();
26996- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26997+ struct tss_struct *tss = init_tss + cpu;
26998
26999 if (!boot_cpu_has(X86_FEATURE_SEP)) {
27000 put_cpu();
27001@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
27002 gate_vma.vm_start = FIXADDR_USER_START;
27003 gate_vma.vm_end = FIXADDR_USER_END;
27004 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
27005- gate_vma.vm_page_prot = __P101;
27006+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
c6e2a6c8
MT
27007
27008 return 0;
27009 }
27010@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
58c5fc13
MT
27011 if (compat)
27012 addr = VDSO_HIGH_BASE;
27013 else {
27014- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
27015+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
27016 if (IS_ERR_VALUE(addr)) {
27017 ret = addr;
27018 goto up_fail;
27019 }
27020 }
27021
27022- current->mm->context.vdso = (void *)addr;
27023+ current->mm->context.vdso = addr;
27024
27025 if (compat_uses_vma || !compat) {
27026 /*
c6e2a6c8 27027@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
ae4e228f
MT
27028 }
27029
27030 current_thread_info()->sysenter_return =
27031- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
27032+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
58c5fc13
MT
27033
27034 up_fail:
27035 if (ret)
27036- current->mm->context.vdso = NULL;
27037+ current->mm->context.vdso = 0;
27038
27039 up_write(&mm->mmap_sem);
27040
c6e2a6c8 27041@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
58c5fc13
MT
27042
27043 const char *arch_vma_name(struct vm_area_struct *vma)
27044 {
27045- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
27046+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
27047 return "[vdso]";
27048+
27049+#ifdef CONFIG_PAX_SEGMEXEC
27050+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
27051+ return "[vdso]";
27052+#endif
27053+
27054 return NULL;
27055 }
27056
c6e2a6c8 27057@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
66a7e928
MT
27058 * Check to see if the corresponding task was created in compat vdso
27059 * mode.
27060 */
58c5fc13
MT
27061- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
27062+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
27063 return &gate_vma;
27064 return NULL;
27065 }
fe2de317 27066diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
c6e2a6c8 27067index 00aaf04..4a26505 100644
fe2de317
MT
27068--- a/arch/x86/vdso/vma.c
27069+++ b/arch/x86/vdso/vma.c
6e9df6a3 27070@@ -16,8 +16,6 @@
15a11c5b 27071 #include <asm/vdso.h>
6e9df6a3 27072 #include <asm/page.h>
15a11c5b
MT
27073
27074-unsigned int __read_mostly vdso_enabled = 1;
27075-
27076 extern char vdso_start[], vdso_end[];
27077 extern unsigned short vdso_sync_cpuid;
15a11c5b 27078
c6e2a6c8 27079@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
4c928ab7
MT
27080 * unaligned here as a result of stack start randomization.
27081 */
27082 addr = PAGE_ALIGN(addr);
27083- addr = align_addr(addr, NULL, ALIGN_VDSO);
27084
27085 return addr;
27086 }
c6e2a6c8
MT
27087@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
27088 unsigned size)
15a11c5b 27089 {
6e9df6a3
MT
27090 struct mm_struct *mm = current->mm;
27091- unsigned long addr;
27092+ unsigned long addr = 0;
15a11c5b
MT
27093 int ret;
27094
27095- if (!vdso_enabled)
27096- return 0;
27097-
27098 down_write(&mm->mmap_sem);
6e9df6a3
MT
27099+
27100+#ifdef CONFIG_PAX_RANDMMAP
27101+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27102+#endif
27103+
c6e2a6c8 27104 addr = vdso_addr(mm->start_stack, size);
4c928ab7 27105+ addr = align_addr(addr, NULL, ALIGN_VDSO);
c6e2a6c8 27106 addr = get_unmapped_area(NULL, addr, size, 0, 0);
15a11c5b 27107 if (IS_ERR_VALUE(addr)) {
4c928ab7 27108 ret = addr;
58c5fc13
MT
27109 goto up_fail;
27110 }
27111
27112- current->mm->context.vdso = (void *)addr;
6e9df6a3 27113+ mm->context.vdso = addr;
58c5fc13 27114
c6e2a6c8 27115 ret = install_special_mapping(mm, addr, size,
58c5fc13 27116 VM_READ|VM_EXEC|
c6e2a6c8
MT
27117 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
27118 pages);
6e9df6a3 27119- if (ret) {
58c5fc13 27120- current->mm->context.vdso = NULL;
6e9df6a3
MT
27121- goto up_fail;
27122- }
15a11c5b
MT
27123+ if (ret)
27124+ mm->context.vdso = 0;
6e9df6a3 27125
15a11c5b 27126 up_fail:
58c5fc13 27127 up_write(&mm->mmap_sem);
c6e2a6c8
MT
27128@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
27129 vdsox32_size);
58c5fc13 27130 }
c6e2a6c8 27131 #endif
58c5fc13
MT
27132-
27133-static __init int vdso_setup(char *s)
27134-{
27135- vdso_enabled = simple_strtoul(s, NULL, 0);
27136- return 0;
27137-}
27138-__setup("vdso=", vdso_setup);
fe2de317 27139diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
c1e3898a 27140index 40edfc3..b4d80ac 100644
fe2de317
MT
27141--- a/arch/x86/xen/enlighten.c
27142+++ b/arch/x86/xen/enlighten.c
c6e2a6c8 27143@@ -95,8 +95,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
58c5fc13
MT
27144
27145 struct shared_info xen_dummy_shared_info;
27146
27147-void *xen_initial_gdt;
27148-
6892158b
MT
27149 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
27150 __read_mostly int xen_have_vector_callback;
27151 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
c1e3898a 27152@@ -1165,30 +1163,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
66a7e928
MT
27153 #endif
27154 };
27155
27156-static void xen_reboot(int reason)
27157+static __noreturn void xen_reboot(int reason)
27158 {
27159 struct sched_shutdown r = { .reason = reason };
27160
5e856224
MT
27161- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
27162- BUG();
27163+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
27164+ BUG();
66a7e928
MT
27165 }
27166
27167-static void xen_restart(char *msg)
27168+static __noreturn void xen_restart(char *msg)
27169 {
27170 xen_reboot(SHUTDOWN_reboot);
27171 }
27172
27173-static void xen_emergency_restart(void)
27174+static __noreturn void xen_emergency_restart(void)
27175 {
27176 xen_reboot(SHUTDOWN_reboot);
27177 }
27178
27179-static void xen_machine_halt(void)
27180+static __noreturn void xen_machine_halt(void)
27181 {
27182 xen_reboot(SHUTDOWN_poweroff);
27183 }
5e856224
MT
27184
27185-static void xen_machine_power_off(void)
27186+static __noreturn void xen_machine_power_off(void)
27187 {
27188 if (pm_power_off)
27189 pm_power_off();
c1e3898a 27190@@ -1291,7 +1289,17 @@ asmlinkage void __init xen_start_kernel(void)
df50ba0c
MT
27191 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
27192
27193 /* Work out if we support NX */
27194- x86_configure_nx();
27195+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
27196+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
57199397 27197+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
df50ba0c
MT
27198+ unsigned l, h;
27199+
27200+ __supported_pte_mask |= _PAGE_NX;
27201+ rdmsr(MSR_EFER, l, h);
27202+ l |= EFER_NX;
27203+ wrmsr(MSR_EFER, l, h);
27204+ }
27205+#endif
27206
27207 xen_setup_features();
27208
c1e3898a 27209@@ -1322,13 +1330,6 @@ asmlinkage void __init xen_start_kernel(void)
58c5fc13
MT
27210
27211 machine_ops = xen_machine_ops;
27212
27213- /*
27214- * The only reliable way to retain the initial address of the
27215- * percpu gdt_page is to remember it here, so we can go and
27216- * mark it RW later, when the initial percpu area is freed.
27217- */
27218- xen_initial_gdt = &per_cpu(gdt_page, 0);
27219-
27220 xen_smp_init();
27221
16454cff 27222 #ifdef CONFIG_ACPI_NUMA
fe2de317 27223diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
c6e2a6c8 27224index 69f5857..0699dc5 100644
fe2de317
MT
27225--- a/arch/x86/xen/mmu.c
27226+++ b/arch/x86/xen/mmu.c
5e856224 27227@@ -1738,6 +1738,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
58c5fc13
MT
27228 convert_pfn_mfn(init_level4_pgt);
27229 convert_pfn_mfn(level3_ident_pgt);
27230 convert_pfn_mfn(level3_kernel_pgt);
fe2de317
MT
27231+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27232+ convert_pfn_mfn(level3_vmalloc_end_pgt);
58c5fc13
MT
27233+ convert_pfn_mfn(level3_vmemmap_pgt);
27234
27235 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27236 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
5e856224 27237@@ -1756,7 +1759,11 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
58c5fc13
MT
27238 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27239 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27240 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
fe2de317
MT
27241+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27242+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
58c5fc13
MT
27243+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27244 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27245+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27246 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27247 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27248
c6e2a6c8 27249@@ -1964,6 +1971,7 @@ static void __init xen_post_allocator_init(void)
15a11c5b
MT
27250 pv_mmu_ops.set_pud = xen_set_pud;
27251 #if PAGETABLE_LEVELS == 4
27252 pv_mmu_ops.set_pgd = xen_set_pgd;
27253+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27254 #endif
6892158b 27255
15a11c5b 27256 /* This will work as long as patching hasn't happened yet
c6e2a6c8 27257@@ -2045,6 +2053,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
15a11c5b
MT
27258 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27259 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27260 .set_pgd = xen_set_pgd_hyper,
27261+ .set_pgd_batched = xen_set_pgd_hyper,
6892158b 27262
15a11c5b
MT
27263 .alloc_pud = xen_alloc_pmd_init,
27264 .release_pud = xen_release_pmd_init,
fe2de317 27265diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
c6e2a6c8 27266index 0503c0c..ceb2d16 100644
fe2de317
MT
27267--- a/arch/x86/xen/smp.c
27268+++ b/arch/x86/xen/smp.c
c6e2a6c8 27269@@ -215,11 +215,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
58c5fc13
MT
27270 {
27271 BUG_ON(smp_processor_id() != 0);
27272 native_smp_prepare_boot_cpu();
27273-
27274- /* We've switched to the "real" per-cpu gdt, so make sure the
27275- old memory can be recycled */
27276- make_lowmem_page_readwrite(xen_initial_gdt);
27277-
bc901d79 27278 xen_filter_cpu_maps();
58c5fc13
MT
27279 xen_setup_vcpu_info_placement();
27280 }
c6e2a6c8 27281@@ -296,12 +291,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
58c5fc13
MT
27282 gdt = get_cpu_gdt_table(cpu);
27283
27284 ctxt->flags = VGCF_IN_KERNEL;
27285- ctxt->user_regs.ds = __USER_DS;
27286- ctxt->user_regs.es = __USER_DS;
27287+ ctxt->user_regs.ds = __KERNEL_DS;
27288+ ctxt->user_regs.es = __KERNEL_DS;
27289 ctxt->user_regs.ss = __KERNEL_DS;
27290 #ifdef CONFIG_X86_32
27291 ctxt->user_regs.fs = __KERNEL_PERCPU;
bc901d79
MT
27292- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27293+ savesegment(gs, ctxt->user_regs.gs);
27294 #else
27295 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27296 #endif
c6e2a6c8 27297@@ -352,13 +347,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
71d190be
MT
27298 int rc;
27299
27300 per_cpu(current_task, cpu) = idle;
27301+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27302 #ifdef CONFIG_X86_32
27303 irq_ctx_init(cpu);
27304 #else
27305 clear_tsk_thread_flag(idle, TIF_FORK);
27306- per_cpu(kernel_stack, cpu) =
27307- (unsigned long)task_stack_page(idle) -
27308- KERNEL_STACK_OFFSET + THREAD_SIZE;
66a7e928 27309+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
71d190be
MT
27310 #endif
27311 xen_setup_runstate_info(cpu);
27312 xen_setup_timer(cpu);
fe2de317
MT
27313diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27314index b040b0e..8cc4fe0 100644
27315--- a/arch/x86/xen/xen-asm_32.S
27316+++ b/arch/x86/xen/xen-asm_32.S
71d190be
MT
27317@@ -83,14 +83,14 @@ ENTRY(xen_iret)
27318 ESP_OFFSET=4 # bytes pushed onto stack
27319
27320 /*
27321- * Store vcpu_info pointer for easy access. Do it this way to
27322- * avoid having to reload %fs
27323+ * Store vcpu_info pointer for easy access.
27324 */
27325 #ifdef CONFIG_SMP
27326- GET_THREAD_INFO(%eax)
27327- movl TI_cpu(%eax), %eax
27328- movl __per_cpu_offset(,%eax,4), %eax
27329- mov xen_vcpu(%eax), %eax
27330+ push %fs
27331+ mov $(__KERNEL_PERCPU), %eax
27332+ mov %eax, %fs
27333+ mov PER_CPU_VAR(xen_vcpu), %eax
27334+ pop %fs
27335 #else
27336 movl xen_vcpu, %eax
27337 #endif
fe2de317
MT
27338diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27339index aaa7291..3f77960 100644
27340--- a/arch/x86/xen/xen-head.S
27341+++ b/arch/x86/xen/xen-head.S
df50ba0c
MT
27342@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27343 #ifdef CONFIG_X86_32
27344 mov %esi,xen_start_info
27345 mov $init_thread_union+THREAD_SIZE,%esp
27346+#ifdef CONFIG_SMP
27347+ movl $cpu_gdt_table,%edi
27348+ movl $__per_cpu_load,%eax
27349+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27350+ rorl $16,%eax
27351+ movb %al,__KERNEL_PERCPU + 4(%edi)
27352+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27353+ movl $__per_cpu_end - 1,%eax
27354+ subl $__per_cpu_start,%eax
27355+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27356+#endif
27357 #else
27358 mov %rsi,xen_start_info
27359 mov $init_thread_union+THREAD_SIZE,%rsp
fe2de317
MT
27360diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27361index b095739..8c17bcd 100644
27362--- a/arch/x86/xen/xen-ops.h
27363+++ b/arch/x86/xen/xen-ops.h
58c5fc13
MT
27364@@ -10,8 +10,6 @@
27365 extern const char xen_hypervisor_callback[];
27366 extern const char xen_failsafe_callback[];
27367
27368-extern void *xen_initial_gdt;
27369-
27370 struct trap_info;
27371 void xen_copy_trap_info(struct trap_info *traps);
27372
4c928ab7
MT
27373diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
27374index 525bd3d..ef888b1 100644
27375--- a/arch/xtensa/variants/dc232b/include/variant/core.h
27376+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
27377@@ -119,9 +119,9 @@
27378 ----------------------------------------------------------------------*/
27379
27380 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
27381-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
27382 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
27383 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
27384+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27385
27386 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
27387 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
27388diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
27389index 2f33760..835e50a 100644
27390--- a/arch/xtensa/variants/fsf/include/variant/core.h
27391+++ b/arch/xtensa/variants/fsf/include/variant/core.h
27392@@ -11,6 +11,7 @@
27393 #ifndef _XTENSA_CORE_H
27394 #define _XTENSA_CORE_H
27395
27396+#include <linux/const.h>
27397
27398 /****************************************************************************
27399 Parameters Useful for Any Code, USER or PRIVILEGED
27400@@ -112,9 +113,9 @@
27401 ----------------------------------------------------------------------*/
27402
27403 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27404-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27405 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27406 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27407+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27408
27409 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
27410 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
27411diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
27412index af00795..2bb8105 100644
27413--- a/arch/xtensa/variants/s6000/include/variant/core.h
27414+++ b/arch/xtensa/variants/s6000/include/variant/core.h
27415@@ -11,6 +11,7 @@
27416 #ifndef _XTENSA_CORE_CONFIGURATION_H
27417 #define _XTENSA_CORE_CONFIGURATION_H
27418
27419+#include <linux/const.h>
27420
27421 /****************************************************************************
27422 Parameters Useful for Any Code, USER or PRIVILEGED
27423@@ -118,9 +119,9 @@
27424 ----------------------------------------------------------------------*/
27425
27426 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
27427-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
27428 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
27429 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
27430+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
27431
27432 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
27433 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
fe2de317
MT
27434diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27435index 58916af..9cb880b 100644
27436--- a/block/blk-iopoll.c
27437+++ b/block/blk-iopoll.c
27438@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
ae4e228f
MT
27439 }
27440 EXPORT_SYMBOL(blk_iopoll_complete);
27441
27442-static void blk_iopoll_softirq(struct softirq_action *h)
27443+static void blk_iopoll_softirq(void)
27444 {
27445 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27446 int rearm = 0, budget = blk_iopoll_budget;
fe2de317 27447diff --git a/block/blk-map.c b/block/blk-map.c
4c928ab7 27448index 623e1cd..ca1e109 100644
fe2de317
MT
27449--- a/block/blk-map.c
27450+++ b/block/blk-map.c
27451@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
ae4e228f
MT
27452 if (!len || !kbuf)
27453 return -EINVAL;
58c5fc13 27454
bc901d79
MT
27455- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
27456+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
ae4e228f
MT
27457 if (do_copy)
27458 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27459 else
fe2de317 27460diff --git a/block/blk-softirq.c b/block/blk-softirq.c
c6e2a6c8 27461index 467c8de..4bddc6d 100644
fe2de317
MT
27462--- a/block/blk-softirq.c
27463+++ b/block/blk-softirq.c
c6e2a6c8 27464@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
ae4e228f
MT
27465 * Softirq action handler - move entries to local list and loop over them
27466 * while passing them to the queue registered handler.
27467 */
27468-static void blk_done_softirq(struct softirq_action *h)
27469+static void blk_done_softirq(void)
27470 {
27471 struct list_head *cpu_list, local_list;
58c5fc13 27472
fe2de317 27473diff --git a/block/bsg.c b/block/bsg.c
5e856224 27474index ff64ae3..593560c 100644
fe2de317
MT
27475--- a/block/bsg.c
27476+++ b/block/bsg.c
27477@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
71d190be
MT
27478 struct sg_io_v4 *hdr, struct bsg_device *bd,
27479 fmode_t has_write_perm)
27480 {
27481+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27482+ unsigned char *cmdptr;
27483+
27484 if (hdr->request_len > BLK_MAX_CDB) {
27485 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27486 if (!rq->cmd)
27487 return -ENOMEM;
27488- }
27489+ cmdptr = rq->cmd;
27490+ } else
27491+ cmdptr = tmpcmd;
27492
6e9df6a3
MT
27493- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
27494+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
71d190be
MT
27495 hdr->request_len))
27496 return -EFAULT;
27497
27498+ if (cmdptr != rq->cmd)
27499+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27500+
27501 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27502 if (blk_verify_command(rq->cmd, has_write_perm))
27503 return -EPERM;
fe2de317 27504diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
5e856224 27505index 7c668c8..db3521c 100644
fe2de317
MT
27506--- a/block/compat_ioctl.c
27507+++ b/block/compat_ioctl.c
27508@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
6e9df6a3
MT
27509 err |= __get_user(f->spec1, &uf->spec1);
27510 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27511 err |= __get_user(name, &uf->name);
27512- f->name = compat_ptr(name);
27513+ f->name = (void __force_kernel *)compat_ptr(name);
27514 if (err) {
27515 err = -EFAULT;
27516 goto out;
5e856224
MT
27517diff --git a/block/partitions/efi.c b/block/partitions/efi.c
27518index 6296b40..417c00f 100644
27519--- a/block/partitions/efi.c
27520+++ b/block/partitions/efi.c
27521@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
27522 if (!gpt)
27523 return NULL;
27524
27525+ if (!le32_to_cpu(gpt->num_partition_entries))
27526+ return NULL;
27527+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
27528+ if (!pte)
27529+ return NULL;
27530+
27531 count = le32_to_cpu(gpt->num_partition_entries) *
27532 le32_to_cpu(gpt->sizeof_partition_entry);
27533- if (!count)
27534- return NULL;
27535- pte = kzalloc(count, GFP_KERNEL);
27536- if (!pte)
27537- return NULL;
27538-
27539 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
27540 (u8 *) pte,
27541 count) < count) {
fe2de317 27542diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
5e856224 27543index 260fa80..e8f3caf 100644
fe2de317
MT
27544--- a/block/scsi_ioctl.c
27545+++ b/block/scsi_ioctl.c
4c928ab7 27546@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
71d190be
MT
27547 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27548 struct sg_io_hdr *hdr, fmode_t mode)
27549 {
27550- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27551+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27552+ unsigned char *cmdptr;
27553+
27554+ if (rq->cmd != rq->__cmd)
27555+ cmdptr = rq->cmd;
27556+ else
27557+ cmdptr = tmpcmd;
27558+
27559+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27560 return -EFAULT;
27561+
27562+ if (cmdptr != rq->cmd)
27563+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27564+
27565 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27566 return -EPERM;
27567
4c928ab7 27568@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
71d190be
MT
27569 int err;
27570 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27571 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27572+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27573+ unsigned char *cmdptr;
27574
27575 if (!sic)
27576 return -EINVAL;
4c928ab7 27577@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
71d190be
MT
27578 */
27579 err = -EFAULT;
27580 rq->cmd_len = cmdlen;
27581- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27582+
27583+ if (rq->cmd != rq->__cmd)
27584+ cmdptr = rq->cmd;
27585+ else
27586+ cmdptr = tmpcmd;
27587+
27588+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27589 goto error;
27590
27591+ if (rq->cmd != cmdptr)
27592+ memcpy(rq->cmd, cmdptr, cmdlen);
27593+
27594 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27595 goto error;
27596
fe2de317
MT
27597diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27598index 671d4d6..5f24030 100644
27599--- a/crypto/cryptd.c
27600+++ b/crypto/cryptd.c
15a11c5b
MT
27601@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
27602
27603 struct cryptd_blkcipher_request_ctx {
27604 crypto_completion_t complete;
27605-};
27606+} __no_const;
27607
27608 struct cryptd_hash_ctx {
27609 struct crypto_shash *child;
27610@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
27611
27612 struct cryptd_aead_request_ctx {
27613 crypto_completion_t complete;
27614-};
27615+} __no_const;
27616
27617 static void cryptd_queue_worker(struct work_struct *work);
27618
fe2de317 27619diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
c6e2a6c8 27620index e6defd8..c26a225 100644
fe2de317
MT
27621--- a/drivers/acpi/apei/cper.c
27622+++ b/drivers/acpi/apei/cper.c
66a7e928 27623@@ -38,12 +38,12 @@
8308f9c9
MT
27624 */
27625 u64 cper_next_record_id(void)
27626 {
27627- static atomic64_t seq;
27628+ static atomic64_unchecked_t seq;
27629
27630- if (!atomic64_read(&seq))
27631- atomic64_set(&seq, ((u64)get_seconds()) << 32);
27632+ if (!atomic64_read_unchecked(&seq))
27633+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
27634
27635- return atomic64_inc_return(&seq);
27636+ return atomic64_inc_return_unchecked(&seq);
27637 }
27638 EXPORT_SYMBOL_GPL(cper_next_record_id);
27639
fe2de317 27640diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
c6e2a6c8 27641index 7586544..636a2f0 100644
fe2de317
MT
27642--- a/drivers/acpi/ec_sys.c
27643+++ b/drivers/acpi/ec_sys.c
4c928ab7 27644@@ -12,6 +12,7 @@
15a11c5b
MT
27645 #include <linux/acpi.h>
27646 #include <linux/debugfs.h>
4c928ab7
MT
27647 #include <linux/module.h>
27648+#include <linux/uaccess.h>
15a11c5b 27649 #include "internal.h"
ae4e228f 27650
15a11c5b 27651 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
c6e2a6c8 27652@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
15a11c5b
MT
27653 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
27654 */
27655 unsigned int size = EC_SPACE_SIZE;
27656- u8 *data = (u8 *) buf;
27657+ u8 data;
27658 loff_t init_off = *off;
27659 int err = 0;
ae4e228f 27660
c6e2a6c8 27661@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
15a11c5b 27662 size = count;
58c5fc13 27663
15a11c5b
MT
27664 while (size) {
27665- err = ec_read(*off, &data[*off - init_off]);
27666+ err = ec_read(*off, &data);
27667 if (err)
27668 return err;
27669+ if (put_user(data, &buf[*off - init_off]))
27670+ return -EFAULT;
27671 *off += 1;
27672 size--;
27673 }
c6e2a6c8 27674@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
66a7e928 27675
15a11c5b
MT
27676 unsigned int size = count;
27677 loff_t init_off = *off;
27678- u8 *data = (u8 *) buf;
27679 int err = 0;
df50ba0c 27680
15a11c5b 27681 if (*off >= EC_SPACE_SIZE)
c6e2a6c8 27682@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
15a11c5b 27683 }
df50ba0c 27684
15a11c5b
MT
27685 while (size) {
27686- u8 byte_write = data[*off - init_off];
27687+ u8 byte_write;
27688+ if (get_user(byte_write, &buf[*off - init_off]))
27689+ return -EFAULT;
27690 err = ec_write(*off, byte_write);
27691 if (err)
27692 return err;
fe2de317 27693diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
4c928ab7 27694index 251c7b62..000462d 100644
fe2de317
MT
27695--- a/drivers/acpi/proc.c
27696+++ b/drivers/acpi/proc.c
4c928ab7 27697@@ -343,19 +343,13 @@ acpi_system_write_wakeup_device(struct file *file,
df50ba0c
MT
27698 size_t count, loff_t * ppos)
27699 {
27700 struct list_head *node, *next;
27701- char strbuf[5];
27702- char str[5] = "";
27703- unsigned int len = count;
fe2de317
MT
27704+ char strbuf[5] = {0};
27705
df50ba0c
MT
27706- if (len > 4)
27707- len = 4;
27708- if (len < 0)
16454cff
MT
27709+ if (count > 4)
27710+ count = 4;
df50ba0c
MT
27711+ if (copy_from_user(strbuf, buffer, count))
27712 return -EFAULT;
fe2de317
MT
27713-
27714- if (copy_from_user(strbuf, buffer, len))
27715- return -EFAULT;
df50ba0c
MT
27716- strbuf[len] = '\0';
27717- sscanf(strbuf, "%s", str);
27718+ strbuf[count] = '\0';
27719
27720 mutex_lock(&acpi_device_lock);
27721 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
4c928ab7 27722@@ -364,7 +358,7 @@ acpi_system_write_wakeup_device(struct file *file,
df50ba0c
MT
27723 if (!dev->wakeup.flags.valid)
27724 continue;
27725
27726- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27727+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
16454cff
MT
27728 if (device_can_wakeup(&dev->dev)) {
27729 bool enable = !device_may_wakeup(&dev->dev);
27730 device_set_wakeup_enable(&dev->dev, enable);
fe2de317 27731diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
c6e2a6c8 27732index 0734086..3ad3e4c 100644
fe2de317
MT
27733--- a/drivers/acpi/processor_driver.c
27734+++ b/drivers/acpi/processor_driver.c
c6e2a6c8 27735@@ -556,7 +556,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
58c5fc13 27736 return 0;
57199397 27737 #endif
58c5fc13
MT
27738
27739- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27740+ BUG_ON(pr->id >= nr_cpu_ids);
27741
27742 /*
27743 * Buggy BIOS check
fe2de317 27744diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
c6e2a6c8 27745index d31ee55..8363a8b 100644
fe2de317
MT
27746--- a/drivers/ata/libata-core.c
27747+++ b/drivers/ata/libata-core.c
c6e2a6c8 27748@@ -4742,7 +4742,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
6892158b
MT
27749 struct ata_port *ap;
27750 unsigned int tag;
27751
27752- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27753+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27754 ap = qc->ap;
27755
27756 qc->flags = 0;
c6e2a6c8 27757@@ -4758,7 +4758,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
6892158b
MT
27758 struct ata_port *ap;
27759 struct ata_link *link;
27760
27761- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27762+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27763 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27764 ap = qc->ap;
27765 link = qc->dev->link;
c6e2a6c8 27766@@ -5822,6 +5822,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
ae4e228f
MT
27767 return;
27768
27769 spin_lock(&lock);
27770+ pax_open_kernel();
27771
27772 for (cur = ops->inherits; cur; cur = cur->inherits) {
27773 void **inherit = (void **)cur;
c6e2a6c8 27774@@ -5835,8 +5836,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
ae4e228f
MT
27775 if (IS_ERR(*pp))
27776 *pp = NULL;
27777
27778- ops->inherits = NULL;
15a11c5b 27779+ *(struct ata_port_operations **)&ops->inherits = NULL;
ae4e228f
MT
27780
27781+ pax_close_kernel();
27782 spin_unlock(&lock);
27783 }
27784
fe2de317 27785diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
c6e2a6c8 27786index 3239517..343b5f6 100644
fe2de317
MT
27787--- a/drivers/ata/pata_arasan_cf.c
27788+++ b/drivers/ata/pata_arasan_cf.c
27789@@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(struct platform_device *pdev)
66a7e928
MT
27790 /* Handle platform specific quirks */
27791 if (pdata->quirk) {
27792 if (pdata->quirk & CF_BROKEN_PIO) {
27793- ap->ops->set_piomode = NULL;
27794+ pax_open_kernel();
15a11c5b 27795+ *(void **)&ap->ops->set_piomode = NULL;
66a7e928
MT
27796+ pax_close_kernel();
27797 ap->pio_mask = 0;
27798 }
27799 if (pdata->quirk & CF_BROKEN_MWDMA)
fe2de317
MT
27800diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
27801index f9b983a..887b9d8 100644
27802--- a/drivers/atm/adummy.c
27803+++ b/drivers/atm/adummy.c
27804@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
27805 vcc->pop(vcc, skb);
27806 else
27807 dev_kfree_skb_any(skb);
27808- atomic_inc(&vcc->stats->tx);
27809+ atomic_inc_unchecked(&vcc->stats->tx);
ae4e228f 27810
15a11c5b
MT
27811 return 0;
27812 }
fe2de317
MT
27813diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
27814index f8f41e0..1f987dd 100644
27815--- a/drivers/atm/ambassador.c
27816+++ b/drivers/atm/ambassador.c
27817@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
15a11c5b
MT
27818 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
27819
27820 // VC layer stats
27821- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27822+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
27823
27824 // free the descriptor
27825 kfree (tx_descr);
fe2de317 27826@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
15a11c5b
MT
27827 dump_skb ("<<<", vc, skb);
27828
27829 // VC layer stats
27830- atomic_inc(&atm_vcc->stats->rx);
27831+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27832 __net_timestamp(skb);
27833 // end of our responsibility
27834 atm_vcc->push (atm_vcc, skb);
fe2de317 27835@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
15a11c5b
MT
27836 } else {
27837 PRINTK (KERN_INFO, "dropped over-size frame");
27838 // should we count this?
27839- atomic_inc(&atm_vcc->stats->rx_drop);
27840+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27841 }
27842
27843 } else {
fe2de317 27844@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
15a11c5b
MT
27845 }
27846
27847 if (check_area (skb->data, skb->len)) {
27848- atomic_inc(&atm_vcc->stats->tx_err);
27849+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
27850 return -ENOMEM; // ?
27851 }
27852
fe2de317
MT
27853diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
27854index b22d71c..d6e1049 100644
27855--- a/drivers/atm/atmtcp.c
27856+++ b/drivers/atm/atmtcp.c
27857@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
15a11c5b
MT
27858 if (vcc->pop) vcc->pop(vcc,skb);
27859 else dev_kfree_skb(skb);
27860 if (dev_data) return 0;
27861- atomic_inc(&vcc->stats->tx_err);
27862+ atomic_inc_unchecked(&vcc->stats->tx_err);
27863 return -ENOLINK;
27864 }
27865 size = skb->len+sizeof(struct atmtcp_hdr);
fe2de317 27866@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
15a11c5b
MT
27867 if (!new_skb) {
27868 if (vcc->pop) vcc->pop(vcc,skb);
27869 else dev_kfree_skb(skb);
27870- atomic_inc(&vcc->stats->tx_err);
27871+ atomic_inc_unchecked(&vcc->stats->tx_err);
27872 return -ENOBUFS;
27873 }
27874 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
fe2de317 27875@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
15a11c5b
MT
27876 if (vcc->pop) vcc->pop(vcc,skb);
27877 else dev_kfree_skb(skb);
27878 out_vcc->push(out_vcc,new_skb);
27879- atomic_inc(&vcc->stats->tx);
27880- atomic_inc(&out_vcc->stats->rx);
27881+ atomic_inc_unchecked(&vcc->stats->tx);
27882+ atomic_inc_unchecked(&out_vcc->stats->rx);
27883 return 0;
27884 }
ae4e228f 27885
fe2de317 27886@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
15a11c5b
MT
27887 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
27888 read_unlock(&vcc_sklist_lock);
27889 if (!out_vcc) {
27890- atomic_inc(&vcc->stats->tx_err);
27891+ atomic_inc_unchecked(&vcc->stats->tx_err);
27892 goto done;
27893 }
27894 skb_pull(skb,sizeof(struct atmtcp_hdr));
fe2de317 27895@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
15a11c5b
MT
27896 __net_timestamp(new_skb);
27897 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
27898 out_vcc->push(out_vcc,new_skb);
27899- atomic_inc(&vcc->stats->tx);
27900- atomic_inc(&out_vcc->stats->rx);
27901+ atomic_inc_unchecked(&vcc->stats->tx);
27902+ atomic_inc_unchecked(&out_vcc->stats->rx);
27903 done:
27904 if (vcc->pop) vcc->pop(vcc,skb);
27905 else dev_kfree_skb(skb);
fe2de317 27906diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
c6e2a6c8 27907index 2059ee4..faf51c7 100644
fe2de317
MT
27908--- a/drivers/atm/eni.c
27909+++ b/drivers/atm/eni.c
c6e2a6c8 27910@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
15a11c5b
MT
27911 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
27912 vcc->dev->number);
27913 length = 0;
27914- atomic_inc(&vcc->stats->rx_err);
27915+ atomic_inc_unchecked(&vcc->stats->rx_err);
27916 }
27917 else {
27918 length = ATM_CELL_SIZE-1; /* no HEC */
c6e2a6c8 27919@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
15a11c5b
MT
27920 size);
27921 }
27922 eff = length = 0;
27923- atomic_inc(&vcc->stats->rx_err);
27924+ atomic_inc_unchecked(&vcc->stats->rx_err);
27925 }
27926 else {
27927 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
c6e2a6c8 27928@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
15a11c5b
MT
27929 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
27930 vcc->dev->number,vcc->vci,length,size << 2,descr);
27931 length = eff = 0;
27932- atomic_inc(&vcc->stats->rx_err);
27933+ atomic_inc_unchecked(&vcc->stats->rx_err);
27934 }
27935 }
27936 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
c6e2a6c8 27937@@ -767,7 +767,7 @@ rx_dequeued++;
15a11c5b
MT
27938 vcc->push(vcc,skb);
27939 pushed++;
27940 }
27941- atomic_inc(&vcc->stats->rx);
27942+ atomic_inc_unchecked(&vcc->stats->rx);
27943 }
27944 wake_up(&eni_dev->rx_wait);
27945 }
c6e2a6c8 27946@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
15a11c5b
MT
27947 PCI_DMA_TODEVICE);
27948 if (vcc->pop) vcc->pop(vcc,skb);
27949 else dev_kfree_skb_irq(skb);
27950- atomic_inc(&vcc->stats->tx);
27951+ atomic_inc_unchecked(&vcc->stats->tx);
27952 wake_up(&eni_dev->tx_wait);
27953 dma_complete++;
27954 }
c6e2a6c8 27955@@ -1567,7 +1567,7 @@ tx_complete++;
6e9df6a3
MT
27956 /*--------------------------------- entries ---------------------------------*/
27957
27958
27959-static const char *media_name[] __devinitdata = {
27960+static const char *media_name[] __devinitconst = {
27961 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
27962 "UTP", "05?", "06?", "07?", /* 4- 7 */
27963 "TAXI","09?", "10?", "11?", /* 8-11 */
fe2de317 27964diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
c6e2a6c8 27965index 86fed1b..6dc4721 100644
fe2de317
MT
27966--- a/drivers/atm/firestream.c
27967+++ b/drivers/atm/firestream.c
c6e2a6c8 27968@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
15a11c5b
MT
27969 }
27970 }
ae4e228f 27971
15a11c5b
MT
27972- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
27973+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
ae4e228f 27974
15a11c5b
MT
27975 fs_dprintk (FS_DEBUG_TXMEM, "i");
27976 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
c6e2a6c8 27977@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
15a11c5b
MT
27978 #endif
27979 skb_put (skb, qe->p1 & 0xffff);
27980 ATM_SKB(skb)->vcc = atm_vcc;
27981- atomic_inc(&atm_vcc->stats->rx);
27982+ atomic_inc_unchecked(&atm_vcc->stats->rx);
27983 __net_timestamp(skb);
27984 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
27985 atm_vcc->push (atm_vcc, skb);
c6e2a6c8 27986@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
15a11c5b
MT
27987 kfree (pe);
27988 }
27989 if (atm_vcc)
27990- atomic_inc(&atm_vcc->stats->rx_drop);
27991+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27992 break;
27993 case 0x1f: /* Reassembly abort: no buffers. */
27994 /* Silently increment error counter. */
27995 if (atm_vcc)
27996- atomic_inc(&atm_vcc->stats->rx_drop);
27997+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
27998 break;
27999 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
28000 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
fe2de317
MT
28001diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
28002index 361f5ae..7fc552d 100644
28003--- a/drivers/atm/fore200e.c
28004+++ b/drivers/atm/fore200e.c
28005@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
15a11c5b
MT
28006 #endif
28007 /* check error condition */
28008 if (*entry->status & STATUS_ERROR)
28009- atomic_inc(&vcc->stats->tx_err);
28010+ atomic_inc_unchecked(&vcc->stats->tx_err);
28011 else
28012- atomic_inc(&vcc->stats->tx);
28013+ atomic_inc_unchecked(&vcc->stats->tx);
28014 }
28015 }
ae4e228f 28016
fe2de317 28017@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
15a11c5b
MT
28018 if (skb == NULL) {
28019 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
ae4e228f 28020
15a11c5b
MT
28021- atomic_inc(&vcc->stats->rx_drop);
28022+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28023 return -ENOMEM;
28024 }
ae4e228f 28025
fe2de317 28026@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
ae4e228f 28027
15a11c5b 28028 dev_kfree_skb_any(skb);
ae4e228f 28029
15a11c5b
MT
28030- atomic_inc(&vcc->stats->rx_drop);
28031+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28032 return -ENOMEM;
28033 }
ae4e228f 28034
15a11c5b 28035 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
ae4e228f 28036
15a11c5b
MT
28037 vcc->push(vcc, skb);
28038- atomic_inc(&vcc->stats->rx);
28039+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28040
15a11c5b 28041 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
ae4e228f 28042
fe2de317 28043@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
15a11c5b
MT
28044 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
28045 fore200e->atm_dev->number,
28046 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
28047- atomic_inc(&vcc->stats->rx_err);
28048+ atomic_inc_unchecked(&vcc->stats->rx_err);
28049 }
28050 }
ae4e228f 28051
fe2de317 28052@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28053 goto retry_here;
28054 }
ae4e228f 28055
15a11c5b
MT
28056- atomic_inc(&vcc->stats->tx_err);
28057+ atomic_inc_unchecked(&vcc->stats->tx_err);
ae4e228f 28058
15a11c5b
MT
28059 fore200e->tx_sat++;
28060 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
fe2de317 28061diff --git a/drivers/atm/he.c b/drivers/atm/he.c
5e856224 28062index b182c2f..1c6fa8a 100644
fe2de317
MT
28063--- a/drivers/atm/he.c
28064+++ b/drivers/atm/he.c
28065@@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
ae4e228f 28066
15a11c5b
MT
28067 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
28068 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
28069- atomic_inc(&vcc->stats->rx_drop);
28070+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28071 goto return_host_buffers;
28072 }
ae4e228f 28073
fe2de317 28074@@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
15a11c5b
MT
28075 RBRQ_LEN_ERR(he_dev->rbrq_head)
28076 ? "LEN_ERR" : "",
28077 vcc->vpi, vcc->vci);
28078- atomic_inc(&vcc->stats->rx_err);
28079+ atomic_inc_unchecked(&vcc->stats->rx_err);
28080 goto return_host_buffers;
28081 }
ae4e228f 28082
fe2de317 28083@@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
15a11c5b
MT
28084 vcc->push(vcc, skb);
28085 spin_lock(&he_dev->global_lock);
ae4e228f 28086
15a11c5b
MT
28087- atomic_inc(&vcc->stats->rx);
28088+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28089
15a11c5b
MT
28090 return_host_buffers:
28091 ++pdus_assembled;
fe2de317 28092@@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
15a11c5b
MT
28093 tpd->vcc->pop(tpd->vcc, tpd->skb);
28094 else
28095 dev_kfree_skb_any(tpd->skb);
28096- atomic_inc(&tpd->vcc->stats->tx_err);
28097+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
28098 }
28099 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
28100 return;
fe2de317 28101@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28102 vcc->pop(vcc, skb);
28103 else
28104 dev_kfree_skb_any(skb);
28105- atomic_inc(&vcc->stats->tx_err);
28106+ atomic_inc_unchecked(&vcc->stats->tx_err);
28107 return -EINVAL;
28108 }
ae4e228f 28109
fe2de317 28110@@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28111 vcc->pop(vcc, skb);
28112 else
28113 dev_kfree_skb_any(skb);
28114- atomic_inc(&vcc->stats->tx_err);
28115+ atomic_inc_unchecked(&vcc->stats->tx_err);
28116 return -EINVAL;
ae4e228f 28117 }
15a11c5b 28118 #endif
fe2de317 28119@@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28120 vcc->pop(vcc, skb);
28121 else
28122 dev_kfree_skb_any(skb);
28123- atomic_inc(&vcc->stats->tx_err);
28124+ atomic_inc_unchecked(&vcc->stats->tx_err);
28125 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28126 return -ENOMEM;
28127 }
fe2de317 28128@@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28129 vcc->pop(vcc, skb);
28130 else
28131 dev_kfree_skb_any(skb);
28132- atomic_inc(&vcc->stats->tx_err);
28133+ atomic_inc_unchecked(&vcc->stats->tx_err);
28134 spin_unlock_irqrestore(&he_dev->global_lock, flags);
28135 return -ENOMEM;
28136 }
fe2de317 28137@@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
15a11c5b
MT
28138 __enqueue_tpd(he_dev, tpd, cid);
28139 spin_unlock_irqrestore(&he_dev->global_lock, flags);
ae4e228f 28140
15a11c5b
MT
28141- atomic_inc(&vcc->stats->tx);
28142+ atomic_inc_unchecked(&vcc->stats->tx);
ae4e228f 28143
ae4e228f
MT
28144 return 0;
28145 }
fe2de317 28146diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
c6e2a6c8 28147index 75fd691..2d20b14 100644
fe2de317
MT
28148--- a/drivers/atm/horizon.c
28149+++ b/drivers/atm/horizon.c
c6e2a6c8 28150@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
15a11c5b
MT
28151 {
28152 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
28153 // VC layer stats
28154- atomic_inc(&vcc->stats->rx);
28155+ atomic_inc_unchecked(&vcc->stats->rx);
28156 __net_timestamp(skb);
28157 // end of our responsibility
28158 vcc->push (vcc, skb);
c6e2a6c8 28159@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
15a11c5b
MT
28160 dev->tx_iovec = NULL;
28161
28162 // VC layer stats
28163- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
28164+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
28165
28166 // free the skb
28167 hrz_kfree_skb (skb);
fe2de317 28168diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
4c928ab7 28169index 1c05212..c28e200 100644
fe2de317
MT
28170--- a/drivers/atm/idt77252.c
28171+++ b/drivers/atm/idt77252.c
28172@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
15a11c5b
MT
28173 else
28174 dev_kfree_skb(skb);
ae4e228f 28175
15a11c5b
MT
28176- atomic_inc(&vcc->stats->tx);
28177+ atomic_inc_unchecked(&vcc->stats->tx);
28178 }
ae4e228f 28179
15a11c5b 28180 atomic_dec(&scq->used);
fe2de317 28181@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b
MT
28182 if ((sb = dev_alloc_skb(64)) == NULL) {
28183 printk("%s: Can't allocate buffers for aal0.\n",
28184 card->name);
28185- atomic_add(i, &vcc->stats->rx_drop);
28186+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28187 break;
28188 }
28189 if (!atm_charge(vcc, sb->truesize)) {
28190 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
28191 card->name);
28192- atomic_add(i - 1, &vcc->stats->rx_drop);
28193+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
28194 dev_kfree_skb(sb);
28195 break;
28196 }
fe2de317 28197@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b
MT
28198 ATM_SKB(sb)->vcc = vcc;
28199 __net_timestamp(sb);
28200 vcc->push(vcc, sb);
28201- atomic_inc(&vcc->stats->rx);
28202+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28203
15a11c5b
MT
28204 cell += ATM_CELL_PAYLOAD;
28205 }
fe2de317 28206@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b
MT
28207 "(CDC: %08x)\n",
28208 card->name, len, rpp->len, readl(SAR_REG_CDC));
28209 recycle_rx_pool_skb(card, rpp);
28210- atomic_inc(&vcc->stats->rx_err);
28211+ atomic_inc_unchecked(&vcc->stats->rx_err);
28212 return;
28213 }
28214 if (stat & SAR_RSQE_CRC) {
28215 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
28216 recycle_rx_pool_skb(card, rpp);
28217- atomic_inc(&vcc->stats->rx_err);
28218+ atomic_inc_unchecked(&vcc->stats->rx_err);
28219 return;
28220 }
28221 if (skb_queue_len(&rpp->queue) > 1) {
fe2de317 28222@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b
MT
28223 RXPRINTK("%s: Can't alloc RX skb.\n",
28224 card->name);
28225 recycle_rx_pool_skb(card, rpp);
28226- atomic_inc(&vcc->stats->rx_err);
28227+ atomic_inc_unchecked(&vcc->stats->rx_err);
28228 return;
28229 }
28230 if (!atm_charge(vcc, skb->truesize)) {
fe2de317 28231@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b 28232 __net_timestamp(skb);
ae4e228f 28233
15a11c5b
MT
28234 vcc->push(vcc, skb);
28235- atomic_inc(&vcc->stats->rx);
28236+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28237
15a11c5b
MT
28238 return;
28239 }
fe2de317 28240@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
15a11c5b 28241 __net_timestamp(skb);
ae4e228f 28242
15a11c5b
MT
28243 vcc->push(vcc, skb);
28244- atomic_inc(&vcc->stats->rx);
28245+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28246
15a11c5b
MT
28247 if (skb->truesize > SAR_FB_SIZE_3)
28248 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
fe2de317 28249@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
15a11c5b
MT
28250 if (vcc->qos.aal != ATM_AAL0) {
28251 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
28252 card->name, vpi, vci);
28253- atomic_inc(&vcc->stats->rx_drop);
28254+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28255 goto drop;
28256 }
28257
28258 if ((sb = dev_alloc_skb(64)) == NULL) {
28259 printk("%s: Can't allocate buffers for AAL0.\n",
28260 card->name);
28261- atomic_inc(&vcc->stats->rx_err);
28262+ atomic_inc_unchecked(&vcc->stats->rx_err);
28263 goto drop;
28264 }
ae4e228f 28265
fe2de317 28266@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
15a11c5b
MT
28267 ATM_SKB(sb)->vcc = vcc;
28268 __net_timestamp(sb);
28269 vcc->push(vcc, sb);
28270- atomic_inc(&vcc->stats->rx);
28271+ atomic_inc_unchecked(&vcc->stats->rx);
ae4e228f 28272
15a11c5b
MT
28273 drop:
28274 skb_pull(queue, 64);
fe2de317 28275@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
58c5fc13 28276
15a11c5b
MT
28277 if (vc == NULL) {
28278 printk("%s: NULL connection in send().\n", card->name);
58c5fc13
MT
28279- atomic_inc(&vcc->stats->tx_err);
28280+ atomic_inc_unchecked(&vcc->stats->tx_err);
15a11c5b
MT
28281 dev_kfree_skb(skb);
28282 return -EINVAL;
58c5fc13 28283 }
15a11c5b
MT
28284 if (!test_bit(VCF_TX, &vc->flags)) {
28285 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
58c5fc13
MT
28286- atomic_inc(&vcc->stats->tx_err);
28287+ atomic_inc_unchecked(&vcc->stats->tx_err);
15a11c5b
MT
28288 dev_kfree_skb(skb);
28289 return -EINVAL;
58c5fc13 28290 }
fe2de317 28291@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
15a11c5b
MT
28292 break;
28293 default:
28294 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
58c5fc13
MT
28295- atomic_inc(&vcc->stats->tx_err);
28296+ atomic_inc_unchecked(&vcc->stats->tx_err);
15a11c5b
MT
28297 dev_kfree_skb(skb);
28298 return -EINVAL;
58c5fc13 28299 }
15a11c5b
MT
28300
28301 if (skb_shinfo(skb)->nr_frags != 0) {
28302 printk("%s: No scatter-gather yet.\n", card->name);
28303- atomic_inc(&vcc->stats->tx_err);
28304+ atomic_inc_unchecked(&vcc->stats->tx_err);
28305 dev_kfree_skb(skb);
28306 return -EINVAL;
58c5fc13 28307 }
fe2de317 28308@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
15a11c5b
MT
28309
28310 err = queue_skb(card, vc, skb, oam);
28311 if (err) {
28312- atomic_inc(&vcc->stats->tx_err);
28313+ atomic_inc_unchecked(&vcc->stats->tx_err);
28314 dev_kfree_skb(skb);
28315 return err;
58c5fc13 28316 }
fe2de317 28317@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
15a11c5b
MT
28318 skb = dev_alloc_skb(64);
28319 if (!skb) {
28320 printk("%s: Out of memory in send_oam().\n", card->name);
28321- atomic_inc(&vcc->stats->tx_err);
28322+ atomic_inc_unchecked(&vcc->stats->tx_err);
28323 return -ENOMEM;
58c5fc13
MT
28324 }
28325 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
fe2de317 28326diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
c6e2a6c8 28327index d438601..8b98495 100644
fe2de317
MT
28328--- a/drivers/atm/iphase.c
28329+++ b/drivers/atm/iphase.c
c6e2a6c8 28330@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
58c5fc13
MT
28331 status = (u_short) (buf_desc_ptr->desc_mode);
28332 if (status & (RX_CER | RX_PTE | RX_OFL))
28333 {
28334- atomic_inc(&vcc->stats->rx_err);
28335+ atomic_inc_unchecked(&vcc->stats->rx_err);
28336 IF_ERR(printk("IA: bad packet, dropping it");)
28337 if (status & RX_CER) {
28338 IF_ERR(printk(" cause: packet CRC error\n");)
c6e2a6c8 28339@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
58c5fc13
MT
28340 len = dma_addr - buf_addr;
28341 if (len > iadev->rx_buf_sz) {
28342 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
28343- atomic_inc(&vcc->stats->rx_err);
28344+ atomic_inc_unchecked(&vcc->stats->rx_err);
28345 goto out_free_desc;
28346 }
28347
c6e2a6c8 28348@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
58c5fc13
MT
28349 ia_vcc = INPH_IA_VCC(vcc);
28350 if (ia_vcc == NULL)
28351 {
28352- atomic_inc(&vcc->stats->rx_err);
28353+ atomic_inc_unchecked(&vcc->stats->rx_err);
5e856224 28354 atm_return(vcc, skb->truesize);
58c5fc13 28355 dev_kfree_skb_any(skb);
58c5fc13 28356 goto INCR_DLE;
c6e2a6c8 28357@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
58c5fc13
MT
28358 if ((length > iadev->rx_buf_sz) || (length >
28359 (skb->len - sizeof(struct cpcs_trailer))))
28360 {
28361- atomic_inc(&vcc->stats->rx_err);
28362+ atomic_inc_unchecked(&vcc->stats->rx_err);
28363 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
28364 length, skb->len);)
5e856224 28365 atm_return(vcc, skb->truesize);
c6e2a6c8 28366@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
58c5fc13
MT
28367
28368 IF_RX(printk("rx_dle_intr: skb push");)
28369 vcc->push(vcc,skb);
28370- atomic_inc(&vcc->stats->rx);
28371+ atomic_inc_unchecked(&vcc->stats->rx);
28372 iadev->rx_pkt_cnt++;
28373 }
28374 INCR_DLE:
c6e2a6c8 28375@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
58c5fc13
MT
28376 {
28377 struct k_sonet_stats *stats;
28378 stats = &PRIV(_ia_dev[board])->sonet_stats;
28379- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
28380- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
28381- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
28382- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
28383- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
28384- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
28385- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
28386- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
28387- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
28388+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
28389+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
28390+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
28391+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
28392+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
28393+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
28394+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
28395+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
28396+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
28397 }
28398 ia_cmds.status = 0;
28399 break;
c6e2a6c8 28400@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
58c5fc13
MT
28401 if ((desc == 0) || (desc > iadev->num_tx_desc))
28402 {
28403 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
28404- atomic_inc(&vcc->stats->tx);
28405+ atomic_inc_unchecked(&vcc->stats->tx);
28406 if (vcc->pop)
28407 vcc->pop(vcc, skb);
28408 else
c6e2a6c8 28409@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
58c5fc13
MT
28410 ATM_DESC(skb) = vcc->vci;
28411 skb_queue_tail(&iadev->tx_dma_q, skb);
28412
28413- atomic_inc(&vcc->stats->tx);
28414+ atomic_inc_unchecked(&vcc->stats->tx);
28415 iadev->tx_pkt_cnt++;
28416 /* Increment transaction counter */
28417 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
28418
28419 #if 0
28420 /* add flow control logic */
28421- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
28422+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
28423 if (iavcc->vc_desc_cnt > 10) {
28424 vcc->tx_quota = vcc->tx_quota * 3 / 4;
28425 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
fe2de317 28426diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
c6e2a6c8 28427index 68c7588..7036683 100644
fe2de317
MT
28428--- a/drivers/atm/lanai.c
28429+++ b/drivers/atm/lanai.c
28430@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
58c5fc13
MT
28431 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
28432 lanai_endtx(lanai, lvcc);
28433 lanai_free_skb(lvcc->tx.atmvcc, skb);
28434- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
28435+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
28436 }
28437
28438 /* Try to fill the buffer - don't call unless there is backlog */
fe2de317 28439@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
58c5fc13
MT
28440 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
28441 __net_timestamp(skb);
28442 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
28443- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
28444+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
28445 out:
28446 lvcc->rx.buf.ptr = end;
28447 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
4c928ab7 28448@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
58c5fc13
MT
28449 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
28450 "vcc %d\n", lanai->number, (unsigned int) s, vci);
28451 lanai->stats.service_rxnotaal5++;
28452- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28453+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28454 return 0;
28455 }
28456 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
4c928ab7 28457@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
58c5fc13
MT
28458 int bytes;
28459 read_unlock(&vcc_sklist_lock);
28460 DPRINTK("got trashed rx pdu on vci %d\n", vci);
28461- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28462+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28463 lvcc->stats.x.aal5.service_trash++;
28464 bytes = (SERVICE_GET_END(s) * 16) -
28465 (((unsigned long) lvcc->rx.buf.ptr) -
4c928ab7 28466@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
58c5fc13
MT
28467 }
28468 if (s & SERVICE_STREAM) {
28469 read_unlock(&vcc_sklist_lock);
28470- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28471+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28472 lvcc->stats.x.aal5.service_stream++;
28473 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
28474 "PDU on VCI %d!\n", lanai->number, vci);
4c928ab7 28475@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
58c5fc13
MT
28476 return 0;
28477 }
28478 DPRINTK("got rx crc error on vci %d\n", vci);
28479- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
28480+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
28481 lvcc->stats.x.aal5.service_rxcrc++;
28482 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
28483 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
fe2de317
MT
28484diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
28485index 1c70c45..300718d 100644
28486--- a/drivers/atm/nicstar.c
28487+++ b/drivers/atm/nicstar.c
28488@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
28489 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
28490 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
28491 card->index);
28492- atomic_inc(&vcc->stats->tx_err);
28493+ atomic_inc_unchecked(&vcc->stats->tx_err);
28494 dev_kfree_skb_any(skb);
28495 return -EINVAL;
28496 }
fe2de317 28497@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
28498 if (!vc->tx) {
28499 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
28500 card->index);
28501- atomic_inc(&vcc->stats->tx_err);
28502+ atomic_inc_unchecked(&vcc->stats->tx_err);
28503 dev_kfree_skb_any(skb);
28504 return -EINVAL;
28505 }
fe2de317 28506@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
28507 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
28508 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
28509 card->index);
28510- atomic_inc(&vcc->stats->tx_err);
28511+ atomic_inc_unchecked(&vcc->stats->tx_err);
28512 dev_kfree_skb_any(skb);
28513 return -EINVAL;
28514 }
58c5fc13 28515
6892158b
MT
28516 if (skb_shinfo(skb)->nr_frags != 0) {
28517 printk("nicstar%d: No scatter-gather yet.\n", card->index);
28518- atomic_inc(&vcc->stats->tx_err);
28519+ atomic_inc_unchecked(&vcc->stats->tx_err);
28520 dev_kfree_skb_any(skb);
28521 return -EINVAL;
28522 }
fe2de317 28523@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
6892158b
MT
28524 }
28525
28526 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
28527- atomic_inc(&vcc->stats->tx_err);
28528+ atomic_inc_unchecked(&vcc->stats->tx_err);
28529 dev_kfree_skb_any(skb);
28530 return -EIO;
28531 }
28532- atomic_inc(&vcc->stats->tx);
28533+ atomic_inc_unchecked(&vcc->stats->tx);
28534
28535 return 0;
28536 }
fe2de317 28537@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28538 printk
28539 ("nicstar%d: Can't allocate buffers for aal0.\n",
28540 card->index);
28541- atomic_add(i, &vcc->stats->rx_drop);
28542+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
28543 break;
28544 }
28545 if (!atm_charge(vcc, sb->truesize)) {
28546 RXPRINTK
28547 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
28548 card->index);
28549- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28550+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
28551 dev_kfree_skb_any(sb);
28552 break;
28553 }
fe2de317 28554@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28555 ATM_SKB(sb)->vcc = vcc;
28556 __net_timestamp(sb);
28557 vcc->push(vcc, sb);
28558- atomic_inc(&vcc->stats->rx);
28559+ atomic_inc_unchecked(&vcc->stats->rx);
28560 cell += ATM_CELL_PAYLOAD;
28561 }
28562
fe2de317 28563@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28564 if (iovb == NULL) {
28565 printk("nicstar%d: Out of iovec buffers.\n",
28566 card->index);
28567- atomic_inc(&vcc->stats->rx_drop);
28568+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28569 recycle_rx_buf(card, skb);
28570 return;
28571 }
fe2de317 28572@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28573 small or large buffer itself. */
28574 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
28575 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
28576- atomic_inc(&vcc->stats->rx_err);
28577+ atomic_inc_unchecked(&vcc->stats->rx_err);
28578 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28579 NS_MAX_IOVECS);
28580 NS_PRV_IOVCNT(iovb) = 0;
fe2de317 28581@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28582 ("nicstar%d: Expected a small buffer, and this is not one.\n",
28583 card->index);
28584 which_list(card, skb);
28585- atomic_inc(&vcc->stats->rx_err);
28586+ atomic_inc_unchecked(&vcc->stats->rx_err);
28587 recycle_rx_buf(card, skb);
28588 vc->rx_iov = NULL;
28589 recycle_iov_buf(card, iovb);
fe2de317 28590@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28591 ("nicstar%d: Expected a large buffer, and this is not one.\n",
28592 card->index);
28593 which_list(card, skb);
28594- atomic_inc(&vcc->stats->rx_err);
28595+ atomic_inc_unchecked(&vcc->stats->rx_err);
28596 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28597 NS_PRV_IOVCNT(iovb));
28598 vc->rx_iov = NULL;
fe2de317 28599@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
6892158b
MT
28600 printk(" - PDU size mismatch.\n");
28601 else
28602 printk(".\n");
28603- atomic_inc(&vcc->stats->rx_err);
28604+ atomic_inc_unchecked(&vcc->stats->rx_err);
28605 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
28606 NS_PRV_IOVCNT(iovb));
28607 vc->rx_iov = NULL;
fe2de317 28608@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28609 /* skb points to a small buffer */
28610 if (!atm_charge(vcc, skb->truesize)) {
28611 push_rxbufs(card, skb);
28612- atomic_inc(&vcc->stats->rx_drop);
28613+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28614 } else {
28615 skb_put(skb, len);
28616 dequeue_sm_buf(card, skb);
fe2de317 28617@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28618 ATM_SKB(skb)->vcc = vcc;
28619 __net_timestamp(skb);
28620 vcc->push(vcc, skb);
28621- atomic_inc(&vcc->stats->rx);
28622+ atomic_inc_unchecked(&vcc->stats->rx);
28623 }
28624 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
28625 struct sk_buff *sb;
fe2de317 28626@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28627 if (len <= NS_SMBUFSIZE) {
28628 if (!atm_charge(vcc, sb->truesize)) {
28629 push_rxbufs(card, sb);
28630- atomic_inc(&vcc->stats->rx_drop);
28631+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28632 } else {
28633 skb_put(sb, len);
28634 dequeue_sm_buf(card, sb);
fe2de317 28635@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28636 ATM_SKB(sb)->vcc = vcc;
28637 __net_timestamp(sb);
28638 vcc->push(vcc, sb);
28639- atomic_inc(&vcc->stats->rx);
28640+ atomic_inc_unchecked(&vcc->stats->rx);
28641 }
28642
28643 push_rxbufs(card, skb);
fe2de317 28644@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28645
28646 if (!atm_charge(vcc, skb->truesize)) {
28647 push_rxbufs(card, skb);
28648- atomic_inc(&vcc->stats->rx_drop);
28649+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28650 } else {
28651 dequeue_lg_buf(card, skb);
28652 #ifdef NS_USE_DESTRUCTORS
fe2de317 28653@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28654 ATM_SKB(skb)->vcc = vcc;
28655 __net_timestamp(skb);
28656 vcc->push(vcc, skb);
28657- atomic_inc(&vcc->stats->rx);
28658+ atomic_inc_unchecked(&vcc->stats->rx);
28659 }
28660
28661 push_rxbufs(card, sb);
fe2de317 28662@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28663 printk
28664 ("nicstar%d: Out of huge buffers.\n",
28665 card->index);
28666- atomic_inc(&vcc->stats->rx_drop);
28667+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28668 recycle_iovec_rx_bufs(card,
28669 (struct iovec *)
28670 iovb->data,
fe2de317 28671@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
28672 card->hbpool.count++;
28673 } else
28674 dev_kfree_skb_any(hb);
28675- atomic_inc(&vcc->stats->rx_drop);
28676+ atomic_inc_unchecked(&vcc->stats->rx_drop);
28677 } else {
28678 /* Copy the small buffer to the huge buffer */
28679 sb = (struct sk_buff *)iov->iov_base;
fe2de317 28680@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
58c5fc13 28681 #endif /* NS_USE_DESTRUCTORS */
28682 __net_timestamp(hb);
28683 vcc->push(vcc, hb);
28684- atomic_inc(&vcc->stats->rx);
28685+ atomic_inc_unchecked(&vcc->stats->rx);
28686 }
28687 }
58c5fc13 28688
fe2de317 28689diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
c6e2a6c8 28690index 9851093..adb2b1e 100644
28691--- a/drivers/atm/solos-pci.c
28692+++ b/drivers/atm/solos-pci.c
15a11c5b 28693@@ -714,7 +714,7 @@ void solos_bh(unsigned long card_arg)
28694 }
28695 atm_charge(vcc, skb->truesize);
28696 vcc->push(vcc, skb);
28697- atomic_inc(&vcc->stats->rx);
28698+ atomic_inc_unchecked(&vcc->stats->rx);
28699 break;
28700
28701 case PKT_STATUS:
c6e2a6c8 28702@@ -1009,7 +1009,7 @@ static uint32_t fpga_tx(struct solos_card *card)
28703 vcc = SKB_CB(oldskb)->vcc;
28704
28705 if (vcc) {
28706- atomic_inc(&vcc->stats->tx);
28707+ atomic_inc_unchecked(&vcc->stats->tx);
28708 solos_pop(vcc, oldskb);
28709 } else
28710 dev_kfree_skb_irq(oldskb);
fe2de317 28711diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
c6e2a6c8 28712index 0215934..ce9f5b1 100644
28713--- a/drivers/atm/suni.c
28714+++ b/drivers/atm/suni.c
c6e2a6c8 28715@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
28716
28717
28718 #define ADD_LIMITED(s,v) \
28719- atomic_add((v),&stats->s); \
28720- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
28721+ atomic_add_unchecked((v),&stats->s); \
28722+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
28723
28724
28725 static void suni_hz(unsigned long from_timer)
28726diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
28727index 5120a96..e2572bd 100644
28728--- a/drivers/atm/uPD98402.c
28729+++ b/drivers/atm/uPD98402.c
28730@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
28731 struct sonet_stats tmp;
28732 int error = 0;
28733
28734- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28735+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
28736 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
28737 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
28738 if (zero && !error) {
fe2de317 28739@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
28740
28741
28742 #define ADD_LIMITED(s,v) \
28743- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
28744- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
28745- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28746+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
28747+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
28748+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
28749
28750
28751 static void stat_event(struct atm_dev *dev)
fe2de317 28752@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
28753 if (reason & uPD98402_INT_PFM) stat_event(dev);
28754 if (reason & uPD98402_INT_PCO) {
28755 (void) GET(PCOCR); /* clear interrupt cause */
28756- atomic_add(GET(HECCT),
28757+ atomic_add_unchecked(GET(HECCT),
28758 &PRIV(dev)->sonet_stats.uncorr_hcs);
28759 }
28760 if ((reason & uPD98402_INT_RFO) &&
fe2de317 28761@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
28762 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
28763 uPD98402_INT_LOS),PIMR); /* enable them */
28764 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
28765- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28766- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
28767- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
28768+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
28769+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
28770+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
28771 return 0;
28772 }
28773
fe2de317 28774diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
c6e2a6c8 28775index abe4e20..83c4727 100644
28776--- a/drivers/atm/zatm.c
28777+++ b/drivers/atm/zatm.c
c6e2a6c8 28778@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28779 }
28780 if (!size) {
28781 dev_kfree_skb_irq(skb);
28782- if (vcc) atomic_inc(&vcc->stats->rx_err);
28783+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
28784 continue;
28785 }
28786 if (!atm_charge(vcc,skb->truesize)) {
c6e2a6c8 28787@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
28788 skb->len = size;
28789 ATM_SKB(skb)->vcc = vcc;
28790 vcc->push(vcc,skb);
28791- atomic_inc(&vcc->stats->rx);
28792+ atomic_inc_unchecked(&vcc->stats->rx);
28793 }
28794 zout(pos & 0xffff,MTA(mbx));
28795 #if 0 /* probably a stupid idea */
c6e2a6c8 28796@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
28797 skb_queue_head(&zatm_vcc->backlog,skb);
28798 break;
28799 }
28800- atomic_inc(&vcc->stats->tx);
28801+ atomic_inc_unchecked(&vcc->stats->tx);
28802 wake_up(&zatm_vcc->tx_wait);
28803 }
28804
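
The ATM driver hunks above all make the same mechanical change: per-VCC and SONET statistics counters move from atomic_t to atomic_unchecked_t, and the matching atomic_inc/atomic_add/atomic_read/atomic_set calls move to their *_unchecked variants. Under the PaX REFCOUNT hardening carried by this patch, plain atomic_t arithmetic is overflow-checked so that reference counts cannot silently wrap; counters that are allowed to wrap, such as these statistics, are switched to the unchecked type so they do not trip that check. What follows is a hedged userspace sketch of that split, not the kernel implementation; the type and helper names only mirror the PaX convention, and the overflow handling shown is a stand-in for the real detector.

/* Model of checked vs. unchecked atomic counters (illustrative only).
 * Build with: gcc -std=gnu99 -O2 -o atomic_model atomic_model.c */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;           /* overflow-checked */
typedef struct { volatile int counter; } atomic_unchecked_t; /* allowed to wrap  */

static void atomic_inc(atomic_t *v)
{
        /* Stand-in for REFCOUNT: refuse to let a reference count reach INT_MAX. */
        if (__sync_fetch_and_add(&v->counter, 1) >= INT_MAX - 1) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        /* Statistics counter: no overflow check, wrapping is acceptable. */
        __sync_fetch_and_add(&v->counter, 1);
}

int main(void)
{
        atomic_t refs = { 0 };
        atomic_unchecked_t rx_packets = { 0 };

        atomic_inc(&refs);                 /* checked path   */
        atomic_inc_unchecked(&rx_packets); /* unchecked path */
        printf("refs=%d rx_packets=%d\n", refs.counter, rx_packets.counter);
        return 0;
}

The conversion changes nothing about the values or the concurrency behaviour of these counters; it only opts them out of the overflow check so that legitimate wrap-around is not reported as an attack.
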
fe2de317 28805diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
5e856224 28806index 8493536..31adee0 100644
28807--- a/drivers/base/devtmpfs.c
28808+++ b/drivers/base/devtmpfs.c
28809@@ -368,7 +368,7 @@ int devtmpfs_mount(const char *mntdir)
28810 if (!thread)
28811 return 0;
28812
28813- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
28814+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
28815 if (err)
28816 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
28817 else
28818diff --git a/drivers/base/node.c b/drivers/base/node.c
28819index 90aa2a1..af1a177 100644
28820--- a/drivers/base/node.c
28821+++ b/drivers/base/node.c
28822@@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
28823 {
28824 int n;
28825
28826- n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]);
28827- if (n > 0 && PAGE_SIZE > n + 1) {
28828- *(buf + n++) = '\n';
28829- *(buf + n++) = '\0';
28830- }
28831+ n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
28832+ buf[n++] = '\n';
28833+ buf[n] = '\0';
28834 return n;
28835 }
28836
fe2de317 28837diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
c6e2a6c8 28838index 2a3e581..3d6a73f 100644
28839--- a/drivers/base/power/wakeup.c
28840+++ b/drivers/base/power/wakeup.c
4c928ab7 28841@@ -30,14 +30,14 @@ bool events_check_enabled;
28842 * They need to be modified together atomically, so it's better to use one
28843 * atomic variable to hold them both.
28844 */
28845-static atomic_t combined_event_count = ATOMIC_INIT(0);
28846+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
8308f9c9 28847
28848 #define IN_PROGRESS_BITS (sizeof(int) * 4)
28849 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
28850
28851 static void split_counters(unsigned int *cnt, unsigned int *inpr)
28852 {
28853- unsigned int comb = atomic_read(&combined_event_count);
28854+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
28855
28856 *cnt = (comb >> IN_PROGRESS_BITS);
28857 *inpr = comb & MAX_IN_PROGRESS;
c6e2a6c8 28858@@ -379,7 +379,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
28859 ws->last_time = ktime_get();
28860
28861 /* Increment the counter of events in progress. */
28862- atomic_inc(&combined_event_count);
28863+ atomic_inc_unchecked(&combined_event_count);
28864 }
28865
66a7e928 28866 /**
c6e2a6c8 28867@@ -475,7 +475,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
28868 * Increment the counter of registered wakeup events and decrement the
28869 * couter of wakeup events in progress simultaneously.
28870 */
28871- atomic_add(MAX_IN_PROGRESS, &combined_event_count);
28872+ atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
28873 }
66a7e928 28875 /**
fe2de317 28876diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
4c928ab7 28877index b0f553b..77b928b 100644
28878--- a/drivers/block/cciss.c
28879+++ b/drivers/block/cciss.c
4c928ab7 28880@@ -1198,6 +1198,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
28881 int err;
28882 u32 cp;
28883
28884+ memset(&arg64, 0, sizeof(arg64));
28885+
28886 err = 0;
28887 err |=
28888 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4c928ab7 28889@@ -3007,7 +3009,7 @@ static void start_io(ctlr_info_t *h)
28890 while (!list_empty(&h->reqQ)) {
28891 c = list_entry(h->reqQ.next, CommandList_struct, list);
28892 /* can't do anything if fifo is full */
28893- if ((h->access.fifo_full(h))) {
28894+ if ((h->access->fifo_full(h))) {
28895 dev_warn(&h->pdev->dev, "fifo full\n");
28896 break;
28897 }
4c928ab7 28898@@ -3017,7 +3019,7 @@ static void start_io(ctlr_info_t *h)
15a11c5b 28899 h->Qdepth--;
66a7e928 28900
28901 /* Tell the controller execute command */
28902- h->access.submit_command(h, c);
28903+ h->access->submit_command(h, c);
66a7e928 28904
28905 /* Put job onto the completed Q */
28906 addQ(&h->cmpQ, c);
4c928ab7 28907@@ -3443,17 +3445,17 @@ startio:
66a7e928 28908
28909 static inline unsigned long get_next_completion(ctlr_info_t *h)
28910 {
28911- return h->access.command_completed(h);
28912+ return h->access->command_completed(h);
28913 }
28914
28915 static inline int interrupt_pending(ctlr_info_t *h)
28916 {
28917- return h->access.intr_pending(h);
28918+ return h->access->intr_pending(h);
28919 }
28920
28921 static inline long interrupt_not_for_us(ctlr_info_t *h)
28922 {
28923- return ((h->access.intr_pending(h) == 0) ||
28924+ return ((h->access->intr_pending(h) == 0) ||
28925 (h->interrupts_enabled == 0));
28926 }
28927
4c928ab7 28928@@ -3486,7 +3488,7 @@ static inline u32 next_command(ctlr_info_t *h)
28929 u32 a;
28930
28931 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
28932- return h->access.command_completed(h);
28933+ return h->access->command_completed(h);
28934
28935 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
28936 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
4c928ab7 28937@@ -4044,7 +4046,7 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
28938 trans_support & CFGTBL_Trans_use_short_tags);
28939
28940 /* Change the access methods to the performant access methods */
28941- h->access = SA5_performant_access;
28942+ h->access = &SA5_performant_access;
28943 h->transMethod = CFGTBL_Trans_Performant;
28944
28945 return;
4c928ab7 28946@@ -4316,7 +4318,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
28947 if (prod_index < 0)
28948 return -ENODEV;
28949 h->product_name = products[prod_index].product_name;
28950- h->access = *(products[prod_index].access);
28951+ h->access = products[prod_index].access;
28952
28953 if (cciss_board_disabled(h)) {
28954 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
4c928ab7 28955@@ -5041,7 +5043,7 @@ reinit_after_soft_reset:
28956 }
28957
28958 /* make sure the board interrupts are off */
28959- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28960+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28961 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
28962 if (rc)
28963 goto clean2;
4c928ab7 28964@@ -5093,7 +5095,7 @@ reinit_after_soft_reset:
28965 * fake ones to scoop up any residual completions.
28966 */
28967 spin_lock_irqsave(&h->lock, flags);
28968- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28969+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28970 spin_unlock_irqrestore(&h->lock, flags);
4c928ab7 28971 free_irq(h->intr[h->intr_mode], h);
15a11c5b 28972 rc = cciss_request_irq(h, cciss_msix_discard_completions,
4c928ab7 28973@@ -5113,9 +5115,9 @@ reinit_after_soft_reset:
28974 dev_info(&h->pdev->dev, "Board READY.\n");
28975 dev_info(&h->pdev->dev,
28976 "Waiting for stale completions to drain.\n");
28977- h->access.set_intr_mask(h, CCISS_INTR_ON);
28978+ h->access->set_intr_mask(h, CCISS_INTR_ON);
28979 msleep(10000);
28980- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28981+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
28982
28983 rc = controller_reset_failed(h->cfgtable);
28984 if (rc)
4c928ab7 28985@@ -5138,7 +5140,7 @@ reinit_after_soft_reset:
28986 cciss_scsi_setup(h);
28987
28988 /* Turn the interrupts on so we can service requests */
28989- h->access.set_intr_mask(h, CCISS_INTR_ON);
28990+ h->access->set_intr_mask(h, CCISS_INTR_ON);
28991
28992 /* Get the firmware version */
28993 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
4c928ab7 28994@@ -5211,7 +5213,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
28995 kfree(flush_buf);
28996 if (return_code != IO_OK)
28997 dev_warn(&h->pdev->dev, "Error flushing cache\n");
28998- h->access.set_intr_mask(h, CCISS_INTR_OFF);
28999+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
4c928ab7 29000 free_irq(h->intr[h->intr_mode], h);
29001 }
29002
fe2de317 29003diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
4c928ab7 29004index 7fda30e..eb5dfe0 100644
29005--- a/drivers/block/cciss.h
29006+++ b/drivers/block/cciss.h
4c928ab7 29007@@ -101,7 +101,7 @@ struct ctlr_info
29008 /* information about each logical volume */
29009 drive_info_struct *drv[CISS_MAX_LUN];
29010
29011- struct access_method access;
29012+ struct access_method *access;
29013
29014 /* queue and queue Info */
29015 struct list_head reqQ;
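
The cciss hunks above, and the cpqarray hunks that follow, replace the embedded struct access_method copy in the controller structure with a pointer, so every h->access.fn(...) call becomes h->access->fn(...) and the probe code stores &SA5_performant_access (or the matching products[] table entry) instead of copying it. Keeping one shared method table referenced by pointer means the table itself can stay in read-only memory, which is the usual motivation for this pattern in grsecurity's constification work. Below is a simplified sketch of the before/after shape; the names are illustrative stand-ins, not the driver's real symbols.

/* One shared, read-only ops table referenced by pointer instead of
 * being copied into every (writable) per-controller structure. */
#include <stdio.h>

struct access_method {
        void (*submit_command)(int ctlr, int tag);
        int  (*intr_pending)(int ctlr);
};

static void sa5_submit(int ctlr, int tag) { printf("ctlr%d: submit %d\n", ctlr, tag); }
static int  sa5_pending(int ctlr)         { (void)ctlr; return 0; }

/* 'const' lets the linker place the function pointers in .rodata. */
static const struct access_method sa5_access = {
        .submit_command = sa5_submit,
        .intr_pending   = sa5_pending,
};

struct ctlr_info {
        int ctlr;
        const struct access_method *access; /* pointer, not an embedded copy */
};

int main(void)
{
        struct ctlr_info h = { .ctlr = 0, .access = &sa5_access };

        h.access->submit_command(h.ctlr, 42); /* was: h.access.submit_command(...) */
        return 0;
}

Driver behaviour is unchanged; only the storage of the function pointers moves, out of a writable per-controller structure that a kernel write primitive could otherwise overwrite.
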
fe2de317 29016diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
4c928ab7 29017index 9125bbe..eede5c8 100644
29018--- a/drivers/block/cpqarray.c
29019+++ b/drivers/block/cpqarray.c
29020@@ -404,7 +404,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29021 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
29022 goto Enomem4;
29023 }
29024- hba[i]->access.set_intr_mask(hba[i], 0);
29025+ hba[i]->access->set_intr_mask(hba[i], 0);
29026 if (request_irq(hba[i]->intr, do_ida_intr,
29027 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
29028 {
fe2de317 29029@@ -459,7 +459,7 @@ static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
29030 add_timer(&hba[i]->timer);
29031
29032 /* Enable IRQ now that spinlock and rate limit timer are set up */
29033- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29034+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
29035
29036 for(j=0; j<NWD; j++) {
29037 struct gendisk *disk = ida_gendisk[i][j];
29038@@ -694,7 +694,7 @@ DBGINFO(
29039 for(i=0; i<NR_PRODUCTS; i++) {
29040 if (board_id == products[i].board_id) {
29041 c->product_name = products[i].product_name;
29042- c->access = *(products[i].access);
29043+ c->access = products[i].access;
29044 break;
29045 }
29046 }
fe2de317 29047@@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detect(void)
29048 hba[ctlr]->intr = intr;
29049 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
29050 hba[ctlr]->product_name = products[j].product_name;
29051- hba[ctlr]->access = *(products[j].access);
29052+ hba[ctlr]->access = products[j].access;
29053 hba[ctlr]->ctlr = ctlr;
29054 hba[ctlr]->board_id = board_id;
29055 hba[ctlr]->pci_dev = NULL; /* not PCI */
4c928ab7 29056@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
29057
29058 while((c = h->reqQ) != NULL) {
29059 /* Can't do anything if we're busy */
29060- if (h->access.fifo_full(h) == 0)
29061+ if (h->access->fifo_full(h) == 0)
29062 return;
66a7e928 29063
15a11c5b 29064 /* Get the first entry from the request Q */
4c928ab7 29065@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
29066 h->Qdepth--;
29067
29068 /* Tell the controller to do our bidding */
29069- h->access.submit_command(h, c);
29070+ h->access->submit_command(h, c);
29071
29072 /* Get onto the completion Q */
29073 addQ(&h->cmpQ, c);
4c928ab7 29074@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29075 unsigned long flags;
29076 __u32 a,a1;
29077
29078- istat = h->access.intr_pending(h);
29079+ istat = h->access->intr_pending(h);
29080 /* Is this interrupt for us? */
29081 if (istat == 0)
29082 return IRQ_NONE;
4c928ab7 29083@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
29084 */
29085 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
29086 if (istat & FIFO_NOT_EMPTY) {
29087- while((a = h->access.command_completed(h))) {
29088+ while((a = h->access->command_completed(h))) {
29089 a1 = a; a &= ~3;
29090 if ((c = h->cmpQ) == NULL)
29091 {
4c928ab7 29092@@ -1449,11 +1449,11 @@ static int sendcmd(
29093 /*
29094 * Disable interrupt
29095 */
29096- info_p->access.set_intr_mask(info_p, 0);
29097+ info_p->access->set_intr_mask(info_p, 0);
29098 /* Make sure there is room in the command FIFO */
29099 /* Actually it should be completely empty at this time. */
29100 for (i = 200000; i > 0; i--) {
29101- temp = info_p->access.fifo_full(info_p);
29102+ temp = info_p->access->fifo_full(info_p);
29103 if (temp != 0) {
29104 break;
29105 }
4c928ab7 29106@@ -1466,7 +1466,7 @@ DBG(
29107 /*
29108 * Send the cmd
29109 */
29110- info_p->access.submit_command(info_p, c);
29111+ info_p->access->submit_command(info_p, c);
29112 complete = pollcomplete(ctlr);
29113
29114 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
4c928ab7 29115@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
29116 * we check the new geometry. Then turn interrupts back on when
29117 * we're done.
29118 */
29119- host->access.set_intr_mask(host, 0);
29120+ host->access->set_intr_mask(host, 0);
29121 getgeometry(ctlr);
29122- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
29123+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
29124
29125 for(i=0; i<NWD; i++) {
29126 struct gendisk *disk = ida_gendisk[ctlr][i];
4c928ab7 29127@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
29128 /* Wait (up to 2 seconds) for a command to complete */
29129
29130 for (i = 200000; i > 0; i--) {
29131- done = hba[ctlr]->access.command_completed(hba[ctlr]);
29132+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
29133 if (done == 0) {
29134 udelay(10); /* a short fixed delay */
29135 } else
29136diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
29137index be73e9d..7fbf140 100644
29138--- a/drivers/block/cpqarray.h
29139+++ b/drivers/block/cpqarray.h
29140@@ -99,7 +99,7 @@ struct ctlr_info {
29141 drv_info_t drv[NWD];
29142 struct proc_dir_entry *proc;
29143
29144- struct access_method access;
29145+ struct access_method *access;
29146
29147 cmdlist_t *reqQ;
29148 cmdlist_t *cmpQ;
fe2de317 29149diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
5e856224 29150index 8d68056..e67050f 100644
29151--- a/drivers/block/drbd/drbd_int.h
29152+++ b/drivers/block/drbd/drbd_int.h
4c928ab7 29153@@ -736,7 +736,7 @@ struct drbd_request;
29154 struct drbd_epoch {
29155 struct list_head list;
29156 unsigned int barrier_nr;
29157- atomic_t epoch_size; /* increased on every request added. */
29158+ atomic_unchecked_t epoch_size; /* increased on every request added. */
29159 atomic_t active; /* increased on every req. added, and dec on every finished. */
29160 unsigned long flags;
29161 };
4c928ab7 29162@@ -1108,7 +1108,7 @@ struct drbd_conf {
29163 void *int_dig_in;
29164 void *int_dig_vv;
29165 wait_queue_head_t seq_wait;
29166- atomic_t packet_seq;
29167+ atomic_unchecked_t packet_seq;
29168 unsigned int peer_seq;
29169 spinlock_t peer_seq_lock;
29170 unsigned int minor;
4c928ab7 29171@@ -1617,30 +1617,30 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
29172
29173 static inline void drbd_tcp_cork(struct socket *sock)
29174 {
29175- int __user val = 1;
29176+ int val = 1;
29177 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29178- (char __user *)&val, sizeof(val));
29179+ (char __force_user *)&val, sizeof(val));
29180 }
29181
29182 static inline void drbd_tcp_uncork(struct socket *sock)
29183 {
29184- int __user val = 0;
29185+ int val = 0;
29186 (void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
29187- (char __user *)&val, sizeof(val));
29188+ (char __force_user *)&val, sizeof(val));
29189 }
29190
29191 static inline void drbd_tcp_nodelay(struct socket *sock)
29192 {
29193- int __user val = 1;
29194+ int val = 1;
29195 (void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
29196- (char __user *)&val, sizeof(val));
29197+ (char __force_user *)&val, sizeof(val));
29198 }
29199
29200 static inline void drbd_tcp_quickack(struct socket *sock)
29201 {
29202- int __user val = 2;
29203+ int val = 2;
29204 (void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
29205- (char __user *)&val, sizeof(val));
29206+ (char __force_user *)&val, sizeof(val));
29207 }
29208
29209 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
fe2de317 29210diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
5e856224 29211index 211fc44..c5116f1 100644
29212--- a/drivers/block/drbd/drbd_main.c
29213+++ b/drivers/block/drbd/drbd_main.c
29214@@ -2397,7 +2397,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
29215 p.sector = sector;
29216 p.block_id = block_id;
29217 p.blksize = blksize;
29218- p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
29219+ p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
29220
29221 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
66a7e928 29222 return false;
fe2de317 29223@@ -2696,7 +2696,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
29224 p.sector = cpu_to_be64(req->sector);
29225 p.block_id = (unsigned long)req;
29226 p.seq_num = cpu_to_be32(req->seq_num =
29227- atomic_add_return(1, &mdev->packet_seq));
29228+ atomic_add_return_unchecked(1, &mdev->packet_seq));
29229
29230 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
29231
fe2de317 29232@@ -2981,7 +2981,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
29233 atomic_set(&mdev->unacked_cnt, 0);
29234 atomic_set(&mdev->local_cnt, 0);
29235 atomic_set(&mdev->net_cnt, 0);
29236- atomic_set(&mdev->packet_seq, 0);
29237+ atomic_set_unchecked(&mdev->packet_seq, 0);
29238 atomic_set(&mdev->pp_in_use, 0);
29239 atomic_set(&mdev->pp_in_use_by_net, 0);
29240 atomic_set(&mdev->rs_sect_in, 0);
fe2de317 29241@@ -3063,8 +3063,8 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
29242 mdev->receiver.t_state);
29243
29244 /* no need to lock it, I'm the only thread alive */
29245- if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
29246- dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
29247+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
29248+ dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
29249 mdev->al_writ_cnt =
29250 mdev->bm_writ_cnt =
29251 mdev->read_cnt =
fe2de317 29252diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
c6e2a6c8 29253index 946166e..356b39a 100644
29254--- a/drivers/block/drbd/drbd_nl.c
29255+++ b/drivers/block/drbd/drbd_nl.c
29256@@ -2359,7 +2359,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
29257 module_put(THIS_MODULE);
29258 }
29259
29260-static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29261+static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
29262
29263 static unsigned short *
29264 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
fe2de317 29265@@ -2430,7 +2430,7 @@ void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
29266 cn_reply->id.idx = CN_IDX_DRBD;
29267 cn_reply->id.val = CN_VAL_DRBD;
29268
29269- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29270+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29271 cn_reply->ack = 0; /* not used here. */
29272 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29273 (int)((char *)tl - (char *)reply->tag_list);
fe2de317 29274@@ -2462,7 +2462,7 @@ void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
29275 cn_reply->id.idx = CN_IDX_DRBD;
29276 cn_reply->id.val = CN_VAL_DRBD;
29277
29278- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29279+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29280 cn_reply->ack = 0; /* not used here. */
29281 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29282 (int)((char *)tl - (char *)reply->tag_list);
fe2de317 29283@@ -2540,7 +2540,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
29284 cn_reply->id.idx = CN_IDX_DRBD;
29285 cn_reply->id.val = CN_VAL_DRBD;
29286
29287- cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
29288+ cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
29289 cn_reply->ack = 0; // not used here.
29290 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29291 (int)((char*)tl - (char*)reply->tag_list);
fe2de317 29292@@ -2579,7 +2579,7 @@ void drbd_bcast_sync_progress(struct drbd_conf *mdev)
29293 cn_reply->id.idx = CN_IDX_DRBD;
29294 cn_reply->id.val = CN_VAL_DRBD;
29295
29296- cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
29297+ cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
29298 cn_reply->ack = 0; /* not used here. */
29299 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
29300 (int)((char *)tl - (char *)reply->tag_list);
29301diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
29302index 43beaca..4a5b1dd 100644
29303--- a/drivers/block/drbd/drbd_receiver.c
29304+++ b/drivers/block/drbd/drbd_receiver.c
66a7e928 29305@@ -894,7 +894,7 @@ retry:
29306 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
29307 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
29308
29309- atomic_set(&mdev->packet_seq, 0);
29310+ atomic_set_unchecked(&mdev->packet_seq, 0);
29311 mdev->peer_seq = 0;
29312
29313 drbd_thread_start(&mdev->asender);
fe2de317 29314@@ -985,7 +985,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29315 do {
29316 next_epoch = NULL;
29317
29318- epoch_size = atomic_read(&epoch->epoch_size);
29319+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
29320
29321 switch (ev & ~EV_CLEANUP) {
29322 case EV_PUT:
fe2de317 29323@@ -1020,7 +1020,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
29324 rv = FE_DESTROYED;
29325 } else {
29326 epoch->flags = 0;
29327- atomic_set(&epoch->epoch_size, 0);
29328+ atomic_set_unchecked(&epoch->epoch_size, 0);
29329 /* atomic_set(&epoch->active, 0); is already zero */
29330 if (rv == FE_STILL_LIVE)
29331 rv = FE_RECYCLED;
fe2de317 29332@@ -1191,14 +1191,14 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29333 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
29334 drbd_flush(mdev);
29335
29336- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29337+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29338 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
29339 if (epoch)
29340 break;
29341 }
29342
29343 epoch = mdev->current_epoch;
29344- wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
29345+ wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
29346
29347 D_ASSERT(atomic_read(&epoch->active) == 0);
29348 D_ASSERT(epoch->flags == 0);
fe2de317 29349@@ -1210,11 +1210,11 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
29350 }
29351
29352 epoch->flags = 0;
29353- atomic_set(&epoch->epoch_size, 0);
29354+ atomic_set_unchecked(&epoch->epoch_size, 0);
29355 atomic_set(&epoch->active, 0);
29356
29357 spin_lock(&mdev->epoch_lock);
29358- if (atomic_read(&mdev->current_epoch->epoch_size)) {
29359+ if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
29360 list_add(&epoch->list, &mdev->current_epoch->list);
29361 mdev->current_epoch = epoch;
29362 mdev->epochs++;
fe2de317 29363@@ -1663,7 +1663,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29364 spin_unlock(&mdev->peer_seq_lock);
29365
29366 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
29367- atomic_inc(&mdev->current_epoch->epoch_size);
29368+ atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
29369 return drbd_drain_block(mdev, data_size);
29370 }
29371
fe2de317 29372@@ -1689,7 +1689,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
29373
29374 spin_lock(&mdev->epoch_lock);
29375 e->epoch = mdev->current_epoch;
29376- atomic_inc(&e->epoch->epoch_size);
29377+ atomic_inc_unchecked(&e->epoch->epoch_size);
29378 atomic_inc(&e->epoch->active);
29379 spin_unlock(&mdev->epoch_lock);
29380
fe2de317 29381@@ -3885,7 +3885,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
29382 D_ASSERT(list_empty(&mdev->done_ee));
29383
29384 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
29385- atomic_set(&mdev->current_epoch->epoch_size, 0);
29386+ atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
29387 D_ASSERT(list_empty(&mdev->current_epoch->list));
29388 }
29389
fe2de317 29390diff --git a/drivers/block/loop.c b/drivers/block/loop.c
c6e2a6c8 29391index bbca966..65e37dd 100644
29392--- a/drivers/block/loop.c
29393+++ b/drivers/block/loop.c
5e856224 29394@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
29395 mm_segment_t old_fs = get_fs();
29396
29397 set_fs(get_ds());
29398- bw = file->f_op->write(file, buf, len, &pos);
29399+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
29400 set_fs(old_fs);
29401 if (likely(bw == len))
29402 return 0;
fe2de317 29403diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
c6e2a6c8 29404index ee94686..3e09ad3 100644
29405--- a/drivers/char/Kconfig
29406+++ b/drivers/char/Kconfig
29407@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
29408
29409 config DEVKMEM
29410 bool "/dev/kmem virtual device support"
29411- default y
29412+ default n
29413+ depends on !GRKERNSEC_KMEM
29414 help
29415 Say Y here if you want to support the /dev/kmem device. The
29416 /dev/kmem device is rarely used, but can be used for certain
c6e2a6c8 29417@@ -581,6 +582,7 @@ config DEVPORT
29418 bool
29419 depends on !M68K
29420 depends on ISA || PCI
29421+ depends on !GRKERNSEC_KMEM
29422 default y
29423
29424 source "drivers/s390/char/Kconfig"
29425diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
29426index 2e04433..22afc64 100644
29427--- a/drivers/char/agp/frontend.c
29428+++ b/drivers/char/agp/frontend.c
29429@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
29430 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
29431 return -EFAULT;
29432
29433- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
29434+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
29435 return -EFAULT;
29436
29437 client = agp_find_client_by_pid(reserve.pid);
fe2de317 29438diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
c6e2a6c8 29439index 21cb980..f15107c 100644
29440--- a/drivers/char/genrtc.c
29441+++ b/drivers/char/genrtc.c
c6e2a6c8 29442@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
29443 switch (cmd) {
29444
29445 case RTC_PLL_GET:
29446+ memset(&pll, 0, sizeof(pll));
29447 if (get_rtc_pll(&pll))
29448 return -EINVAL;
29449 else
fe2de317 29450diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
c6e2a6c8 29451index dfd7876..c0b0885 100644
29452--- a/drivers/char/hpet.c
29453+++ b/drivers/char/hpet.c
c6e2a6c8 29454@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
29455 }
29456
df50ba0c 29457 static int
29458-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
29459+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
29460 struct hpet_info *info)
ae4e228f 29461 {
df50ba0c 29462 struct hpet_timer __iomem *timer;
fe2de317 29463diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
c6e2a6c8 29464index 2c29942..604c5ba 100644
29465--- a/drivers/char/ipmi/ipmi_msghandler.c
29466+++ b/drivers/char/ipmi/ipmi_msghandler.c
c6e2a6c8 29467@@ -420,7 +420,7 @@ struct ipmi_smi {
29468 struct proc_dir_entry *proc_dir;
29469 char proc_dir_name[10];
29470
29471- atomic_t stats[IPMI_NUM_STATS];
29472+ atomic_unchecked_t stats[IPMI_NUM_STATS];
29473
29474 /*
29475 * run_to_completion duplicate of smb_info, smi_info
c6e2a6c8 29476@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
29477
29478
29479 #define ipmi_inc_stat(intf, stat) \
29480- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
29481+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
29482 #define ipmi_get_stat(intf, stat) \
29483- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
29484+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
29485
29486 static int is_lan_addr(struct ipmi_addr *addr)
29487 {
c6e2a6c8 29488@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
29489 INIT_LIST_HEAD(&intf->cmd_rcvrs);
29490 init_waitqueue_head(&intf->waitq);
29491 for (i = 0; i < IPMI_NUM_STATS; i++)
29492- atomic_set(&intf->stats[i], 0);
29493+ atomic_set_unchecked(&intf->stats[i], 0);
29494
29495 intf->proc_dir = NULL;
29496
fe2de317 29497diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
c6e2a6c8 29498index 1e638ff..a869ef5 100644
29499--- a/drivers/char/ipmi/ipmi_si_intf.c
29500+++ b/drivers/char/ipmi/ipmi_si_intf.c
c6e2a6c8 29501@@ -275,7 +275,7 @@ struct smi_info {
29502 unsigned char slave_addr;
29503
29504 /* Counters and things for the proc filesystem. */
29505- atomic_t stats[SI_NUM_STATS];
29506+ atomic_unchecked_t stats[SI_NUM_STATS];
29507
29508 struct task_struct *thread;
29509
c6e2a6c8 29510@@ -284,9 +284,9 @@ struct smi_info {
29511 };
29512
29513 #define smi_inc_stat(smi, stat) \
29514- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
29515+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
29516 #define smi_get_stat(smi, stat) \
29517- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
29518+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
29519
29520 #define SI_MAX_PARMS 4
29521
c6e2a6c8 29522@@ -3209,7 +3209,7 @@ static int try_smi_init(struct smi_info *new_smi)
29523 atomic_set(&new_smi->req_events, 0);
29524 new_smi->run_to_completion = 0;
29525 for (i = 0; i < SI_NUM_STATS; i++)
29526- atomic_set(&new_smi->stats[i], 0);
29527+ atomic_set_unchecked(&new_smi->stats[i], 0);
29528
57199397 29529 new_smi->interrupt_disabled = 1;
58c5fc13 29530 atomic_set(&new_smi->stop_operation, 0);
fe2de317 29531diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
c6e2a6c8 29532index 47ff7e4..0c7d340 100644
29533--- a/drivers/char/mbcs.c
29534+++ b/drivers/char/mbcs.c
c6e2a6c8 29535@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
29536 return 0;
29537 }
29538
29539-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
29540+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
29541 {
29542 .part_num = MBCS_PART_NUM,
29543 .mfg_num = MBCS_MFG_NUM,
fe2de317 29544diff --git a/drivers/char/mem.c b/drivers/char/mem.c
572b4308 29545index d6e9d08..0c314bf 100644
29546--- a/drivers/char/mem.c
29547+++ b/drivers/char/mem.c
29548@@ -18,6 +18,7 @@
29549 #include <linux/raw.h>
29550 #include <linux/tty.h>
29551 #include <linux/capability.h>
29552+#include <linux/security.h>
29553 #include <linux/ptrace.h>
29554 #include <linux/device.h>
29555 #include <linux/highmem.h>
4c928ab7 29556@@ -35,6 +36,10 @@
29557 # include <linux/efi.h>
29558 #endif
29559
29560+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
6e9df6a3 29561+extern const struct file_operations grsec_fops;
29562+#endif
29563+
29564 static inline unsigned long size_inside_page(unsigned long start,
29565 unsigned long size)
29566 {
4c928ab7 29567@@ -66,9 +71,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29568
29569 while (cursor < to) {
29570 if (!devmem_is_allowed(pfn)) {
29571+#ifdef CONFIG_GRKERNSEC_KMEM
29572+ gr_handle_mem_readwrite(from, to);
29573+#else
29574 printk(KERN_INFO
29575 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
29576 current->comm, from, to);
29577+#endif
29578 return 0;
29579 }
29580 cursor += PAGE_SIZE;
4c928ab7 29581@@ -76,6 +85,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29582 }
29583 return 1;
29584 }
29585+#elif defined(CONFIG_GRKERNSEC_KMEM)
29586+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29587+{
29588+ return 0;
29589+}
29590 #else
29591 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29592 {
4c928ab7 29593@@ -118,6 +132,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29594
29595 while (count > 0) {
29596 unsigned long remaining;
29597+ char *temp;
29598
29599 sz = size_inside_page(p, count);
29600
4c928ab7 29601@@ -133,7 +148,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
29602 if (!ptr)
29603 return -EFAULT;
29604
29605- remaining = copy_to_user(buf, ptr, sz);
29606+#ifdef CONFIG_PAX_USERCOPY
572b4308 29607+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29608+ if (!temp) {
29609+ unxlate_dev_mem_ptr(p, ptr);
29610+ return -ENOMEM;
29611+ }
29612+ memcpy(temp, ptr, sz);
29613+#else
29614+ temp = ptr;
29615+#endif
29616+
29617+ remaining = copy_to_user(buf, temp, sz);
29618+
29619+#ifdef CONFIG_PAX_USERCOPY
29620+ kfree(temp);
29621+#endif
29622+
29623 unxlate_dev_mem_ptr(p, ptr);
29624 if (remaining)
29625 return -EFAULT;
4c928ab7 29626@@ -396,9 +427,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29627 size_t count, loff_t *ppos)
29628 {
29629 unsigned long p = *ppos;
29630- ssize_t low_count, read, sz;
29631+ ssize_t low_count, read, sz, err = 0;
29632 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
29633- int err = 0;
29634
29635 read = 0;
29636 if (p < (unsigned long) high_memory) {
4c928ab7 29637@@ -420,6 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29638 }
29639 #endif
29640 while (low_count > 0) {
29641+ char *temp;
29642+
29643 sz = size_inside_page(p, low_count);
29644
29645 /*
4c928ab7 29646@@ -429,7 +461,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
29647 */
29648 kbuf = xlate_dev_kmem_ptr((char *)p);
29649
29650- if (copy_to_user(buf, kbuf, sz))
29651+#ifdef CONFIG_PAX_USERCOPY
572b4308 29652+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
29653+ if (!temp)
29654+ return -ENOMEM;
29655+ memcpy(temp, kbuf, sz);
29656+#else
29657+ temp = kbuf;
29658+#endif
29659+
29660+ err = copy_to_user(buf, temp, sz);
29661+
29662+#ifdef CONFIG_PAX_USERCOPY
29663+ kfree(temp);
29664+#endif
29665+
29666+ if (err)
29667 return -EFAULT;
29668 buf += sz;
29669 p += sz;
4c928ab7 29670@@ -867,6 +914,9 @@ static const struct memdev {
58c5fc13 29671 #ifdef CONFIG_CRASH_DUMP
ae4e228f 29672 [12] = { "oldmem", 0, &oldmem_fops, NULL },
29673 #endif
29674+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
ae4e228f 29675+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
29676+#endif
29677 };
29678
29679 static int memory_open(struct inode *inode, struct file *filp)
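
In the mem.c hunks above, read_mem() and read_kmem() gain a CONFIG_PAX_USERCOPY branch: the data is first copied into a kmalloc() bounce buffer of exactly sz bytes (allocated with GFP_KERNEL|GFP_USERCOPY), that buffer is handed to copy_to_user(), and the buffer is freed afterwards; if the allocation fails the new code returns -ENOMEM. PAX_USERCOPY checks copies to and from userland against the size of the kernel heap object involved, and a bounce buffer sized to the request gives the checker a well-defined object where the raw /dev/mem or /dev/kmem pointer had none. The sketch below only models the idea in userspace; the simple length comparison stands in for the real USERCOPY check and all names are illustrative.

/* Userspace model of the bounce-buffer pattern added to read_mem()/
 * read_kmem(): copy through a heap object whose size matches the
 * request so a bounds-checking copy routine has something to verify. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int checked_copy(void *dst, const void *src, size_t len, size_t src_obj_size)
{
        if (len > src_obj_size) {  /* copying past the source object: reject */
                fprintf(stderr, "usercopy: %zu bytes from %zu-byte object\n",
                        len, src_obj_size);
                return -1;
        }
        memcpy(dst, src, len);
        return 0;
}

int main(void)
{
        char device_window[4096];  /* stands in for the xlated /dev/mem page */
        char user_buf[128] = { 0 };
        size_t sz = sizeof(user_buf);

        memset(device_window, 'A', sizeof(device_window));

        char *temp = malloc(sz);   /* bounce buffer sized exactly to the request */
        if (!temp)
                return 1;
        memcpy(temp, device_window, sz);

        int err = checked_copy(user_buf, temp, sz, sz); /* bound is now exact */
        free(temp);

        printf("copy %s, first byte %c\n", err ? "rejected" : "ok", user_buf[0]);
        return 0;
}

The cost is one extra allocation and copy per chunk; the benefit is that the heap-object size check also applies to /dev/mem and /dev/kmem reads.
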
fe2de317 29680diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
c6e2a6c8 29681index 9df78e2..01ba9ae 100644
29682--- a/drivers/char/nvram.c
29683+++ b/drivers/char/nvram.c
c6e2a6c8 29684@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
29685
29686 spin_unlock_irq(&rtc_lock);
58c5fc13 29687
29688- if (copy_to_user(buf, contents, tmp - contents))
29689+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
29690 return -EFAULT;
29691
29692 *ppos = i;
fe2de317 29693diff --git a/drivers/char/random.c b/drivers/char/random.c
572b4308 29694index 4ec04a7..9918387 100644
29695--- a/drivers/char/random.c
29696+++ b/drivers/char/random.c
66a7e928 29697@@ -261,8 +261,13 @@
29698 /*
29699 * Configuration information
29700 */
29701+#ifdef CONFIG_GRKERNSEC_RANDNET
29702+#define INPUT_POOL_WORDS 512
29703+#define OUTPUT_POOL_WORDS 128
29704+#else
29705 #define INPUT_POOL_WORDS 128
29706 #define OUTPUT_POOL_WORDS 32
29707+#endif
29708 #define SEC_XFER_SIZE 512
57199397 29709 #define EXTRACT_SIZE 10
58c5fc13 29710
66a7e928 29711@@ -300,10 +305,17 @@ static struct poolinfo {
29712 int poolwords;
29713 int tap1, tap2, tap3, tap4, tap5;
29714 } poolinfo_table[] = {
29715+#ifdef CONFIG_GRKERNSEC_RANDNET
29716+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
29717+ { 512, 411, 308, 208, 104, 1 },
29718+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
29719+ { 128, 103, 76, 51, 25, 1 },
29720+#else
29721 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
29722 { 128, 103, 76, 51, 25, 1 },
29723 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
29724 { 32, 26, 20, 14, 7, 1 },
29725+#endif
29726 #if 0
29727 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
29728 { 2048, 1638, 1231, 819, 411, 1 },
29729@@ -726,6 +738,17 @@ void add_disk_randomness(struct gendisk *disk)
29730 }
29731 #endif
29732
29733+#ifdef CONFIG_PAX_LATENT_ENTROPY
29734+u64 latent_entropy;
29735+
29736+__init void transfer_latent_entropy(void)
29737+{
29738+ mix_pool_bytes(&input_pool, &latent_entropy, sizeof(latent_entropy));
29739+ mix_pool_bytes(&nonblocking_pool, &latent_entropy, sizeof(latent_entropy));
29740+// printk(KERN_INFO "PAX: transferring latent entropy: %16llx\n", latent_entropy);
29741+}
29742+#endif
29743+
29744 /*********************************************************************
29745 *
29746 * Entropy extraction routines
29747@@ -913,7 +936,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
29748
29749 extract_buf(r, tmp);
29750 i = min_t(int, nbytes, EXTRACT_SIZE);
29751- if (copy_to_user(buf, tmp, i)) {
29752+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
29753 ret = -EFAULT;
29754 break;
29755 }
572b4308 29756@@ -1238,7 +1261,7 @@ EXPORT_SYMBOL(generate_random_uuid);
29757 #include <linux/sysctl.h>
29758
29759 static int min_read_thresh = 8, min_write_thresh;
29760-static int max_read_thresh = INPUT_POOL_WORDS * 32;
29761+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
29762 static int max_write_thresh = INPUT_POOL_WORDS * 32;
29763 static char sysctl_bootid[16];
29764
fe2de317 29765diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
c6e2a6c8 29766index 45713f0..8286d21 100644
29767--- a/drivers/char/sonypi.c
29768+++ b/drivers/char/sonypi.c
29769@@ -54,6 +54,7 @@
29770
29771 #include <asm/uaccess.h>
29772 #include <asm/io.h>
29773+#include <asm/local.h>
29774
29775 #include <linux/sonypi.h>
29776
c6e2a6c8 29777@@ -490,7 +491,7 @@ static struct sonypi_device {
29778 spinlock_t fifo_lock;
29779 wait_queue_head_t fifo_proc_list;
29780 struct fasync_struct *fifo_async;
29781- int open_count;
c52201e0 29782+ local_t open_count;
29783 int model;
29784 struct input_dev *input_jog_dev;
29785 struct input_dev *input_key_dev;
c6e2a6c8 29786@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
29787 static int sonypi_misc_release(struct inode *inode, struct file *file)
29788 {
29789 mutex_lock(&sonypi_device.lock);
29790- sonypi_device.open_count--;
c52201e0 29791+ local_dec(&sonypi_device.open_count);
29792 mutex_unlock(&sonypi_device.lock);
29793 return 0;
29794 }
c6e2a6c8 29795@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
ae4e228f 29796 {
29797 mutex_lock(&sonypi_device.lock);
29798 /* Flush input queue on first open */
29799- if (!sonypi_device.open_count)
c52201e0 29800+ if (!local_read(&sonypi_device.open_count))
ae4e228f 29801 kfifo_reset(&sonypi_device.fifo);
58c5fc13 29802- sonypi_device.open_count++;
c52201e0 29803+ local_inc(&sonypi_device.open_count);
58c5fc13 29804 mutex_unlock(&sonypi_device.lock);
ae4e228f 29805
58c5fc13 29806 return 0;
fe2de317 29807diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
5e856224 29808index ad7c732..5aa8054 100644
29809--- a/drivers/char/tpm/tpm.c
29810+++ b/drivers/char/tpm/tpm.c
5e856224 29811@@ -415,7 +415,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
29812 chip->vendor.req_complete_val)
29813 goto out_recv;
29814
29815- if ((status == chip->vendor.req_canceled)) {
29816+ if (status == chip->vendor.req_canceled) {
29817 dev_err(chip->dev, "Operation Canceled\n");
29818 rc = -ECANCELED;
29819 goto out;
29820diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
29821index 0636520..169c1d0 100644
29822--- a/drivers/char/tpm/tpm_bios.c
29823+++ b/drivers/char/tpm/tpm_bios.c
29824@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
29825 event = addr;
29826
29827 if ((event->event_type == 0 && event->event_size == 0) ||
29828- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
29829+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
29830 return NULL;
29831
29832 return addr;
fe2de317 29833@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
29834 return NULL;
29835
29836 if ((event->event_type == 0 && event->event_size == 0) ||
29837- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
29838+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
29839 return NULL;
29840
29841 (*pos)++;
fe2de317 29842@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
29843 int i;
29844
29845 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
29846- seq_putc(m, data[i]);
29847+ if (!seq_putc(m, data[i]))
29848+ return -EFAULT;
29849
29850 return 0;
29851 }
fe2de317 29852@@ -410,8 +411,13 @@ static int read_log(struct tpm_bios_log *log)
ae4e228f 29853 log->bios_event_log_end = log->bios_event_log + len;
58c5fc13 29854
29855 virt = acpi_os_map_memory(start, len);
29856+ if (!virt) {
29857+ kfree(log->bios_event_log);
29858+ log->bios_event_log = NULL;
29859+ return -EFAULT;
29860+ }
29861
29862- memcpy(log->bios_event_log, virt, len);
29863+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
ae4e228f 29864
29865 acpi_os_unmap_memory(virt, len);
29866 return 0;
fe2de317 29867diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
c6e2a6c8 29868index cdf2f54..e55c197 100644
29869--- a/drivers/char/virtio_console.c
29870+++ b/drivers/char/virtio_console.c
4c928ab7 29871@@ -563,7 +563,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
29872 if (to_user) {
29873 ssize_t ret;
29874
29875- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
29876+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
29877 if (ret)
29878 return -EFAULT;
29879 } else {
4c928ab7 29880@@ -662,7 +662,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
29881 if (!port_has_data(port) && !port->host_connected)
29882 return 0;
29883
29884- return fill_readbuf(port, ubuf, count, true);
29885+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
29886 }
29887
29888 static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
fe2de317 29889diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
5e856224 29890index 97f5064..202b6e6 100644
29891--- a/drivers/edac/edac_pci_sysfs.c
29892+++ b/drivers/edac/edac_pci_sysfs.c
29893@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
29894 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29895 static int edac_pci_poll_msec = 1000; /* one second workq period */
29896
29897-static atomic_t pci_parity_count = ATOMIC_INIT(0);
29898-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29899+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29900+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29901
29902 static struct kobject *edac_pci_top_main_kobj;
29903 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
fe2de317 29904@@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29905 edac_printk(KERN_CRIT, EDAC_PCI,
29906 "Signaled System Error on %s\n",
29907 pci_name(dev));
29908- atomic_inc(&pci_nonparity_count);
29909+ atomic_inc_unchecked(&pci_nonparity_count);
29910 }
29911
29912 if (status & (PCI_STATUS_PARITY)) {
fe2de317 29913@@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29914 "Master Data Parity Error on %s\n",
29915 pci_name(dev));
29916
29917- atomic_inc(&pci_parity_count);
29918+ atomic_inc_unchecked(&pci_parity_count);
29919 }
29920
29921 if (status & (PCI_STATUS_DETECTED_PARITY)) {
fe2de317 29922@@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29923 "Detected Parity Error on %s\n",
29924 pci_name(dev));
29925
29926- atomic_inc(&pci_parity_count);
29927+ atomic_inc_unchecked(&pci_parity_count);
29928 }
29929 }
29930
fe2de317 29931@@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29932 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29933 "Signaled System Error on %s\n",
29934 pci_name(dev));
29935- atomic_inc(&pci_nonparity_count);
29936+ atomic_inc_unchecked(&pci_nonparity_count);
29937 }
29938
29939 if (status & (PCI_STATUS_PARITY)) {
fe2de317 29940@@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29941 "Master Data Parity Error on "
29942 "%s\n", pci_name(dev));
29943
29944- atomic_inc(&pci_parity_count);
29945+ atomic_inc_unchecked(&pci_parity_count);
29946 }
29947
29948 if (status & (PCI_STATUS_DETECTED_PARITY)) {
fe2de317 29949@@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
29950 "Detected Parity Error on %s\n",
29951 pci_name(dev));
29952
29953- atomic_inc(&pci_parity_count);
29954+ atomic_inc_unchecked(&pci_parity_count);
29955 }
29956 }
29957 }
29958@@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
29959 if (!check_pci_errors)
29960 return;
29961
29962- before_count = atomic_read(&pci_parity_count);
29963+ before_count = atomic_read_unchecked(&pci_parity_count);
29964
29965 /* scan all PCI devices looking for a Parity Error on devices and
29966 * bridges.
29967@@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
29968 /* Only if operator has selected panic on PCI Error */
29969 if (edac_pci_get_panic_on_pe()) {
29970 /* If the count is different 'after' from 'before' */
29971- if (before_count != atomic_read(&pci_parity_count))
29972+ if (before_count != atomic_read_unchecked(&pci_parity_count))
29973 panic("EDAC: PCI Parity Error");
29974 }
29975 }
fe2de317 29976diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
c6e2a6c8 29977index c6074c5..88a9e2e 100644
29978--- a/drivers/edac/mce_amd.h
29979+++ b/drivers/edac/mce_amd.h
29980@@ -82,7 +82,7 @@ extern const char * const ii_msgs[];
29981 struct amd_decoder_ops {
29982 bool (*dc_mce)(u16, u8);
29983 bool (*ic_mce)(u16, u8);
29984-};
29985+} __no_const;
29986
29987 void amd_report_gart_errors(bool);
4c928ab7 29988 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
fe2de317 29989diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
c6e2a6c8 29990index cc595eb..4ec702a 100644
29991--- a/drivers/firewire/core-card.c
29992+++ b/drivers/firewire/core-card.c
c6e2a6c8 29993@@ -679,7 +679,7 @@ void fw_card_release(struct kref *kref)
29994
29995 void fw_core_remove_card(struct fw_card *card)
29996 {
29997- struct fw_card_driver dummy_driver = dummy_driver_template;
29998+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
29999
30000 card->driver->update_phy_reg(card, 4,
30001 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fe2de317 30002diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
c6e2a6c8 30003index 2e6b245..c3857d9 100644
30004--- a/drivers/firewire/core-cdev.c
30005+++ b/drivers/firewire/core-cdev.c
c6e2a6c8 30006@@ -1341,8 +1341,7 @@ static int init_iso_resource(struct client *client,
df50ba0c 30007 int ret;
ae4e228f 30008
df50ba0c
MT
30009 if ((request->channels == 0 && request->bandwidth == 0) ||
30010- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
30011- request->bandwidth < 0)
30012+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
30013 return -EINVAL;
ae4e228f 30014
df50ba0c 30015 r = kmalloc(sizeof(*r), GFP_KERNEL);
fe2de317 30016diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
c6e2a6c8 30017index dea2dcc..a4fb978 100644
fe2de317
MT
30018--- a/drivers/firewire/core-transaction.c
30019+++ b/drivers/firewire/core-transaction.c
15a11c5b 30020@@ -37,6 +37,7 @@
66a7e928
MT
30021 #include <linux/timer.h>
30022 #include <linux/types.h>
15a11c5b 30023 #include <linux/workqueue.h>
66a7e928
MT
30024+#include <linux/sched.h>
30025
30026 #include <asm/byteorder.h>
30027
fe2de317 30028diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
c6e2a6c8 30029index 9047f55..e47c7ff 100644
fe2de317
MT
30030--- a/drivers/firewire/core.h
30031+++ b/drivers/firewire/core.h
c6e2a6c8 30032@@ -110,6 +110,7 @@ struct fw_card_driver {
fe2de317
MT
30033
30034 int (*stop_iso)(struct fw_iso_context *ctx);
30035 };
30036+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
30037
30038 void fw_card_initialize(struct fw_card *card,
30039 const struct fw_card_driver *driver, struct device *device);
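
The "-};" / "+} __no_const;" edits (mce_amd.h above, the nouveau and radeon ops structures below) and the fw_card_driver_no_const typedef belong to the structure-constification side of the patch: ops structures that contain only function pointers are made read-only at build time, __no_const opts out the few that really are written at run time, and the *_no_const typedef gives fw_core_remove_card() a writable local copy of the driver template. A minimal sketch of how such an opt-out macro is commonly wired up; the attribute name and the CONSTIFY_PLUGIN guard are assumptions for illustration, not lifted from this patch:

        #include <stdbool.h>

        /* Sketch only: expands to nothing when the constify plugin is absent. */
        #ifdef CONSTIFY_PLUGIN
        # define __no_const __attribute__((no_const))
        #else
        # define __no_const
        #endif

        struct decoder_ops_like {
                bool (*dc_mce)(unsigned short, unsigned char);
                bool (*ic_mce)(unsigned short, unsigned char);
        } __no_const;   /* stays writable even when ops structs are constified */
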
30040diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
4c928ab7 30041index 153980b..4b4d046 100644
fe2de317
MT
30042--- a/drivers/firmware/dmi_scan.c
30043+++ b/drivers/firmware/dmi_scan.c
c52201e0 30044@@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
58c5fc13
MT
30045 }
30046 }
30047 else {
30048- /*
30049- * no iounmap() for that ioremap(); it would be a no-op, but
30050- * it's so early in setup that sucker gets confused into doing
30051- * what it shouldn't if we actually call it.
30052- */
30053 p = dmi_ioremap(0xF0000, 0x10000);
30054 if (p == NULL)
30055 goto error;
4c928ab7 30056@@ -723,7 +718,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
6e9df6a3
MT
30057 if (buf == NULL)
30058 return -1;
30059
30060- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
30061+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
30062
30063 iounmap(buf);
30064 return 0;
fe2de317 30065diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
5e856224 30066index 82d5c20..44a7177 100644
fe2de317
MT
30067--- a/drivers/gpio/gpio-vr41xx.c
30068+++ b/drivers/gpio/gpio-vr41xx.c
8308f9c9
MT
30069@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
30070 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
30071 maskl, pendl, maskh, pendh);
30072
30073- atomic_inc(&irq_err_count);
30074+ atomic_inc_unchecked(&irq_err_count);
30075
30076 return -EINVAL;
30077 }
fe2de317 30078diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
c6e2a6c8 30079index 8111889..367b253 100644
fe2de317
MT
30080--- a/drivers/gpu/drm/drm_crtc_helper.c
30081+++ b/drivers/gpu/drm/drm_crtc_helper.c
c6e2a6c8 30082@@ -286,7 +286,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
6892158b
MT
30083 struct drm_crtc *tmp;
30084 int crtc_mask = 1;
30085
bc901d79 30086- WARN(!crtc, "checking null crtc?\n");
6892158b
MT
30087+ BUG_ON(!crtc);
30088
30089 dev = crtc->dev;
30090
fe2de317 30091diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
c6e2a6c8 30092index 6116e3b..c29dd16 100644
fe2de317
MT
30093--- a/drivers/gpu/drm/drm_drv.c
30094+++ b/drivers/gpu/drm/drm_drv.c
c6e2a6c8 30095@@ -316,7 +316,7 @@ module_exit(drm_core_exit);
6e9df6a3
MT
30096 /**
30097 * Copy and IOCTL return string to user space
30098 */
30099-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
30100+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
30101 {
30102 int len;
30103
c6e2a6c8
MT
30104@@ -399,7 +399,7 @@ long drm_ioctl(struct file *filp,
30105 return -ENODEV;
ae4e228f 30106
58c5fc13
MT
30107 atomic_inc(&dev->ioctl_count);
30108- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
30109+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
30110 ++file_priv->ioctl_count;
30111
30112 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
fe2de317 30113diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
c6e2a6c8 30114index 123de28..43a0897 100644
fe2de317
MT
30115--- a/drivers/gpu/drm/drm_fops.c
30116+++ b/drivers/gpu/drm/drm_fops.c
4c928ab7 30117@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
58c5fc13
MT
30118 }
30119
30120 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
30121- atomic_set(&dev->counts[i], 0);
30122+ atomic_set_unchecked(&dev->counts[i], 0);
30123
30124 dev->sigdata.lock = NULL;
30125
c6e2a6c8 30126@@ -138,8 +138,8 @@ int drm_open(struct inode *inode, struct file *filp)
58c5fc13
MT
30127
30128 retcode = drm_open_helper(inode, filp, dev);
30129 if (!retcode) {
30130- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
6892158b 30131- if (!dev->open_count++)
58c5fc13 30132+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
c52201e0 30133+ if (local_inc_return(&dev->open_count) == 1)
58c5fc13 30134 retcode = drm_setup(dev);
6892158b
MT
30135 }
30136 if (!retcode) {
c6e2a6c8 30137@@ -482,7 +482,7 @@ int drm_release(struct inode *inode, struct file *filp)
58c5fc13 30138
6892158b 30139 mutex_lock(&drm_global_mutex);
58c5fc13
MT
30140
30141- DRM_DEBUG("open_count = %d\n", dev->open_count);
5e856224 30142+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
58c5fc13
MT
30143
30144 if (dev->driver->preclose)
30145 dev->driver->preclose(dev, file_priv);
c6e2a6c8 30146@@ -491,10 +491,10 @@ int drm_release(struct inode *inode, struct file *filp)
5e856224
MT
30147 * Begin inline drm_release
30148 */
30149
30150- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
30151+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
58c5fc13
MT
30152 task_pid_nr(current),
30153 (long)old_encode_dev(file_priv->minor->device),
30154- dev->open_count);
c52201e0 30155+ local_read(&dev->open_count));
58c5fc13 30156
4c928ab7
MT
30157 /* Release any auth tokens that might point to this file_priv,
30158 (do that under the drm_global_mutex) */
c6e2a6c8 30159@@ -584,8 +584,8 @@ int drm_release(struct inode *inode, struct file *filp)
58c5fc13
MT
30160 * End inline drm_release
30161 */
30162
30163- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
58c5fc13 30164- if (!--dev->open_count) {
6892158b 30165+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
c52201e0 30166+ if (local_dec_and_test(&dev->open_count)) {
58c5fc13
MT
30167 if (atomic_read(&dev->ioctl_count)) {
30168 DRM_ERROR("Device busy: %d\n",
30169 atomic_read(&dev->ioctl_count));
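
The open_count handling above is the other recurring conversion: dev->open_count becomes a local_t, and the open and release paths use local_inc_return()/local_dec_and_test() so that "first open" and "last release" are decided by the very operation that updates the counter rather than by a separate read. A rough userspace analogue of that idiom, written with C11 atomics instead of the kernel's local_t only so the sketch compiles on its own:

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        static atomic_long open_count;

        static bool device_open(void)
        {
                /* the first opener sees the count go 0 -> 1 */
                return atomic_fetch_add(&open_count, 1) + 1 == 1;
        }

        static bool device_release(void)
        {
                /* the last closer sees the count go 1 -> 0 */
                return atomic_fetch_sub(&open_count, 1) - 1 == 0;
        }

        int main(void)
        {
                printf("first open: %d\n", device_open());
                printf("last close: %d\n", device_release());
                return 0;
        }
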
fe2de317
MT
30170diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
30171index c87dc96..326055d 100644
30172--- a/drivers/gpu/drm/drm_global.c
30173+++ b/drivers/gpu/drm/drm_global.c
6892158b
MT
30174@@ -36,7 +36,7 @@
30175 struct drm_global_item {
30176 struct mutex mutex;
30177 void *object;
30178- int refcount;
30179+ atomic_t refcount;
30180 };
30181
30182 static struct drm_global_item glob[DRM_GLOBAL_NUM];
30183@@ -49,7 +49,7 @@ void drm_global_init(void)
30184 struct drm_global_item *item = &glob[i];
30185 mutex_init(&item->mutex);
30186 item->object = NULL;
30187- item->refcount = 0;
30188+ atomic_set(&item->refcount, 0);
30189 }
30190 }
30191
30192@@ -59,7 +59,7 @@ void drm_global_release(void)
30193 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
30194 struct drm_global_item *item = &glob[i];
30195 BUG_ON(item->object != NULL);
30196- BUG_ON(item->refcount != 0);
30197+ BUG_ON(atomic_read(&item->refcount) != 0);
30198 }
30199 }
30200
fe2de317 30201@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
6892158b
MT
30202 void *object;
30203
30204 mutex_lock(&item->mutex);
30205- if (item->refcount == 0) {
30206+ if (atomic_read(&item->refcount) == 0) {
30207 item->object = kzalloc(ref->size, GFP_KERNEL);
30208 if (unlikely(item->object == NULL)) {
30209 ret = -ENOMEM;
fe2de317 30210@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
6892158b
MT
30211 goto out_err;
30212
30213 }
30214- ++item->refcount;
30215+ atomic_inc(&item->refcount);
30216 ref->object = item->object;
30217 object = item->object;
30218 mutex_unlock(&item->mutex);
fe2de317 30219@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
6892158b
MT
30220 struct drm_global_item *item = &glob[ref->global_type];
30221
30222 mutex_lock(&item->mutex);
30223- BUG_ON(item->refcount == 0);
30224+ BUG_ON(atomic_read(&item->refcount) == 0);
30225 BUG_ON(ref->object != item->object);
30226- if (--item->refcount == 0) {
30227+ if (atomic_dec_and_test(&item->refcount)) {
30228 ref->release(ref);
30229 item->object = NULL;
30230 }
fe2de317
MT
30231diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
30232index ab1162d..42587b2 100644
30233--- a/drivers/gpu/drm/drm_info.c
30234+++ b/drivers/gpu/drm/drm_info.c
30235@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
6892158b
MT
30236 struct drm_local_map *map;
30237 struct drm_map_list *r_list;
30238
30239- /* Hardcoded from _DRM_FRAME_BUFFER,
30240- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
30241- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
30242- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
30243+ static const char * const types[] = {
30244+ [_DRM_FRAME_BUFFER] = "FB",
30245+ [_DRM_REGISTERS] = "REG",
30246+ [_DRM_SHM] = "SHM",
30247+ [_DRM_AGP] = "AGP",
30248+ [_DRM_SCATTER_GATHER] = "SG",
30249+ [_DRM_CONSISTENT] = "PCI",
30250+ [_DRM_GEM] = "GEM" };
30251 const char *type;
30252 int i;
30253
fe2de317 30254@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
6892158b
MT
30255 map = r_list->map;
30256 if (!map)
30257 continue;
30258- if (map->type < 0 || map->type > 5)
30259+ if (map->type >= ARRAY_SIZE(types))
30260 type = "??";
30261 else
30262 type = types[map->type];
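
The drm_vm_info() hunk above swaps a positional string array plus a hand-written 0..5 range test for designated initializers indexed by the map-type constants and an ARRAY_SIZE() bound, so a new entry such as _DRM_GEM cannot silently fall out of sync with the bounds check. A small, self-contained illustration of the same pattern; the enum values here are invented for the demo, not the DRM ones:

        #include <stdio.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

        static const char * const types[] = {
                [MAP_FB]  = "FB",
                [MAP_REG] = "REG",
                [MAP_SHM] = "SHM",
                [MAP_AGP] = "AGP",
                [MAP_SG]  = "SG",
                [MAP_PCI] = "PCI",
                [MAP_GEM] = "GEM",
        };

        static const char *type_name(unsigned int t)
        {
                /* bound follows the table automatically; gaps fall back to "??" */
                return (t < ARRAY_SIZE(types) && types[t]) ? types[t] : "??";
        }

        int main(void)
        {
                printf("%s %s\n", type_name(MAP_GEM), type_name(42));
                return 0;
        }
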
fe2de317 30263@@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, void *data)
16454cff
MT
30264 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
30265 vma->vm_flags & VM_LOCKED ? 'l' : '-',
30266 vma->vm_flags & VM_IO ? 'i' : '-',
30267+#ifdef CONFIG_GRKERNSEC_HIDESYM
30268+ 0);
30269+#else
30270 vma->vm_pgoff);
30271+#endif
30272
30273 #if defined(__i386__)
30274 pgprot = pgprot_val(vma->vm_page_prot);
fe2de317 30275diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
5e856224 30276index 637fcc3..e890b33 100644
fe2de317
MT
30277--- a/drivers/gpu/drm/drm_ioc32.c
30278+++ b/drivers/gpu/drm/drm_ioc32.c
5e856224 30279@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
6e9df6a3
MT
30280 request = compat_alloc_user_space(nbytes);
30281 if (!access_ok(VERIFY_WRITE, request, nbytes))
30282 return -EFAULT;
30283- list = (struct drm_buf_desc *) (request + 1);
30284+ list = (struct drm_buf_desc __user *) (request + 1);
30285
30286 if (__put_user(count, &request->count)
30287 || __put_user(list, &request->list))
5e856224 30288@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
6e9df6a3
MT
30289 request = compat_alloc_user_space(nbytes);
30290 if (!access_ok(VERIFY_WRITE, request, nbytes))
30291 return -EFAULT;
30292- list = (struct drm_buf_pub *) (request + 1);
30293+ list = (struct drm_buf_pub __user *) (request + 1);
30294
30295 if (__put_user(count, &request->count)
30296 || __put_user(list, &request->list))
fe2de317 30297diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
c6e2a6c8 30298index cf85155..f2665cb 100644
fe2de317
MT
30299--- a/drivers/gpu/drm/drm_ioctl.c
30300+++ b/drivers/gpu/drm/drm_ioctl.c
c6e2a6c8 30301@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
58c5fc13
MT
30302 stats->data[i].value =
30303 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
30304 else
30305- stats->data[i].value = atomic_read(&dev->counts[i]);
30306+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
30307 stats->data[i].type = dev->types[i];
30308 }
30309
fe2de317 30310diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
5e856224 30311index c79c713..2048588 100644
fe2de317
MT
30312--- a/drivers/gpu/drm/drm_lock.c
30313+++ b/drivers/gpu/drm/drm_lock.c
5e856224 30314@@ -90,7 +90,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
58c5fc13
MT
30315 if (drm_lock_take(&master->lock, lock->context)) {
30316 master->lock.file_priv = file_priv;
30317 master->lock.lock_time = jiffies;
30318- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
30319+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
30320 break; /* Got lock */
30321 }
30322
5e856224 30323@@ -161,7 +161,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
58c5fc13
MT
30324 return -EINVAL;
30325 }
30326
30327- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
30328+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
30329
bc901d79
MT
30330 if (drm_lock_free(&master->lock, lock->context)) {
30331 /* FIXME: Should really bail out here. */
c6e2a6c8
MT
30332diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
30333index aa454f8..6d38580 100644
30334--- a/drivers/gpu/drm/drm_stub.c
30335+++ b/drivers/gpu/drm/drm_stub.c
30336@@ -512,7 +512,7 @@ void drm_unplug_dev(struct drm_device *dev)
30337
30338 drm_device_set_unplugged(dev);
30339
30340- if (dev->open_count == 0) {
30341+ if (local_read(&dev->open_count) == 0) {
30342 drm_put_dev(dev);
30343 }
30344 mutex_unlock(&drm_global_mutex);
fe2de317 30345diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
c6e2a6c8 30346index f920fb5..001c52d 100644
fe2de317
MT
30347--- a/drivers/gpu/drm/i810/i810_dma.c
30348+++ b/drivers/gpu/drm/i810/i810_dma.c
c6e2a6c8 30349@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
58c5fc13
MT
30350 dma->buflist[vertex->idx],
30351 vertex->discard, vertex->used);
30352
30353- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30354- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30355+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
30356+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30357 sarea_priv->last_enqueue = dev_priv->counter - 1;
30358 sarea_priv->last_dispatch = (int)hw_status[5];
30359
c6e2a6c8 30360@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
58c5fc13
MT
30361 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
30362 mc->last_render);
30363
30364- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30365- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
30366+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
30367+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
30368 sarea_priv->last_enqueue = dev_priv->counter - 1;
30369 sarea_priv->last_dispatch = (int)hw_status[5];
30370
fe2de317
MT
30371diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
30372index c9339f4..f5e1b9d 100644
30373--- a/drivers/gpu/drm/i810/i810_drv.h
30374+++ b/drivers/gpu/drm/i810/i810_drv.h
8308f9c9
MT
30375@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
30376 int page_flipping;
30377
30378 wait_queue_head_t irq_queue;
30379- atomic_t irq_received;
30380- atomic_t irq_emitted;
30381+ atomic_unchecked_t irq_received;
30382+ atomic_unchecked_t irq_emitted;
30383
30384 int front_offset;
30385 } drm_i810_private_t;
fe2de317 30386diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
c6e2a6c8 30387index e6162a1..b2ff486 100644
fe2de317
MT
30388--- a/drivers/gpu/drm/i915/i915_debugfs.c
30389+++ b/drivers/gpu/drm/i915/i915_debugfs.c
c6e2a6c8 30390@@ -500,7 +500,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
8308f9c9
MT
30391 I915_READ(GTIMR));
30392 }
30393 seq_printf(m, "Interrupts received: %d\n",
30394- atomic_read(&dev_priv->irq_received));
30395+ atomic_read_unchecked(&dev_priv->irq_received));
30396 for (i = 0; i < I915_NUM_RINGS; i++) {
6e9df6a3 30397 if (IS_GEN6(dev) || IS_GEN7(dev)) {
8308f9c9 30398 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
c6e2a6c8 30399@@ -1313,7 +1313,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
6e9df6a3
MT
30400 return ret;
30401
30402 if (opregion->header)
30403- seq_write(m, opregion->header, OPREGION_SIZE);
30404+ seq_write(m, (const void __force_kernel *)opregion->header, OPREGION_SIZE);
30405
30406 mutex_unlock(&dev->struct_mutex);
30407
fe2de317 30408diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
c6e2a6c8 30409index ba60f3c..e2dff7f 100644
fe2de317
MT
30410--- a/drivers/gpu/drm/i915/i915_dma.c
30411+++ b/drivers/gpu/drm/i915/i915_dma.c
c6e2a6c8 30412@@ -1178,7 +1178,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
efbe55a5
MT
30413 bool can_switch;
30414
30415 spin_lock(&dev->count_lock);
30416- can_switch = (dev->open_count == 0);
c52201e0 30417+ can_switch = (local_read(&dev->open_count) == 0);
efbe55a5
MT
30418 spin_unlock(&dev->count_lock);
30419 return can_switch;
30420 }
fe2de317 30421diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
c6e2a6c8 30422index 5fabc6c..0b08aa1 100644
fe2de317
MT
30423--- a/drivers/gpu/drm/i915/i915_drv.h
30424+++ b/drivers/gpu/drm/i915/i915_drv.h
c6e2a6c8 30425@@ -240,7 +240,7 @@ struct drm_i915_display_funcs {
15a11c5b
MT
30426 /* render clock increase/decrease */
30427 /* display clock increase/decrease */
30428 /* pll clock increase/decrease */
30429-};
30430+} __no_const;
30431
30432 struct intel_device_info {
30433 u8 gen;
c6e2a6c8 30434@@ -350,7 +350,7 @@ typedef struct drm_i915_private {
8308f9c9
MT
30435 int current_page;
30436 int page_flipping;
30437
30438- atomic_t irq_received;
30439+ atomic_unchecked_t irq_received;
8308f9c9
MT
30440
30441 /* protects the irq masks */
66a7e928 30442 spinlock_t irq_lock;
c6e2a6c8 30443@@ -937,7 +937,7 @@ struct drm_i915_gem_object {
8308f9c9
MT
30444 * will be page flipped away on the next vblank. When it
30445 * reaches 0, dev_priv->pending_flip_queue will be woken up.
30446 */
30447- atomic_t pending_flip;
30448+ atomic_unchecked_t pending_flip;
30449 };
30450
30451 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
c6e2a6c8 30452@@ -1359,7 +1359,7 @@ extern int intel_setup_gmbus(struct drm_device *dev);
71d190be
MT
30453 extern void intel_teardown_gmbus(struct drm_device *dev);
30454 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
30455 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
30456-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30457+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
30458 {
30459 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
30460 }
fe2de317 30461diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
c6e2a6c8 30462index de43194..a14c4cc 100644
fe2de317
MT
30463--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
30464+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
4c928ab7 30465@@ -189,7 +189,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
15a11c5b 30466 i915_gem_clflush_object(obj);
66a7e928
MT
30467
30468 if (obj->base.pending_write_domain)
30469- cd->flips |= atomic_read(&obj->pending_flip);
30470+ cd->flips |= atomic_read_unchecked(&obj->pending_flip);
30471
30472 /* The actual obj->write_domain will be updated with
30473 * pending_write_domain after we emit the accumulated flush for all
c6e2a6c8 30474@@ -933,9 +933,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
fe2de317
MT
30475
30476 static int
30477 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
30478- int count)
30479+ unsigned int count)
30480 {
30481- int i;
30482+ unsigned int i;
30483
30484 for (i = 0; i < count; i++) {
30485 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
30486diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
572b4308 30487index 26c67a7..8d4cbcb 100644
fe2de317
MT
30488--- a/drivers/gpu/drm/i915/i915_irq.c
30489+++ b/drivers/gpu/drm/i915/i915_irq.c
572b4308 30490@@ -496,7 +496,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
15a11c5b
MT
30491 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
30492 struct drm_i915_master_private *master_priv;
30493
30494- atomic_inc(&dev_priv->irq_received);
30495+ atomic_inc_unchecked(&dev_priv->irq_received);
30496
30497 /* disable master interrupt before clearing iir */
30498 de_ier = I915_READ(DEIER);
572b4308 30499@@ -579,7 +579,7 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
15a11c5b
MT
30500 struct drm_i915_master_private *master_priv;
30501 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
30502
30503- atomic_inc(&dev_priv->irq_received);
30504+ atomic_inc_unchecked(&dev_priv->irq_received);
30505
30506 if (IS_GEN6(dev))
30507 bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
572b4308 30508@@ -1291,7 +1291,7 @@ static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
66a7e928
MT
30509 int ret = IRQ_NONE, pipe;
30510 bool blc_event = false;
8308f9c9
MT
30511
30512- atomic_inc(&dev_priv->irq_received);
30513+ atomic_inc_unchecked(&dev_priv->irq_received);
30514
15a11c5b
MT
30515 iir = I915_READ(IIR);
30516
572b4308 30517@@ -1802,7 +1802,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
15a11c5b
MT
30518 {
30519 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
30520
30521- atomic_set(&dev_priv->irq_received, 0);
30522+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30523
30524 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30525 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
572b4308 30526@@ -1979,7 +1979,7 @@ static void i915_driver_irq_preinstall(struct drm_device * dev)
8308f9c9 30527 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
66a7e928 30528 int pipe;
8308f9c9
MT
30529
30530- atomic_set(&dev_priv->irq_received, 0);
30531+ atomic_set_unchecked(&dev_priv->irq_received, 0);
30532
30533 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
30534 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
fe2de317 30535diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
572b4308 30536index d4d162f..b49a04e 100644
fe2de317
MT
30537--- a/drivers/gpu/drm/i915/intel_display.c
30538+++ b/drivers/gpu/drm/i915/intel_display.c
c6e2a6c8 30539@@ -2254,7 +2254,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
8308f9c9 30540
c6e2a6c8
MT
30541 wait_event(dev_priv->pending_flip_queue,
30542 atomic_read(&dev_priv->mm.wedged) ||
30543- atomic_read(&obj->pending_flip) == 0);
30544+ atomic_read_unchecked(&obj->pending_flip) == 0);
8308f9c9 30545
c6e2a6c8
MT
30546 /* Big Hammer, we also need to ensure that any pending
30547 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
30548@@ -2919,7 +2919,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
8308f9c9
MT
30549 obj = to_intel_framebuffer(crtc->fb)->obj;
30550 dev_priv = crtc->dev->dev_private;
30551 wait_event(dev_priv->pending_flip_queue,
30552- atomic_read(&obj->pending_flip) == 0);
30553+ atomic_read_unchecked(&obj->pending_flip) == 0);
30554 }
30555
30556 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
572b4308 30557@@ -7284,9 +7284,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
8308f9c9 30558
572b4308
MT
30559 obj = work->old_fb_obj;
30560
30561- atomic_clear_mask(1 << intel_crtc->plane,
30562- &obj->pending_flip.counter);
8308f9c9 30563- if (atomic_read(&obj->pending_flip) == 0)
572b4308 30564+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
8308f9c9
MT
30565+ if (atomic_read_unchecked(&obj->pending_flip) == 0)
30566 wake_up(&dev_priv->pending_flip_queue);
30567
30568 schedule_work(&work->work);
572b4308 30569@@ -7582,7 +7581,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
8308f9c9
MT
30570 /* Block clients from rendering to the new back buffer until
30571 * the flip occurs and the object is no longer visible.
30572 */
30573- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30574+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30575
15a11c5b
MT
30576 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
30577 if (ret)
572b4308 30578@@ -7596,7 +7595,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
15a11c5b
MT
30579 return 0;
30580
30581 cleanup_pending:
30582- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
30583+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
15a11c5b
MT
30584 drm_gem_object_unreference(&work->old_fb_obj->base);
30585 drm_gem_object_unreference(&obj->base);
4c928ab7 30586 mutex_unlock(&dev->struct_mutex);
fe2de317
MT
30587diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
30588index 54558a0..2d97005 100644
30589--- a/drivers/gpu/drm/mga/mga_drv.h
30590+++ b/drivers/gpu/drm/mga/mga_drv.h
8308f9c9
MT
30591@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
30592 u32 clear_cmd;
30593 u32 maccess;
30594
30595- atomic_t vbl_received; /**< Number of vblanks received. */
30596+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
30597 wait_queue_head_t fence_queue;
30598- atomic_t last_fence_retired;
30599+ atomic_unchecked_t last_fence_retired;
30600 u32 next_fence_to_post;
30601
30602 unsigned int fb_cpp;
fe2de317
MT
30603diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
30604index 2581202..f230a8d9 100644
30605--- a/drivers/gpu/drm/mga/mga_irq.c
30606+++ b/drivers/gpu/drm/mga/mga_irq.c
30607@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
8308f9c9
MT
30608 if (crtc != 0)
30609 return 0;
30610
30611- return atomic_read(&dev_priv->vbl_received);
30612+ return atomic_read_unchecked(&dev_priv->vbl_received);
30613 }
30614
30615
fe2de317 30616@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
30617 /* VBLANK interrupt */
30618 if (status & MGA_VLINEPEN) {
30619 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
30620- atomic_inc(&dev_priv->vbl_received);
30621+ atomic_inc_unchecked(&dev_priv->vbl_received);
30622 drm_handle_vblank(dev, 0);
30623 handled = 1;
30624 }
fe2de317 30625@@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
30626 if ((prim_start & ~0x03) != (prim_end & ~0x03))
30627 MGA_WRITE(MGA_PRIMEND, prim_end);
30628
30629- atomic_inc(&dev_priv->last_fence_retired);
30630+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
30631 DRM_WAKEUP(&dev_priv->fence_queue);
30632 handled = 1;
30633 }
fe2de317 30634@@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
8308f9c9
MT
30635 * using fences.
30636 */
30637 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
30638- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
30639+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
30640 - *sequence) <= (1 << 23)));
30641
30642 *sequence = cur_fence;
fe2de317 30643diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
c6e2a6c8 30644index 0be4a81..7464804 100644
fe2de317
MT
30645--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
30646+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
c6e2a6c8 30647@@ -5329,7 +5329,7 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
15a11c5b
MT
30648 struct bit_table {
30649 const char id;
30650 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
30651-};
30652+} __no_const;
30653
30654 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
30655
fe2de317 30656diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
c6e2a6c8 30657index 3aef353..0ad1322 100644
fe2de317
MT
30658--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
30659+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
c6e2a6c8 30660@@ -240,7 +240,7 @@ struct nouveau_channel {
8308f9c9
MT
30661 struct list_head pending;
30662 uint32_t sequence;
30663 uint32_t sequence_ack;
30664- atomic_t last_sequence_irq;
30665+ atomic_unchecked_t last_sequence_irq;
6e9df6a3 30666 struct nouveau_vma vma;
8308f9c9
MT
30667 } fence;
30668
c6e2a6c8 30669@@ -321,7 +321,7 @@ struct nouveau_exec_engine {
15a11c5b
MT
30670 u32 handle, u16 class);
30671 void (*set_tile_region)(struct drm_device *dev, int i);
30672 void (*tlb_flush)(struct drm_device *, int engine);
30673-};
30674+} __no_const;
30675
30676 struct nouveau_instmem_engine {
30677 void *priv;
c6e2a6c8 30678@@ -343,13 +343,13 @@ struct nouveau_instmem_engine {
15a11c5b
MT
30679 struct nouveau_mc_engine {
30680 int (*init)(struct drm_device *dev);
30681 void (*takedown)(struct drm_device *dev);
30682-};
30683+} __no_const;
30684
30685 struct nouveau_timer_engine {
30686 int (*init)(struct drm_device *dev);
30687 void (*takedown)(struct drm_device *dev);
30688 uint64_t (*read)(struct drm_device *dev);
30689-};
30690+} __no_const;
30691
30692 struct nouveau_fb_engine {
30693 int num_tiles;
c6e2a6c8 30694@@ -590,7 +590,7 @@ struct nouveau_vram_engine {
15a11c5b
MT
30695 void (*put)(struct drm_device *, struct nouveau_mem **);
30696
30697 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
30698-};
30699+} __no_const;
66a7e928
MT
30700
30701 struct nouveau_engine {
30702 struct nouveau_instmem_engine instmem;
c6e2a6c8 30703@@ -739,7 +739,7 @@ struct drm_nouveau_private {
8308f9c9
MT
30704 struct drm_global_reference mem_global_ref;
30705 struct ttm_bo_global_ref bo_global_ref;
30706 struct ttm_bo_device bdev;
30707- atomic_t validate_sequence;
30708+ atomic_unchecked_t validate_sequence;
30709 } ttm;
30710
30711 struct {
fe2de317 30712diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
c6e2a6c8 30713index c1dc20f..4df673c 100644
fe2de317
MT
30714--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
30715+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
30716@@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
8308f9c9
MT
30717 if (USE_REFCNT(dev))
30718 sequence = nvchan_rd32(chan, 0x48);
30719 else
30720- sequence = atomic_read(&chan->fence.last_sequence_irq);
30721+ sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
30722
30723 if (chan->fence.sequence_ack == sequence)
30724 goto out;
c6e2a6c8 30725@@ -538,7 +538,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
883a9837
MT
30726 return ret;
30727 }
15a11c5b 30728
8308f9c9
MT
30729- atomic_set(&chan->fence.last_sequence_irq, 0);
30730+ atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
8308f9c9
MT
30731 return 0;
30732 }
66a7e928 30733
fe2de317 30734diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
c6e2a6c8 30735index ed52a6f..484acdc 100644
fe2de317
MT
30736--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
30737+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
30738@@ -314,7 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
8308f9c9
MT
30739 int trycnt = 0;
30740 int ret, i;
30741
30742- sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
30743+ sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
30744 retry:
30745 if (++trycnt > 100000) {
30746 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
fe2de317 30747diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
c6e2a6c8 30748index c2a8511..4b996f9 100644
fe2de317
MT
30749--- a/drivers/gpu/drm/nouveau/nouveau_state.c
30750+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
c6e2a6c8 30751@@ -588,7 +588,7 @@ static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
efbe55a5
MT
30752 bool can_switch;
30753
30754 spin_lock(&dev->count_lock);
30755- can_switch = (dev->open_count == 0);
c52201e0 30756+ can_switch = (local_read(&dev->open_count) == 0);
efbe55a5
MT
30757 spin_unlock(&dev->count_lock);
30758 return can_switch;
30759 }
fe2de317
MT
30760diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
30761index dbdea8e..cd6eeeb 100644
30762--- a/drivers/gpu/drm/nouveau/nv04_graph.c
30763+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
6e9df6a3 30764@@ -554,7 +554,7 @@ static int
8308f9c9
MT
30765 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
30766 u32 class, u32 mthd, u32 data)
30767 {
30768- atomic_set(&chan->fence.last_sequence_irq, data);
30769+ atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
30770 return 0;
30771 }
30772
c6e2a6c8
MT
30773diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
30774index 2746402..c8dc4a4 100644
30775--- a/drivers/gpu/drm/nouveau/nv50_sor.c
30776+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
30777@@ -304,7 +304,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
30778 }
30779
30780 if (nv_encoder->dcb->type == OUTPUT_DP) {
30781- struct dp_train_func func = {
30782+ static struct dp_train_func func = {
30783 .link_set = nv50_sor_dp_link_set,
30784 .train_set = nv50_sor_dp_train_set,
30785 .train_adj = nv50_sor_dp_train_adj
30786diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
30787index 0247250..d2f6aaf 100644
30788--- a/drivers/gpu/drm/nouveau/nvd0_display.c
30789+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
30790@@ -1366,7 +1366,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
30791 nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
30792
30793 if (nv_encoder->dcb->type == OUTPUT_DP) {
30794- struct dp_train_func func = {
30795+ static struct dp_train_func func = {
30796 .link_set = nvd0_sor_dp_link_set,
30797 .train_set = nvd0_sor_dp_train_set,
30798 .train_adj = nvd0_sor_dp_train_adj
fe2de317 30799diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
4c928ab7 30800index bcac90b..53bfc76 100644
fe2de317
MT
30801--- a/drivers/gpu/drm/r128/r128_cce.c
30802+++ b/drivers/gpu/drm/r128/r128_cce.c
4c928ab7 30803@@ -378,7 +378,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
8308f9c9
MT
30804
30805 /* GH: Simple idle check.
30806 */
30807- atomic_set(&dev_priv->idle_count, 0);
30808+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30809
30810 /* We don't support anything other than bus-mastering ring mode,
30811 * but the ring can be in either AGP or PCI space for the ring
fe2de317
MT
30812diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
30813index 930c71b..499aded 100644
30814--- a/drivers/gpu/drm/r128/r128_drv.h
30815+++ b/drivers/gpu/drm/r128/r128_drv.h
8308f9c9
MT
30816@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
30817 int is_pci;
30818 unsigned long cce_buffers_offset;
30819
30820- atomic_t idle_count;
30821+ atomic_unchecked_t idle_count;
30822
30823 int page_flipping;
30824 int current_page;
30825 u32 crtc_offset;
30826 u32 crtc_offset_cntl;
30827
30828- atomic_t vbl_received;
30829+ atomic_unchecked_t vbl_received;
30830
30831 u32 color_fmt;
30832 unsigned int front_offset;
fe2de317
MT
30833diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
30834index 429d5a0..7e899ed 100644
30835--- a/drivers/gpu/drm/r128/r128_irq.c
30836+++ b/drivers/gpu/drm/r128/r128_irq.c
30837@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
8308f9c9
MT
30838 if (crtc != 0)
30839 return 0;
30840
30841- return atomic_read(&dev_priv->vbl_received);
30842+ return atomic_read_unchecked(&dev_priv->vbl_received);
30843 }
30844
30845 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
fe2de317 30846@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
30847 /* VBLANK interrupt */
30848 if (status & R128_CRTC_VBLANK_INT) {
30849 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30850- atomic_inc(&dev_priv->vbl_received);
30851+ atomic_inc_unchecked(&dev_priv->vbl_received);
30852 drm_handle_vblank(dev, 0);
30853 return IRQ_HANDLED;
30854 }
fe2de317
MT
30855diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
30856index a9e33ce..09edd4b 100644
30857--- a/drivers/gpu/drm/r128/r128_state.c
30858+++ b/drivers/gpu/drm/r128/r128_state.c
30859@@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
8308f9c9
MT
30860
30861 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
30862 {
30863- if (atomic_read(&dev_priv->idle_count) == 0)
30864+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
30865 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30866 else
30867- atomic_set(&dev_priv->idle_count, 0);
30868+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30869 }
30870
30871 #endif
fe2de317
MT
30872diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
30873index 5a82b6b..9e69c73 100644
30874--- a/drivers/gpu/drm/radeon/mkregtable.c
30875+++ b/drivers/gpu/drm/radeon/mkregtable.c
30876@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
ae4e228f
MT
30877 regex_t mask_rex;
30878 regmatch_t match[4];
30879 char buf[1024];
30880- size_t end;
30881+ long end;
30882 int len;
30883 int done = 0;
30884 int r;
30885 unsigned o;
30886 struct offset *offset;
30887 char last_reg_s[10];
30888- int last_reg;
30889+ unsigned long last_reg;
30890
30891 if (regcomp
30892 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
fe2de317 30893diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
c6e2a6c8 30894index 138b952..d74f9cb 100644
fe2de317
MT
30895--- a/drivers/gpu/drm/radeon/radeon.h
30896+++ b/drivers/gpu/drm/radeon/radeon.h
c6e2a6c8 30897@@ -253,7 +253,7 @@ struct radeon_fence_driver {
fe2de317 30898 uint32_t scratch_reg;
5e856224
MT
30899 uint64_t gpu_addr;
30900 volatile uint32_t *cpu_addr;
fe2de317
MT
30901- atomic_t seq;
30902+ atomic_unchecked_t seq;
30903 uint32_t last_seq;
30904 unsigned long last_jiffies;
30905 unsigned long last_timeout;
c6e2a6c8 30906@@ -753,7 +753,7 @@ struct r600_blit_cp_primitives {
4c928ab7
MT
30907 int x2, int y2);
30908 void (*draw_auto)(struct radeon_device *rdev);
30909 void (*set_default_state)(struct radeon_device *rdev);
30910-};
30911+} __no_const;
30912
30913 struct r600_blit {
30914 struct mutex mutex;
c6e2a6c8
MT
30915@@ -1246,7 +1246,7 @@ struct radeon_asic {
30916 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
30917 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
30918 } pflip;
fe2de317
MT
30919-};
30920+} __no_const;
30921
30922 /*
30923 * Asic structures
fe2de317 30924diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
c6e2a6c8 30925index 5992502..c19c633 100644
fe2de317
MT
30926--- a/drivers/gpu/drm/radeon/radeon_device.c
30927+++ b/drivers/gpu/drm/radeon/radeon_device.c
c6e2a6c8 30928@@ -691,7 +691,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
df50ba0c 30929 bool can_switch;
58c5fc13 30930
df50ba0c
MT
30931 spin_lock(&dev->count_lock);
30932- can_switch = (dev->open_count == 0);
c52201e0 30933+ can_switch = (local_read(&dev->open_count) == 0);
df50ba0c
MT
30934 spin_unlock(&dev->count_lock);
30935 return can_switch;
30936 }
fe2de317
MT
30937diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
30938index a1b59ca..86f2d44 100644
30939--- a/drivers/gpu/drm/radeon/radeon_drv.h
30940+++ b/drivers/gpu/drm/radeon/radeon_drv.h
8308f9c9
MT
30941@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
30942
30943 /* SW interrupt */
30944 wait_queue_head_t swi_queue;
30945- atomic_t swi_emitted;
30946+ atomic_unchecked_t swi_emitted;
30947 int vblank_crtc;
30948 uint32_t irq_enable_reg;
30949 uint32_t r500_disp_irq_reg;
fe2de317 30950diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
5e856224 30951index 4bd36a3..e66fe9c 100644
fe2de317
MT
30952--- a/drivers/gpu/drm/radeon/radeon_fence.c
30953+++ b/drivers/gpu/drm/radeon/radeon_fence.c
5e856224
MT
30954@@ -70,7 +70,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
30955 write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
8308f9c9
MT
30956 return 0;
30957 }
5e856224
MT
30958- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
30959+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv[fence->ring].seq);
30960 if (!rdev->ring[fence->ring].ready)
8308f9c9
MT
30961 /* FIXME: cp is not running assume everythings is done right
30962 * away
5e856224
MT
30963@@ -405,7 +405,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
30964 }
30965 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
30966 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
30967- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
30968+ radeon_fence_write(rdev, atomic_read_unchecked(&rdev->fence_drv[ring].seq), ring);
30969 rdev->fence_drv[ring].initialized = true;
30970 DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
30971 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
30972@@ -418,7 +418,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
30973 rdev->fence_drv[ring].scratch_reg = -1;
30974 rdev->fence_drv[ring].cpu_addr = NULL;
30975 rdev->fence_drv[ring].gpu_addr = 0;
30976- atomic_set(&rdev->fence_drv[ring].seq, 0);
30977+ atomic_set_unchecked(&rdev->fence_drv[ring].seq, 0);
30978 INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
30979 INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
30980 INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
fe2de317
MT
30981diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
30982index 48b7cea..342236f 100644
30983--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
30984+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
30985@@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
71d190be
MT
30986 request = compat_alloc_user_space(sizeof(*request));
30987 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30988 || __put_user(req32.param, &request->param)
30989- || __put_user((void __user *)(unsigned long)req32.value,
30990+ || __put_user((unsigned long)req32.value,
30991 &request->value))
30992 return -EFAULT;
30993
fe2de317 30994diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
4c928ab7 30995index 00da384..32f972d 100644
fe2de317
MT
30996--- a/drivers/gpu/drm/radeon/radeon_irq.c
30997+++ b/drivers/gpu/drm/radeon/radeon_irq.c
30998@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
8308f9c9
MT
30999 unsigned int ret;
31000 RING_LOCALS;
31001
31002- atomic_inc(&dev_priv->swi_emitted);
31003- ret = atomic_read(&dev_priv->swi_emitted);
31004+ atomic_inc_unchecked(&dev_priv->swi_emitted);
31005+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
31006
31007 BEGIN_RING(4);
31008 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
fe2de317 31009@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
8308f9c9
MT
31010 drm_radeon_private_t *dev_priv =
31011 (drm_radeon_private_t *) dev->dev_private;
31012
31013- atomic_set(&dev_priv->swi_emitted, 0);
31014+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
31015 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
31016
31017 dev->max_vblank_count = 0x001fffff;
fe2de317 31018diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
4c928ab7 31019index e8422ae..d22d4a8 100644
fe2de317
MT
31020--- a/drivers/gpu/drm/radeon/radeon_state.c
31021+++ b/drivers/gpu/drm/radeon/radeon_state.c
31022@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
ae4e228f
MT
31023 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
31024 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
31025
31026- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31027+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
31028 sarea_priv->nbox * sizeof(depth_boxes[0])))
31029 return -EFAULT;
31030
fe2de317 31031@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
58c5fc13
MT
31032 {
31033 drm_radeon_private_t *dev_priv = dev->dev_private;
31034 drm_radeon_getparam_t *param = data;
31035- int value;
31036+ int value = 0;
31037
31038 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
31039
fe2de317 31040diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
c6e2a6c8 31041index f493c64..524ab6b 100644
fe2de317
MT
31042--- a/drivers/gpu/drm/radeon/radeon_ttm.c
31043+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
c6e2a6c8 31044@@ -843,8 +843,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
15a11c5b
MT
31045 }
31046 if (unlikely(ttm_vm_ops == NULL)) {
31047 ttm_vm_ops = vma->vm_ops;
58c5fc13
MT
31048- radeon_ttm_vm_ops = *ttm_vm_ops;
31049- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
15a11c5b
MT
31050+ pax_open_kernel();
31051+ memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
31052+ *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
31053+ pax_close_kernel();
31054 }
57199397
MT
31055 vma->vm_ops = &radeon_ttm_vm_ops;
31056 return 0;
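
radeon_mmap() originally copied the TTM vm_ops with a struct assignment and then overwrote .fault; once the ops object is constified it lives in read-only memory, so the patch copies it with memcpy() and patches the fault pointer inside a pax_open_kernel()/pax_close_kernel() window that briefly permits the write. A userspace analogue of that "open a write window, patch one pointer, close it again" sequence, using mprotect() purely for illustration:

        #include <stdio.h>
        #include <sys/mman.h>
        #include <unistd.h>

        struct vm_ops_like { int (*fault)(void); };

        static int default_fault(void) { return -1; }
        static int patched_fault(void) { return 0; }

        int main(void)
        {
                long pagesz = sysconf(_SC_PAGESIZE);
                struct vm_ops_like *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

                if (ops == MAP_FAILED)
                        return 1;

                ops->fault = default_fault;
                mprotect(ops, pagesz, PROT_READ);               /* now effectively const  */

                mprotect(ops, pagesz, PROT_READ | PROT_WRITE);  /* pax_open_kernel() role */
                ops->fault = patched_fault;                     /* targeted pointer patch */
                mprotect(ops, pagesz, PROT_READ);               /* pax_close_kernel() role*/

                printf("fault() -> %d\n", ops->fault());
                return 0;
        }
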
fe2de317 31057diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
c6e2a6c8 31058index f2c3b9d..d5a376b 100644
fe2de317
MT
31059--- a/drivers/gpu/drm/radeon/rs690.c
31060+++ b/drivers/gpu/drm/radeon/rs690.c
31061@@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
71d190be
MT
31062 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
31063 rdev->pm.sideport_bandwidth.full)
31064 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
31065- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
31066+ read_delay_latency.full = dfixed_const(800 * 1000);
31067 read_delay_latency.full = dfixed_div(read_delay_latency,
31068 rdev->pm.igp_sideport_mclk);
31069+ a.full = dfixed_const(370);
31070+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
31071 } else {
31072 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
31073 rdev->pm.k8_bandwidth.full)
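
The rs690_crtc_bandwidth_compute() hunk is an overflow fix rather than a hardening annotation. Assuming the 32-bit 20.12 fixed-point dfixed_const() from drm_fixed.h, dfixed_const(370 * 800 * 1000) would need 296,000,000 << 12, roughly 1.2e12, which cannot fit in a u32, whereas dfixed_const(800 * 1000) = 800,000 << 12, roughly 3.28e9, still does. Dividing the smaller constant by the sideport memory clock first and only then scaling by 370 with dfixed_mul() keeps every intermediate value inside the 32-bit range.
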
fe2de317 31074diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
c6e2a6c8 31075index ebc6fac..a8313ed 100644
fe2de317
MT
31076--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
31077+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
c6e2a6c8 31078@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
15a11c5b
MT
31079 static int ttm_pool_mm_shrink(struct shrinker *shrink,
31080 struct shrink_control *sc)
8308f9c9
MT
31081 {
31082- static atomic_t start_pool = ATOMIC_INIT(0);
31083+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
31084 unsigned i;
31085- unsigned pool_offset = atomic_add_return(1, &start_pool);
31086+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
31087 struct ttm_page_pool *pool;
15a11c5b 31088 int shrink_pages = sc->nr_to_scan;
8308f9c9 31089
fe2de317 31090diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
5e856224 31091index 88edacc..1e5412b 100644
fe2de317
MT
31092--- a/drivers/gpu/drm/via/via_drv.h
31093+++ b/drivers/gpu/drm/via/via_drv.h
8308f9c9
MT
31094@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
31095 typedef uint32_t maskarray_t[5];
31096
31097 typedef struct drm_via_irq {
31098- atomic_t irq_received;
31099+ atomic_unchecked_t irq_received;
31100 uint32_t pending_mask;
31101 uint32_t enable_mask;
31102 wait_queue_head_t irq_queue;
31103@@ -75,7 +75,7 @@ typedef struct drm_via_private {
31104 struct timeval last_vblank;
31105 int last_vblank_valid;
31106 unsigned usec_per_vblank;
31107- atomic_t vbl_received;
31108+ atomic_unchecked_t vbl_received;
31109 drm_via_state_t hc_state;
31110 char pci_buf[VIA_PCI_BUF_SIZE];
31111 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
fe2de317
MT
31112diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
31113index d391f48..10c8ca3 100644
31114--- a/drivers/gpu/drm/via/via_irq.c
31115+++ b/drivers/gpu/drm/via/via_irq.c
31116@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
8308f9c9
MT
31117 if (crtc != 0)
31118 return 0;
31119
31120- return atomic_read(&dev_priv->vbl_received);
31121+ return atomic_read_unchecked(&dev_priv->vbl_received);
31122 }
31123
31124 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
fe2de317 31125@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
31126
31127 status = VIA_READ(VIA_REG_INTERRUPT);
31128 if (status & VIA_IRQ_VBLANK_PENDING) {
31129- atomic_inc(&dev_priv->vbl_received);
31130- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
31131+ atomic_inc_unchecked(&dev_priv->vbl_received);
31132+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
31133 do_gettimeofday(&cur_vblank);
31134 if (dev_priv->last_vblank_valid) {
31135 dev_priv->usec_per_vblank =
fe2de317 31136@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
31137 dev_priv->last_vblank = cur_vblank;
31138 dev_priv->last_vblank_valid = 1;
31139 }
31140- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
31141+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
31142 DRM_DEBUG("US per vblank is: %u\n",
31143 dev_priv->usec_per_vblank);
31144 }
fe2de317 31145@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
8308f9c9
MT
31146
31147 for (i = 0; i < dev_priv->num_irqs; ++i) {
31148 if (status & cur_irq->pending_mask) {
31149- atomic_inc(&cur_irq->irq_received);
31150+ atomic_inc_unchecked(&cur_irq->irq_received);
31151 DRM_WAKEUP(&cur_irq->irq_queue);
31152 handled = 1;
31153 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
fe2de317 31154@@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
8308f9c9
MT
31155 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31156 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
31157 masks[irq][4]));
31158- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
31159+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
31160 } else {
31161 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
31162 (((cur_irq_sequence =
31163- atomic_read(&cur_irq->irq_received)) -
31164+ atomic_read_unchecked(&cur_irq->irq_received)) -
31165 *sequence) <= (1 << 23)));
31166 }
31167 *sequence = cur_irq_sequence;
fe2de317 31168@@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
8308f9c9
MT
31169 }
31170
31171 for (i = 0; i < dev_priv->num_irqs; ++i) {
31172- atomic_set(&cur_irq->irq_received, 0);
31173+ atomic_set_unchecked(&cur_irq->irq_received, 0);
31174 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
31175 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
31176 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
fe2de317 31177@@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
8308f9c9
MT
31178 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
31179 case VIA_IRQ_RELATIVE:
31180 irqwait->request.sequence +=
31181- atomic_read(&cur_irq->irq_received);
31182+ atomic_read_unchecked(&cur_irq->irq_received);
31183 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
31184 case VIA_IRQ_ABSOLUTE:
31185 break;
fe2de317 31186diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
c6e2a6c8 31187index d0f2c07..9ebd9c3 100644
fe2de317
MT
31188--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
31189+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
c6e2a6c8 31190@@ -263,7 +263,7 @@ struct vmw_private {
8308f9c9
MT
31191 * Fencing and IRQs.
31192 */
31193
4c928ab7
MT
31194- atomic_t marker_seq;
31195+ atomic_unchecked_t marker_seq;
8308f9c9
MT
31196 wait_queue_head_t fence_queue;
31197 wait_queue_head_t fifo_queue;
4c928ab7 31198 int fence_queue_waiters; /* Protected by hw_mutex */
fe2de317 31199diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
4c928ab7 31200index a0c2f12..68ae6cb 100644
fe2de317
MT
31201--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31202+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
31203@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
8308f9c9
MT
31204 (unsigned int) min,
31205 (unsigned int) fifo->capabilities);
31206
4c928ab7
MT
31207- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31208+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
31209 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
31210 vmw_marker_queue_init(&fifo->marker_queue);
8308f9c9 31211 return vmw_fifo_send_fence(dev_priv, &dummy);
4c928ab7 31212@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
6e9df6a3
MT
31213 if (reserveable)
31214 iowrite32(bytes, fifo_mem +
31215 SVGA_FIFO_RESERVED);
31216- return fifo_mem + (next_cmd >> 2);
31217+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
31218 } else {
31219 need_bounce = true;
31220 }
4c928ab7 31221@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
8308f9c9
MT
31222
31223 fm = vmw_fifo_reserve(dev_priv, bytes);
31224 if (unlikely(fm == NULL)) {
4c928ab7
MT
31225- *seqno = atomic_read(&dev_priv->marker_seq);
31226+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
8308f9c9 31227 ret = -ENOMEM;
4c928ab7 31228 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
8308f9c9 31229 false, 3*HZ);
4c928ab7 31230@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
8308f9c9
MT
31231 }
31232
31233 do {
4c928ab7
MT
31234- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
31235+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
31236 } while (*seqno == 0);
8308f9c9
MT
31237
31238 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
fe2de317 31239diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
4c928ab7 31240index cabc95f..14b3d77 100644
fe2de317
MT
31241--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
31242+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
4c928ab7 31243@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
8308f9c9
MT
31244 * emitted. Then the fence is stale and signaled.
31245 */
31246
4c928ab7
MT
31247- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
31248+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
8308f9c9
MT
31249 > VMW_FENCE_WRAP);
31250
31251 return ret;
4c928ab7 31252@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
8308f9c9
MT
31253
31254 if (fifo_idle)
31255 down_read(&fifo_state->rwsem);
4c928ab7
MT
31256- signal_seq = atomic_read(&dev_priv->marker_seq);
31257+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
8308f9c9
MT
31258 ret = 0;
31259
31260 for (;;) {
4c928ab7
MT
31261diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31262index 8a8725c..afed796 100644
31263--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31264+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
31265@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
31266 while (!vmw_lag_lt(queue, us)) {
31267 spin_lock(&queue->lock);
31268 if (list_empty(&queue->head))
31269- seqno = atomic_read(&dev_priv->marker_seq);
31270+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
31271 else {
31272 marker = list_first_entry(&queue->head,
31273 struct vmw_marker, head);
fe2de317 31274diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
572b4308 31275index 054677b..741672a 100644
fe2de317
MT
31276--- a/drivers/hid/hid-core.c
31277+++ b/drivers/hid/hid-core.c
572b4308 31278@@ -2070,7 +2070,7 @@ static bool hid_ignore(struct hid_device *hdev)
8308f9c9
MT
31279
31280 int hid_add_device(struct hid_device *hdev)
31281 {
31282- static atomic_t id = ATOMIC_INIT(0);
31283+ static atomic_unchecked_t id = ATOMIC_INIT(0);
31284 int ret;
31285
31286 if (WARN_ON(hdev->status & HID_STAT_ADDED))
572b4308 31287@@ -2085,7 +2085,7 @@ int hid_add_device(struct hid_device *hdev)
8308f9c9
MT
31288 /* XXX hack, any other cleaner solution after the driver core
31289 * is converted to allow more than 20 bytes as the device name? */
31290 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
31291- hdev->vendor, hdev->product, atomic_inc_return(&id));
31292+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
31293
31294 hid_debug_register(hdev, dev_name(&hdev->dev));
31295 ret = device_add(&hdev->dev);
c6e2a6c8
MT
31296diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
31297index eec3291..8ed706b 100644
31298--- a/drivers/hid/hid-wiimote-debug.c
31299+++ b/drivers/hid/hid-wiimote-debug.c
31300@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
31301 else if (size == 0)
31302 return -EIO;
31303
31304- if (copy_to_user(u, buf, size))
31305+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
31306 return -EFAULT;
31307
31308 *off += size;
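The wiimote debugfs hunk above guards a copy_to_user() from an on-stack buffer with an explicit length check before the copy. A hedged sketch of the same idea, with hypothetical names not taken from the patch:

/* Reject any length that exceeds the source buffer before copying
 * to user space; illustrative only. */
static ssize_t copy_sample(char __user *u, const char *buf,
                           size_t buf_len, size_t size)
{
        if (size > buf_len)
                return -EFAULT;
        if (copy_to_user(u, buf, size))
                return -EFAULT;
        return size;
}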
fe2de317 31309diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
5e856224 31310index b1ec0e2..c295a61 100644
fe2de317
MT
31311--- a/drivers/hid/usbhid/hiddev.c
31312+++ b/drivers/hid/usbhid/hiddev.c
31313@@ -624,7 +624,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
16454cff 31314 break;
ae4e228f 31315
df50ba0c
MT
31316 case HIDIOCAPPLICATION:
31317- if (arg < 0 || arg >= hid->maxapplication)
31318+ if (arg >= hid->maxapplication)
16454cff 31319 break;
df50ba0c
MT
31320
31321 for (i = 0; i < hid->maxcollection; i++)
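In the hiddev hunk above, arg is the unsigned long ioctl argument, so the removed "arg < 0" test could never be true; the remaining upper-bound check is the complete range check. Illustrative sketch (hypothetical helper, not in the patch):

/* With an unsigned index, a single upper-bound comparison suffices. */
static bool application_index_valid(unsigned long arg, unsigned int max)
{
        return arg < max;
}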
4c928ab7
MT
31322diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
31323index 4065374..10ed7dc 100644
31324--- a/drivers/hv/channel.c
31325+++ b/drivers/hv/channel.c
31326@@ -400,8 +400,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
31327 int ret = 0;
31328 int t;
31329
31330- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
31331- atomic_inc(&vmbus_connection.next_gpadl_handle);
31332+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
31333+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
31334
31335 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
31336 if (ret)
31337diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
c6e2a6c8 31338index 15956bd..ea34398 100644
4c928ab7
MT
31339--- a/drivers/hv/hv.c
31340+++ b/drivers/hv/hv.c
31341@@ -132,7 +132,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
31342 u64 output_address = (output) ? virt_to_phys(output) : 0;
31343 u32 output_address_hi = output_address >> 32;
31344 u32 output_address_lo = output_address & 0xFFFFFFFF;
31345- void *hypercall_page = hv_context.hypercall_page;
31346+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
31347
31348 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
31349 "=a"(hv_status_lo) : "d" (control_hi),
31350diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
c6e2a6c8 31351index 699f0d8..f4f19250 100644
4c928ab7
MT
31352--- a/drivers/hv/hyperv_vmbus.h
31353+++ b/drivers/hv/hyperv_vmbus.h
c6e2a6c8 31354@@ -555,7 +555,7 @@ enum vmbus_connect_state {
4c928ab7
MT
31355 struct vmbus_connection {
31356 enum vmbus_connect_state conn_state;
31357
31358- atomic_t next_gpadl_handle;
31359+ atomic_unchecked_t next_gpadl_handle;
31360
31361 /*
31362 * Represents channel interrupts. Each bit position represents a
31363diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
5e856224 31364index a220e57..428f54d 100644
4c928ab7
MT
31365--- a/drivers/hv/vmbus_drv.c
31366+++ b/drivers/hv/vmbus_drv.c
31367@@ -663,10 +663,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
31368 {
31369 int ret = 0;
31370
31371- static atomic_t device_num = ATOMIC_INIT(0);
31372+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
31373
31374 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
31375- atomic_inc_return(&device_num));
31376+ atomic_inc_return_unchecked(&device_num));
31377
31378 child_device_obj->device.bus = &hv_bus;
31379 child_device_obj->device.parent = &hv_acpi_dev->dev;
fe2de317 31380diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
c6e2a6c8 31381index 9140236..ceaef4e 100644
fe2de317
MT
31382--- a/drivers/hwmon/acpi_power_meter.c
31383+++ b/drivers/hwmon/acpi_power_meter.c
31384@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
15a11c5b 31385 return res;
8308f9c9 31386
15a11c5b
MT
31387 temp /= 1000;
31388- if (temp < 0)
31389- return -EINVAL;
8308f9c9 31390
15a11c5b
MT
31391 mutex_lock(&resource->lock);
31392 resource->trip[attr->index - 7] = temp;
fe2de317 31393diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
c6e2a6c8 31394index 8b011d0..3de24a1 100644
fe2de317
MT
31395--- a/drivers/hwmon/sht15.c
31396+++ b/drivers/hwmon/sht15.c
15a11c5b 31397@@ -166,7 +166,7 @@ struct sht15_data {
8308f9c9 31398 int supply_uV;
15a11c5b 31399 bool supply_uV_valid;
8308f9c9
MT
31400 struct work_struct update_supply_work;
31401- atomic_t interrupt_handled;
31402+ atomic_unchecked_t interrupt_handled;
31403 };
31404
31405 /**
fe2de317 31406@@ -509,13 +509,13 @@ static int sht15_measurement(struct sht15_data *data,
8308f9c9
MT
31407 return ret;
31408
31409 gpio_direction_input(data->pdata->gpio_data);
31410- atomic_set(&data->interrupt_handled, 0);
31411+ atomic_set_unchecked(&data->interrupt_handled, 0);
31412
31413 enable_irq(gpio_to_irq(data->pdata->gpio_data));
31414 if (gpio_get_value(data->pdata->gpio_data) == 0) {
31415 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
66a7e928 31416 /* Only relevant if the interrupt hasn't occurred. */
8308f9c9
MT
31417- if (!atomic_read(&data->interrupt_handled))
31418+ if (!atomic_read_unchecked(&data->interrupt_handled))
31419 schedule_work(&data->read_work);
31420 }
31421 ret = wait_event_timeout(data->wait_queue,
fe2de317 31422@@ -782,7 +782,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
15a11c5b 31423
8308f9c9
MT
31424 /* First disable the interrupt */
31425 disable_irq_nosync(irq);
31426- atomic_inc(&data->interrupt_handled);
31427+ atomic_inc_unchecked(&data->interrupt_handled);
31428 /* Then schedule a reading work struct */
15a11c5b 31429 if (data->state != SHT15_READING_NOTHING)
8308f9c9 31430 schedule_work(&data->read_work);
fe2de317 31431@@ -804,11 +804,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
15a11c5b
MT
31432 * If not, then start the interrupt again - care here as could
31433 * have gone low in meantime so verify it hasn't!
31434 */
8308f9c9
MT
31435- atomic_set(&data->interrupt_handled, 0);
31436+ atomic_set_unchecked(&data->interrupt_handled, 0);
31437 enable_irq(gpio_to_irq(data->pdata->gpio_data));
c6e2a6c8 31438 /* If still not occurred or another handler was scheduled */
8308f9c9
MT
31439 if (gpio_get_value(data->pdata->gpio_data)
31440- || atomic_read(&data->interrupt_handled))
31441+ || atomic_read_unchecked(&data->interrupt_handled))
31442 return;
31443 }
15a11c5b 31444
fe2de317
MT
31445diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
31446index 378fcb5..5e91fa8 100644
31447--- a/drivers/i2c/busses/i2c-amd756-s4882.c
31448+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
15a11c5b
MT
31449@@ -43,7 +43,7 @@
31450 extern struct i2c_adapter amd756_smbus;
31451
31452 static struct i2c_adapter *s4882_adapter;
31453-static struct i2c_algorithm *s4882_algo;
31454+static i2c_algorithm_no_const *s4882_algo;
31455
31456 /* Wrapper access functions for multiplexed SMBus */
31457 static DEFINE_MUTEX(amd756_lock);
fe2de317
MT
31458diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
31459index 29015eb..af2d8e9 100644
31460--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
31461+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
15a11c5b
MT
31462@@ -41,7 +41,7 @@
31463 extern struct i2c_adapter *nforce2_smbus;
66a7e928 31464
15a11c5b
MT
31465 static struct i2c_adapter *s4985_adapter;
31466-static struct i2c_algorithm *s4985_algo;
31467+static i2c_algorithm_no_const *s4985_algo;
66a7e928 31468
15a11c5b
MT
31469 /* Wrapper access functions for multiplexed SMBus */
31470 static DEFINE_MUTEX(nforce2_lock);
fe2de317
MT
31471diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
31472index d7a4833..7fae376 100644
31473--- a/drivers/i2c/i2c-mux.c
31474+++ b/drivers/i2c/i2c-mux.c
15a11c5b
MT
31475@@ -28,7 +28,7 @@
31476 /* multiplexer per channel data */
31477 struct i2c_mux_priv {
31478 struct i2c_adapter adap;
31479- struct i2c_algorithm algo;
31480+ i2c_algorithm_no_const algo;
31481
31482 struct i2c_adapter *parent;
31483 void *mux_dev; /* the mux chip/device */
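The three i2c hunks above use i2c_algorithm_no_const, a typedef introduced elsewhere in this patch, in place of struct i2c_algorithm. The assumption is that constification makes the regular ops structure read-only, while these drivers build or patch an algorithm instance at runtime and therefore need a writable type with the same layout. A sketch mirroring the i2c_mux_priv change:

/* Per-instance algorithm copy that is filled in at runtime, so it
 * cannot be a constified struct i2c_algorithm. */
struct example_mux_priv {
        struct i2c_adapter adap;
        i2c_algorithm_no_const algo;    /* copied from the parent adapter */
        struct i2c_adapter *parent;
};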
fe2de317
MT
31484diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
31485index 57d00ca..0145194 100644
31486--- a/drivers/ide/aec62xx.c
31487+++ b/drivers/ide/aec62xx.c
31488@@ -181,7 +181,7 @@ static const struct ide_port_ops atp86x_port_ops = {
6e9df6a3
MT
31489 .cable_detect = atp86x_cable_detect,
31490 };
31491
31492-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
31493+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
31494 { /* 0: AEC6210 */
31495 .name = DRV_NAME,
31496 .init_chipset = init_chipset_aec62xx,
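From here on, the IDE host-driver hunks move const port-information tables from __devinitdata to __devinitconst, so the section annotation agrees with the const qualifier and the data can live in read-only init memory. A hedged example of the resulting declaration, using a hypothetical driver name:

/* const init-time data belongs in a read-only init section. */
static const struct ide_port_info example_chipset __devinitconst = {
        .name           = "example",
        .host_flags     = IDE_HFLAG_NO_DMA,
};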
fe2de317
MT
31497diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
31498index 2c8016a..911a27c 100644
31499--- a/drivers/ide/alim15x3.c
31500+++ b/drivers/ide/alim15x3.c
31501@@ -512,7 +512,7 @@ static const struct ide_dma_ops ali_dma_ops = {
6e9df6a3
MT
31502 .dma_sff_read_status = ide_dma_sff_read_status,
31503 };
31504
31505-static const struct ide_port_info ali15x3_chipset __devinitdata = {
31506+static const struct ide_port_info ali15x3_chipset __devinitconst = {
31507 .name = DRV_NAME,
31508 .init_chipset = init_chipset_ali15x3,
31509 .init_hwif = init_hwif_ali15x3,
fe2de317
MT
31510diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
31511index 3747b25..56fc995 100644
31512--- a/drivers/ide/amd74xx.c
31513+++ b/drivers/ide/amd74xx.c
31514@@ -223,7 +223,7 @@ static const struct ide_port_ops amd_port_ops = {
6e9df6a3
MT
31515 .udma_mask = udma, \
31516 }
31517
31518-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
31519+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
31520 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
31521 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
31522 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
fe2de317
MT
31523diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
31524index 15f0ead..cb43480 100644
31525--- a/drivers/ide/atiixp.c
31526+++ b/drivers/ide/atiixp.c
31527@@ -139,7 +139,7 @@ static const struct ide_port_ops atiixp_port_ops = {
6e9df6a3
MT
31528 .cable_detect = atiixp_cable_detect,
31529 };
31530
31531-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
31532+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
31533 { /* 0: IXP200/300/400/700 */
31534 .name = DRV_NAME,
31535 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
fe2de317
MT
31536diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
31537index 5f80312..d1fc438 100644
31538--- a/drivers/ide/cmd64x.c
31539+++ b/drivers/ide/cmd64x.c
31540@@ -327,7 +327,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
6e9df6a3
MT
31541 .dma_sff_read_status = ide_dma_sff_read_status,
31542 };
31543
31544-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
31545+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
31546 { /* 0: CMD643 */
31547 .name = DRV_NAME,
31548 .init_chipset = init_chipset_cmd64x,
fe2de317
MT
31549diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
31550index 2c1e5f7..1444762 100644
31551--- a/drivers/ide/cs5520.c
31552+++ b/drivers/ide/cs5520.c
31553@@ -94,7 +94,7 @@ static const struct ide_port_ops cs5520_port_ops = {
6e9df6a3
MT
31554 .set_dma_mode = cs5520_set_dma_mode,
31555 };
31556
31557-static const struct ide_port_info cyrix_chipset __devinitdata = {
31558+static const struct ide_port_info cyrix_chipset __devinitconst = {
31559 .name = DRV_NAME,
31560 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
31561 .port_ops = &cs5520_port_ops,
fe2de317
MT
31562diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
31563index 4dc4eb9..49b40ad 100644
31564--- a/drivers/ide/cs5530.c
31565+++ b/drivers/ide/cs5530.c
31566@@ -245,7 +245,7 @@ static const struct ide_port_ops cs5530_port_ops = {
6e9df6a3
MT
31567 .udma_filter = cs5530_udma_filter,
31568 };
31569
31570-static const struct ide_port_info cs5530_chipset __devinitdata = {
31571+static const struct ide_port_info cs5530_chipset __devinitconst = {
31572 .name = DRV_NAME,
31573 .init_chipset = init_chipset_cs5530,
31574 .init_hwif = init_hwif_cs5530,
fe2de317
MT
31575diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
31576index 5059faf..18d4c85 100644
31577--- a/drivers/ide/cs5535.c
31578+++ b/drivers/ide/cs5535.c
31579@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
6e9df6a3
MT
31580 .cable_detect = cs5535_cable_detect,
31581 };
31582
31583-static const struct ide_port_info cs5535_chipset __devinitdata = {
31584+static const struct ide_port_info cs5535_chipset __devinitconst = {
31585 .name = DRV_NAME,
31586 .port_ops = &cs5535_port_ops,
31587 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
fe2de317 31588diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
4c928ab7 31589index 847553f..3ffb49d 100644
fe2de317
MT
31590--- a/drivers/ide/cy82c693.c
31591+++ b/drivers/ide/cy82c693.c
31592@@ -163,7 +163,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
6e9df6a3
MT
31593 .set_dma_mode = cy82c693_set_dma_mode,
31594 };
31595
31596-static const struct ide_port_info cy82c693_chipset __devinitdata = {
31597+static const struct ide_port_info cy82c693_chipset __devinitconst = {
31598 .name = DRV_NAME,
31599 .init_iops = init_iops_cy82c693,
31600 .port_ops = &cy82c693_port_ops,
fe2de317
MT
31601diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
31602index 58c51cd..4aec3b8 100644
31603--- a/drivers/ide/hpt366.c
31604+++ b/drivers/ide/hpt366.c
31605@@ -443,7 +443,7 @@ static struct hpt_timings hpt37x_timings = {
6e9df6a3
MT
31606 }
31607 };
31608
31609-static const struct hpt_info hpt36x __devinitdata = {
31610+static const struct hpt_info hpt36x __devinitconst = {
31611 .chip_name = "HPT36x",
31612 .chip_type = HPT36x,
31613 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
fe2de317 31614@@ -451,7 +451,7 @@ static const struct hpt_info hpt36x __devinitdata = {
6e9df6a3
MT
31615 .timings = &hpt36x_timings
31616 };
31617
31618-static const struct hpt_info hpt370 __devinitdata = {
31619+static const struct hpt_info hpt370 __devinitconst = {
31620 .chip_name = "HPT370",
31621 .chip_type = HPT370,
31622 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
fe2de317 31623@@ -459,7 +459,7 @@ static const struct hpt_info hpt370 __devinitdata = {
6e9df6a3
MT
31624 .timings = &hpt37x_timings
31625 };
31626
31627-static const struct hpt_info hpt370a __devinitdata = {
31628+static const struct hpt_info hpt370a __devinitconst = {
31629 .chip_name = "HPT370A",
31630 .chip_type = HPT370A,
31631 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
fe2de317 31632@@ -467,7 +467,7 @@ static const struct hpt_info hpt370a __devinitdata = {
6e9df6a3
MT
31633 .timings = &hpt37x_timings
31634 };
31635
31636-static const struct hpt_info hpt374 __devinitdata = {
31637+static const struct hpt_info hpt374 __devinitconst = {
31638 .chip_name = "HPT374",
31639 .chip_type = HPT374,
31640 .udma_mask = ATA_UDMA5,
fe2de317 31641@@ -475,7 +475,7 @@ static const struct hpt_info hpt374 __devinitdata = {
6e9df6a3
MT
31642 .timings = &hpt37x_timings
31643 };
31644
31645-static const struct hpt_info hpt372 __devinitdata = {
31646+static const struct hpt_info hpt372 __devinitconst = {
31647 .chip_name = "HPT372",
31648 .chip_type = HPT372,
31649 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 31650@@ -483,7 +483,7 @@ static const struct hpt_info hpt372 __devinitdata = {
6e9df6a3
MT
31651 .timings = &hpt37x_timings
31652 };
31653
31654-static const struct hpt_info hpt372a __devinitdata = {
31655+static const struct hpt_info hpt372a __devinitconst = {
31656 .chip_name = "HPT372A",
31657 .chip_type = HPT372A,
31658 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 31659@@ -491,7 +491,7 @@ static const struct hpt_info hpt372a __devinitdata = {
6e9df6a3
MT
31660 .timings = &hpt37x_timings
31661 };
31662
31663-static const struct hpt_info hpt302 __devinitdata = {
31664+static const struct hpt_info hpt302 __devinitconst = {
31665 .chip_name = "HPT302",
31666 .chip_type = HPT302,
31667 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 31668@@ -499,7 +499,7 @@ static const struct hpt_info hpt302 __devinitdata = {
6e9df6a3
MT
31669 .timings = &hpt37x_timings
31670 };
31671
31672-static const struct hpt_info hpt371 __devinitdata = {
31673+static const struct hpt_info hpt371 __devinitconst = {
31674 .chip_name = "HPT371",
31675 .chip_type = HPT371,
31676 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 31677@@ -507,7 +507,7 @@ static const struct hpt_info hpt371 __devinitdata = {
6e9df6a3
MT
31678 .timings = &hpt37x_timings
31679 };
31680
31681-static const struct hpt_info hpt372n __devinitdata = {
31682+static const struct hpt_info hpt372n __devinitconst = {
31683 .chip_name = "HPT372N",
31684 .chip_type = HPT372N,
31685 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 31686@@ -515,7 +515,7 @@ static const struct hpt_info hpt372n __devinitdata = {
6e9df6a3
MT
31687 .timings = &hpt37x_timings
31688 };
31689
31690-static const struct hpt_info hpt302n __devinitdata = {
31691+static const struct hpt_info hpt302n __devinitconst = {
31692 .chip_name = "HPT302N",
31693 .chip_type = HPT302N,
31694 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 31695@@ -523,7 +523,7 @@ static const struct hpt_info hpt302n __devinitdata = {
6e9df6a3
MT
31696 .timings = &hpt37x_timings
31697 };
31698
31699-static const struct hpt_info hpt371n __devinitdata = {
31700+static const struct hpt_info hpt371n __devinitconst = {
31701 .chip_name = "HPT371N",
31702 .chip_type = HPT371N,
31703 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
fe2de317 31704@@ -1361,7 +1361,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
6e9df6a3
MT
31705 .dma_sff_read_status = ide_dma_sff_read_status,
31706 };
31707
31708-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
31709+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
31710 { /* 0: HPT36x */
31711 .name = DRV_NAME,
31712 .init_chipset = init_chipset_hpt366,
fe2de317 31713diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
4c928ab7 31714index 8126824..55a2798 100644
fe2de317
MT
31715--- a/drivers/ide/ide-cd.c
31716+++ b/drivers/ide/ide-cd.c
4c928ab7 31717@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
ae4e228f
MT
31718 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
31719 if ((unsigned long)buf & alignment
31720 || blk_rq_bytes(rq) & q->dma_pad_mask
31721- || object_is_on_stack(buf))
31722+ || object_starts_on_stack(buf))
31723 drive->dma = 0;
31724 }
31725 }
fe2de317 31726diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
5e856224 31727index 7f56b73..dab5b67 100644
fe2de317
MT
31728--- a/drivers/ide/ide-pci-generic.c
31729+++ b/drivers/ide/ide-pci-generic.c
31730@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
6e9df6a3
MT
31731 .udma_mask = ATA_UDMA6, \
31732 }
31733
31734-static const struct ide_port_info generic_chipsets[] __devinitdata = {
31735+static const struct ide_port_info generic_chipsets[] __devinitconst = {
31736 /* 0: Unknown */
31737 DECLARE_GENERIC_PCI_DEV(0),
31738
fe2de317
MT
31739diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
31740index 560e66d..d5dd180 100644
31741--- a/drivers/ide/it8172.c
31742+++ b/drivers/ide/it8172.c
31743@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
6e9df6a3
MT
31744 .set_dma_mode = it8172_set_dma_mode,
31745 };
31746
31747-static const struct ide_port_info it8172_port_info __devinitdata = {
31748+static const struct ide_port_info it8172_port_info __devinitconst = {
31749 .name = DRV_NAME,
31750 .port_ops = &it8172_port_ops,
31751 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
fe2de317
MT
31752diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
31753index 46816ba..1847aeb 100644
31754--- a/drivers/ide/it8213.c
31755+++ b/drivers/ide/it8213.c
31756@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
6e9df6a3
MT
31757 .cable_detect = it8213_cable_detect,
31758 };
31759
31760-static const struct ide_port_info it8213_chipset __devinitdata = {
31761+static const struct ide_port_info it8213_chipset __devinitconst = {
31762 .name = DRV_NAME,
31763 .enablebits = { {0x41, 0x80, 0x80} },
31764 .port_ops = &it8213_port_ops,
fe2de317
MT
31765diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
31766index 2e3169f..c5611db 100644
31767--- a/drivers/ide/it821x.c
31768+++ b/drivers/ide/it821x.c
31769@@ -630,7 +630,7 @@ static const struct ide_port_ops it821x_port_ops = {
6e9df6a3
MT
31770 .cable_detect = it821x_cable_detect,
31771 };
31772
31773-static const struct ide_port_info it821x_chipset __devinitdata = {
31774+static const struct ide_port_info it821x_chipset __devinitconst = {
31775 .name = DRV_NAME,
31776 .init_chipset = init_chipset_it821x,
31777 .init_hwif = init_hwif_it821x,
fe2de317
MT
31778diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
31779index 74c2c4a..efddd7d 100644
31780--- a/drivers/ide/jmicron.c
31781+++ b/drivers/ide/jmicron.c
31782@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
6e9df6a3
MT
31783 .cable_detect = jmicron_cable_detect,
31784 };
31785
31786-static const struct ide_port_info jmicron_chipset __devinitdata = {
31787+static const struct ide_port_info jmicron_chipset __devinitconst = {
31788 .name = DRV_NAME,
31789 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
31790 .port_ops = &jmicron_port_ops,
fe2de317
MT
31791diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
31792index 95327a2..73f78d8 100644
31793--- a/drivers/ide/ns87415.c
31794+++ b/drivers/ide/ns87415.c
31795@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
6e9df6a3
MT
31796 .dma_sff_read_status = superio_dma_sff_read_status,
31797 };
31798
31799-static const struct ide_port_info ns87415_chipset __devinitdata = {
31800+static const struct ide_port_info ns87415_chipset __devinitconst = {
31801 .name = DRV_NAME,
31802 .init_hwif = init_hwif_ns87415,
31803 .tp_ops = &ns87415_tp_ops,
fe2de317
MT
31804diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
31805index 1a53a4c..39edc66 100644
31806--- a/drivers/ide/opti621.c
31807+++ b/drivers/ide/opti621.c
31808@@ -131,7 +131,7 @@ static const struct ide_port_ops opti621_port_ops = {
6e9df6a3
MT
31809 .set_pio_mode = opti621_set_pio_mode,
31810 };
31811
31812-static const struct ide_port_info opti621_chipset __devinitdata = {
31813+static const struct ide_port_info opti621_chipset __devinitconst = {
31814 .name = DRV_NAME,
31815 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
31816 .port_ops = &opti621_port_ops,
fe2de317
MT
31817diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
31818index 9546fe2..2e5ceb6 100644
31819--- a/drivers/ide/pdc202xx_new.c
31820+++ b/drivers/ide/pdc202xx_new.c
31821@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
6e9df6a3
MT
31822 .udma_mask = udma, \
31823 }
31824
31825-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
31826+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
31827 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
31828 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
31829 };
fe2de317
MT
31830diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
31831index 3a35ec6..5634510 100644
31832--- a/drivers/ide/pdc202xx_old.c
31833+++ b/drivers/ide/pdc202xx_old.c
31834@@ -270,7 +270,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
6e9df6a3
MT
31835 .max_sectors = sectors, \
31836 }
31837
31838-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
31839+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
31840 { /* 0: PDC20246 */
31841 .name = DRV_NAME,
31842 .init_chipset = init_chipset_pdc202xx,
fe2de317 31843diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
4c928ab7 31844index 1892e81..fe0fd60 100644
fe2de317
MT
31845--- a/drivers/ide/piix.c
31846+++ b/drivers/ide/piix.c
31847@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
6e9df6a3
MT
31848 .udma_mask = udma, \
31849 }
31850
31851-static const struct ide_port_info piix_pci_info[] __devinitdata = {
31852+static const struct ide_port_info piix_pci_info[] __devinitconst = {
31853 /* 0: MPIIX */
31854 { /*
31855 * MPIIX actually has only a single IDE channel mapped to
fe2de317
MT
31856diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
31857index a6414a8..c04173e 100644
31858--- a/drivers/ide/rz1000.c
31859+++ b/drivers/ide/rz1000.c
31860@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
6e9df6a3
MT
31861 }
31862 }
31863
31864-static const struct ide_port_info rz1000_chipset __devinitdata = {
31865+static const struct ide_port_info rz1000_chipset __devinitconst = {
31866 .name = DRV_NAME,
31867 .host_flags = IDE_HFLAG_NO_DMA,
31868 };
fe2de317
MT
31869diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
31870index 356b9b5..d4758eb 100644
31871--- a/drivers/ide/sc1200.c
31872+++ b/drivers/ide/sc1200.c
31873@@ -291,7 +291,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
6e9df6a3
MT
31874 .dma_sff_read_status = ide_dma_sff_read_status,
31875 };
31876
31877-static const struct ide_port_info sc1200_chipset __devinitdata = {
31878+static const struct ide_port_info sc1200_chipset __devinitconst = {
31879 .name = DRV_NAME,
31880 .port_ops = &sc1200_port_ops,
31881 .dma_ops = &sc1200_dma_ops,
fe2de317
MT
31882diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
31883index b7f5b0c..9701038 100644
31884--- a/drivers/ide/scc_pata.c
31885+++ b/drivers/ide/scc_pata.c
31886@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
6e9df6a3
MT
31887 .dma_sff_read_status = scc_dma_sff_read_status,
31888 };
31889
31890-static const struct ide_port_info scc_chipset __devinitdata = {
31891+static const struct ide_port_info scc_chipset __devinitconst = {
31892 .name = "sccIDE",
31893 .init_iops = init_iops_scc,
31894 .init_dma = scc_init_dma,
fe2de317
MT
31895diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
31896index 35fb8da..24d72ef 100644
31897--- a/drivers/ide/serverworks.c
31898+++ b/drivers/ide/serverworks.c
31899@@ -337,7 +337,7 @@ static const struct ide_port_ops svwks_port_ops = {
6e9df6a3
MT
31900 .cable_detect = svwks_cable_detect,
31901 };
31902
31903-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
31904+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
31905 { /* 0: OSB4 */
31906 .name = DRV_NAME,
31907 .init_chipset = init_chipset_svwks,
fe2de317
MT
31908diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
31909index ddeda44..46f7e30 100644
31910--- a/drivers/ide/siimage.c
31911+++ b/drivers/ide/siimage.c
31912@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
6e9df6a3
MT
31913 .udma_mask = ATA_UDMA6, \
31914 }
31915
31916-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
31917+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
31918 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
31919 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
31920 };
fe2de317
MT
31921diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
31922index 4a00225..09e61b4 100644
31923--- a/drivers/ide/sis5513.c
31924+++ b/drivers/ide/sis5513.c
31925@@ -563,7 +563,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
6e9df6a3
MT
31926 .cable_detect = sis_cable_detect,
31927 };
31928
31929-static const struct ide_port_info sis5513_chipset __devinitdata = {
31930+static const struct ide_port_info sis5513_chipset __devinitconst = {
31931 .name = DRV_NAME,
31932 .init_chipset = init_chipset_sis5513,
31933 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
fe2de317
MT
31934diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
31935index f21dc2a..d051cd2 100644
31936--- a/drivers/ide/sl82c105.c
31937+++ b/drivers/ide/sl82c105.c
31938@@ -299,7 +299,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
6e9df6a3
MT
31939 .dma_sff_read_status = ide_dma_sff_read_status,
31940 };
31941
31942-static const struct ide_port_info sl82c105_chipset __devinitdata = {
31943+static const struct ide_port_info sl82c105_chipset __devinitconst = {
31944 .name = DRV_NAME,
31945 .init_chipset = init_chipset_sl82c105,
31946 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
fe2de317
MT
31947diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
31948index 864ffe0..863a5e9 100644
31949--- a/drivers/ide/slc90e66.c
31950+++ b/drivers/ide/slc90e66.c
31951@@ -132,7 +132,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
6e9df6a3
MT
31952 .cable_detect = slc90e66_cable_detect,
31953 };
31954
31955-static const struct ide_port_info slc90e66_chipset __devinitdata = {
31956+static const struct ide_port_info slc90e66_chipset __devinitconst = {
31957 .name = DRV_NAME,
31958 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
31959 .port_ops = &slc90e66_port_ops,
fe2de317 31960diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
4c928ab7 31961index 4799d5c..1794678 100644
fe2de317
MT
31962--- a/drivers/ide/tc86c001.c
31963+++ b/drivers/ide/tc86c001.c
4c928ab7 31964@@ -192,7 +192,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
6e9df6a3
MT
31965 .dma_sff_read_status = ide_dma_sff_read_status,
31966 };
31967
31968-static const struct ide_port_info tc86c001_chipset __devinitdata = {
31969+static const struct ide_port_info tc86c001_chipset __devinitconst = {
31970 .name = DRV_NAME,
31971 .init_hwif = init_hwif_tc86c001,
31972 .port_ops = &tc86c001_port_ops,
fe2de317 31973diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
4c928ab7 31974index 281c914..55ce1b8 100644
fe2de317
MT
31975--- a/drivers/ide/triflex.c
31976+++ b/drivers/ide/triflex.c
31977@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
6e9df6a3
MT
31978 .set_dma_mode = triflex_set_mode,
31979 };
31980
31981-static const struct ide_port_info triflex_device __devinitdata = {
31982+static const struct ide_port_info triflex_device __devinitconst = {
31983 .name = DRV_NAME,
31984 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
31985 .port_ops = &triflex_port_ops,
fe2de317
MT
31986diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
31987index 4b42ca0..e494a98 100644
31988--- a/drivers/ide/trm290.c
31989+++ b/drivers/ide/trm290.c
31990@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
6e9df6a3
MT
31991 .dma_check = trm290_dma_check,
31992 };
31993
31994-static const struct ide_port_info trm290_chipset __devinitdata = {
31995+static const struct ide_port_info trm290_chipset __devinitconst = {
31996 .name = DRV_NAME,
31997 .init_hwif = init_hwif_trm290,
31998 .tp_ops = &trm290_tp_ops,
fe2de317
MT
31999diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
32000index f46f49c..eb77678 100644
32001--- a/drivers/ide/via82cxxx.c
32002+++ b/drivers/ide/via82cxxx.c
32003@@ -403,7 +403,7 @@ static const struct ide_port_ops via_port_ops = {
6e9df6a3
MT
32004 .cable_detect = via82cxxx_cable_detect,
32005 };
32006
32007-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
32008+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
32009 .name = DRV_NAME,
32010 .init_chipset = init_chipset_via82cxxx,
32011 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
4c928ab7 32012diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
5e856224 32013index 73d4531..c90cd2d 100644
4c928ab7
MT
32014--- a/drivers/ieee802154/fakehard.c
32015+++ b/drivers/ieee802154/fakehard.c
32016@@ -386,7 +386,7 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
32017 phy->transmit_power = 0xbf;
32018
32019 dev->netdev_ops = &fake_ops;
32020- dev->ml_priv = &fake_mlme;
32021+ dev->ml_priv = (void *)&fake_mlme;
32022
32023 priv = netdev_priv(dev);
32024 priv->phy = phy;
fe2de317 32025diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
5e856224 32026index c889aae..6cf5aa7 100644
fe2de317
MT
32027--- a/drivers/infiniband/core/cm.c
32028+++ b/drivers/infiniband/core/cm.c
4c928ab7 32029@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
ae4e228f
MT
32030
32031 struct cm_counter_group {
32032 struct kobject obj;
32033- atomic_long_t counter[CM_ATTR_COUNT];
32034+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
32035 };
32036
32037 struct cm_counter_attribute {
4c928ab7 32038@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
ae4e228f
MT
32039 struct ib_mad_send_buf *msg = NULL;
32040 int ret;
32041
32042- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32043+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32044 counter[CM_REQ_COUNTER]);
32045
32046 /* Quick state check to discard duplicate REQs. */
4c928ab7 32047@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
ae4e228f
MT
32048 if (!cm_id_priv)
32049 return;
58c5fc13 32050
ae4e228f
MT
32051- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32052+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32053 counter[CM_REP_COUNTER]);
32054 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
32055 if (ret)
4c928ab7 32056@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
ae4e228f
MT
32057 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
32058 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
32059 spin_unlock_irq(&cm_id_priv->lock);
32060- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32061+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32062 counter[CM_RTU_COUNTER]);
32063 goto out;
32064 }
4c928ab7 32065@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
ae4e228f
MT
32066 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
32067 dreq_msg->local_comm_id);
32068 if (!cm_id_priv) {
32069- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32070+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32071 counter[CM_DREQ_COUNTER]);
32072 cm_issue_drep(work->port, work->mad_recv_wc);
32073 return -EINVAL;
4c928ab7 32074@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
ae4e228f
MT
32075 case IB_CM_MRA_REP_RCVD:
32076 break;
32077 case IB_CM_TIMEWAIT:
32078- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32079+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32080 counter[CM_DREQ_COUNTER]);
32081 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32082 goto unlock;
4c928ab7 32083@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
ae4e228f
MT
32084 cm_free_msg(msg);
32085 goto deref;
32086 case IB_CM_DREQ_RCVD:
32087- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32088+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32089 counter[CM_DREQ_COUNTER]);
32090 goto unlock;
32091 default:
4c928ab7 32092@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
ae4e228f
MT
32093 ib_modify_mad(cm_id_priv->av.port->mad_agent,
32094 cm_id_priv->msg, timeout)) {
32095 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
32096- atomic_long_inc(&work->port->
32097+ atomic_long_inc_unchecked(&work->port->
32098 counter_group[CM_RECV_DUPLICATES].
32099 counter[CM_MRA_COUNTER]);
32100 goto out;
4c928ab7 32101@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
ae4e228f
MT
32102 break;
32103 case IB_CM_MRA_REQ_RCVD:
32104 case IB_CM_MRA_REP_RCVD:
32105- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32106+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32107 counter[CM_MRA_COUNTER]);
32108 /* fall through */
32109 default:
4c928ab7 32110@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
ae4e228f
MT
32111 case IB_CM_LAP_IDLE:
32112 break;
32113 case IB_CM_MRA_LAP_SENT:
32114- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32115+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32116 counter[CM_LAP_COUNTER]);
32117 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
32118 goto unlock;
4c928ab7 32119@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
ae4e228f
MT
32120 cm_free_msg(msg);
32121 goto deref;
32122 case IB_CM_LAP_RCVD:
32123- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32124+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32125 counter[CM_LAP_COUNTER]);
32126 goto unlock;
32127 default:
4c928ab7 32128@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
ae4e228f
MT
32129 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
32130 if (cur_cm_id_priv) {
32131 spin_unlock_irq(&cm.lock);
32132- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
32133+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
32134 counter[CM_SIDR_REQ_COUNTER]);
32135 goto out; /* Duplicate message. */
32136 }
4c928ab7 32137@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
ae4e228f
MT
32138 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
32139 msg->retries = 1;
32140
32141- atomic_long_add(1 + msg->retries,
32142+ atomic_long_add_unchecked(1 + msg->retries,
32143 &port->counter_group[CM_XMIT].counter[attr_index]);
32144 if (msg->retries)
32145- atomic_long_add(msg->retries,
32146+ atomic_long_add_unchecked(msg->retries,
32147 &port->counter_group[CM_XMIT_RETRIES].
32148 counter[attr_index]);
32149
4c928ab7 32150@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
ae4e228f
MT
32151 }
32152
32153 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
32154- atomic_long_inc(&port->counter_group[CM_RECV].
32155+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
32156 counter[attr_id - CM_ATTR_ID_OFFSET]);
32157
32158 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
4c928ab7 32159@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
ae4e228f
MT
32160 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
32161
32162 return sprintf(buf, "%ld\n",
32163- atomic_long_read(&group->counter[cm_attr->index]));
32164+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
32165 }
32166
df50ba0c 32167 static const struct sysfs_ops cm_counter_ops = {
fe2de317 32168diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
4c928ab7 32169index 176c8f9..2627b62 100644
fe2de317
MT
32170--- a/drivers/infiniband/core/fmr_pool.c
32171+++ b/drivers/infiniband/core/fmr_pool.c
4c928ab7 32172@@ -98,8 +98,8 @@ struct ib_fmr_pool {
8308f9c9
MT
32173
32174 struct task_struct *thread;
32175
32176- atomic_t req_ser;
32177- atomic_t flush_ser;
32178+ atomic_unchecked_t req_ser;
32179+ atomic_unchecked_t flush_ser;
32180
32181 wait_queue_head_t force_wait;
32182 };
4c928ab7 32183@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
8308f9c9
MT
32184 struct ib_fmr_pool *pool = pool_ptr;
32185
32186 do {
32187- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
32188+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
32189 ib_fmr_batch_release(pool);
32190
32191- atomic_inc(&pool->flush_ser);
32192+ atomic_inc_unchecked(&pool->flush_ser);
32193 wake_up_interruptible(&pool->force_wait);
32194
32195 if (pool->flush_function)
4c928ab7 32196@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
8308f9c9
MT
32197 }
32198
32199 set_current_state(TASK_INTERRUPTIBLE);
32200- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
32201+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
32202 !kthread_should_stop())
32203 schedule();
32204 __set_current_state(TASK_RUNNING);
4c928ab7 32205@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
8308f9c9
MT
32206 pool->dirty_watermark = params->dirty_watermark;
32207 pool->dirty_len = 0;
32208 spin_lock_init(&pool->pool_lock);
32209- atomic_set(&pool->req_ser, 0);
32210- atomic_set(&pool->flush_ser, 0);
32211+ atomic_set_unchecked(&pool->req_ser, 0);
32212+ atomic_set_unchecked(&pool->flush_ser, 0);
32213 init_waitqueue_head(&pool->force_wait);
32214
32215 pool->thread = kthread_run(ib_fmr_cleanup_thread,
4c928ab7 32216@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
8308f9c9
MT
32217 }
32218 spin_unlock_irq(&pool->pool_lock);
32219
32220- serial = atomic_inc_return(&pool->req_ser);
32221+ serial = atomic_inc_return_unchecked(&pool->req_ser);
32222 wake_up_process(pool->thread);
32223
32224 if (wait_event_interruptible(pool->force_wait,
32225- atomic_read(&pool->flush_ser) - serial >= 0))
32226+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
32227 return -EINTR;
32228
32229 return 0;
4c928ab7 32230@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
8308f9c9
MT
32231 } else {
32232 list_add_tail(&fmr->list, &pool->dirty_list);
32233 if (++pool->dirty_len >= pool->dirty_watermark) {
32234- atomic_inc(&pool->req_ser);
32235+ atomic_inc_unchecked(&pool->req_ser);
32236 wake_up_process(pool->thread);
32237 }
32238 }
fe2de317
MT
32239diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
32240index 40c8353..946b0e4 100644
32241--- a/drivers/infiniband/hw/cxgb4/mem.c
32242+++ b/drivers/infiniband/hw/cxgb4/mem.c
32243@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
8308f9c9
MT
32244 int err;
32245 struct fw_ri_tpte tpt;
32246 u32 stag_idx;
32247- static atomic_t key;
32248+ static atomic_unchecked_t key;
32249
32250 if (c4iw_fatal_error(rdev))
32251 return -EIO;
fe2de317 32252@@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
8308f9c9
MT
32253 &rdev->resource.tpt_fifo_lock);
32254 if (!stag_idx)
32255 return -ENOMEM;
32256- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
32257+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
32258 }
32259 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
32260 __func__, stag_state, type, pdid, stag_idx);
fe2de317
MT
32261diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
32262index 79b3dbc..96e5fcc 100644
32263--- a/drivers/infiniband/hw/ipath/ipath_rc.c
32264+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
32265@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
66a7e928
MT
32266 struct ib_atomic_eth *ateth;
32267 struct ipath_ack_entry *e;
32268 u64 vaddr;
32269- atomic64_t *maddr;
32270+ atomic64_unchecked_t *maddr;
32271 u64 sdata;
32272 u32 rkey;
32273 u8 next;
fe2de317 32274@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
66a7e928
MT
32275 IB_ACCESS_REMOTE_ATOMIC)))
32276 goto nack_acc_unlck;
32277 /* Perform atomic OP and save result. */
32278- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32279+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32280 sdata = be64_to_cpu(ateth->swap_data);
32281 e = &qp->s_ack_queue[qp->r_head_ack_queue];
32282 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
32283- (u64) atomic64_add_return(sdata, maddr) - sdata :
32284+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32285 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32286 be64_to_cpu(ateth->compare_data),
32287 sdata);
fe2de317
MT
32288diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
32289index 1f95bba..9530f87 100644
32290--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
32291+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
32292@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
66a7e928
MT
32293 unsigned long flags;
32294 struct ib_wc wc;
32295 u64 sdata;
32296- atomic64_t *maddr;
32297+ atomic64_unchecked_t *maddr;
32298 enum ib_wc_status send_status;
32299
32300 /*
32301@@ -382,11 +382,11 @@ again:
32302 IB_ACCESS_REMOTE_ATOMIC)))
32303 goto acc_err;
32304 /* Perform atomic OP and save result. */
32305- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
32306+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
32307 sdata = wqe->wr.wr.atomic.compare_add;
32308 *(u64 *) sqp->s_sge.sge.vaddr =
32309 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
32310- (u64) atomic64_add_return(sdata, maddr) - sdata :
32311+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
32312 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
32313 sdata, wqe->wr.wr.atomic.swap);
32314 goto send_comp;
fe2de317 32315diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
5e856224 32316index 7140199..da60063 100644
fe2de317
MT
32317--- a/drivers/infiniband/hw/nes/nes.c
32318+++ b/drivers/infiniband/hw/nes/nes.c
32319@@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
8308f9c9
MT
32320 LIST_HEAD(nes_adapter_list);
32321 static LIST_HEAD(nes_dev_list);
32322
32323-atomic_t qps_destroyed;
32324+atomic_unchecked_t qps_destroyed;
32325
32326 static unsigned int ee_flsh_adapter;
32327 static unsigned int sysfs_nonidx_addr;
4c928ab7 32328@@ -272,7 +272,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
8308f9c9
MT
32329 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
32330 struct nes_adapter *nesadapter = nesdev->nesadapter;
32331
32332- atomic_inc(&qps_destroyed);
32333+ atomic_inc_unchecked(&qps_destroyed);
32334
32335 /* Free the control structures */
32336
fe2de317 32337diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
5e856224 32338index c438e46..ca30356 100644
fe2de317
MT
32339--- a/drivers/infiniband/hw/nes/nes.h
32340+++ b/drivers/infiniband/hw/nes/nes.h
4c928ab7 32341@@ -178,17 +178,17 @@ extern unsigned int nes_debug_level;
fe2de317
MT
32342 extern unsigned int wqm_quanta;
32343 extern struct list_head nes_adapter_list;
32344
32345-extern atomic_t cm_connects;
32346-extern atomic_t cm_accepts;
32347-extern atomic_t cm_disconnects;
32348-extern atomic_t cm_closes;
32349-extern atomic_t cm_connecteds;
32350-extern atomic_t cm_connect_reqs;
32351-extern atomic_t cm_rejects;
32352-extern atomic_t mod_qp_timouts;
32353-extern atomic_t qps_created;
32354-extern atomic_t qps_destroyed;
32355-extern atomic_t sw_qps_destroyed;
32356+extern atomic_unchecked_t cm_connects;
32357+extern atomic_unchecked_t cm_accepts;
32358+extern atomic_unchecked_t cm_disconnects;
32359+extern atomic_unchecked_t cm_closes;
32360+extern atomic_unchecked_t cm_connecteds;
32361+extern atomic_unchecked_t cm_connect_reqs;
32362+extern atomic_unchecked_t cm_rejects;
32363+extern atomic_unchecked_t mod_qp_timouts;
32364+extern atomic_unchecked_t qps_created;
32365+extern atomic_unchecked_t qps_destroyed;
32366+extern atomic_unchecked_t sw_qps_destroyed;
32367 extern u32 mh_detected;
32368 extern u32 mh_pauses_sent;
32369 extern u32 cm_packets_sent;
4c928ab7 32370@@ -197,16 +197,16 @@ extern u32 cm_packets_created;
fe2de317
MT
32371 extern u32 cm_packets_received;
32372 extern u32 cm_packets_dropped;
32373 extern u32 cm_packets_retrans;
32374-extern atomic_t cm_listens_created;
32375-extern atomic_t cm_listens_destroyed;
32376+extern atomic_unchecked_t cm_listens_created;
32377+extern atomic_unchecked_t cm_listens_destroyed;
32378 extern u32 cm_backlog_drops;
32379-extern atomic_t cm_loopbacks;
32380-extern atomic_t cm_nodes_created;
32381-extern atomic_t cm_nodes_destroyed;
32382-extern atomic_t cm_accel_dropped_pkts;
32383-extern atomic_t cm_resets_recvd;
4c928ab7
MT
32384-extern atomic_t pau_qps_created;
32385-extern atomic_t pau_qps_destroyed;
fe2de317
MT
32386+extern atomic_unchecked_t cm_loopbacks;
32387+extern atomic_unchecked_t cm_nodes_created;
32388+extern atomic_unchecked_t cm_nodes_destroyed;
32389+extern atomic_unchecked_t cm_accel_dropped_pkts;
32390+extern atomic_unchecked_t cm_resets_recvd;
4c928ab7
MT
32391+extern atomic_unchecked_t pau_qps_created;
32392+extern atomic_unchecked_t pau_qps_destroyed;
fe2de317
MT
32393
32394 extern u32 int_mod_timer_init;
32395 extern u32 int_mod_cq_depth_256;
32396diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
c6e2a6c8 32397index 71edfbb..15b62ae 100644
fe2de317
MT
32398--- a/drivers/infiniband/hw/nes/nes_cm.c
32399+++ b/drivers/infiniband/hw/nes/nes_cm.c
8308f9c9
MT
32400@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
32401 u32 cm_packets_retrans;
32402 u32 cm_packets_created;
32403 u32 cm_packets_received;
32404-atomic_t cm_listens_created;
32405-atomic_t cm_listens_destroyed;
32406+atomic_unchecked_t cm_listens_created;
32407+atomic_unchecked_t cm_listens_destroyed;
32408 u32 cm_backlog_drops;
32409-atomic_t cm_loopbacks;
32410-atomic_t cm_nodes_created;
32411-atomic_t cm_nodes_destroyed;
32412-atomic_t cm_accel_dropped_pkts;
32413-atomic_t cm_resets_recvd;
32414+atomic_unchecked_t cm_loopbacks;
32415+atomic_unchecked_t cm_nodes_created;
32416+atomic_unchecked_t cm_nodes_destroyed;
32417+atomic_unchecked_t cm_accel_dropped_pkts;
32418+atomic_unchecked_t cm_resets_recvd;
32419
4c928ab7
MT
32420 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
32421 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
32422@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
8308f9c9
MT
32423
32424 static struct nes_cm_core *g_cm_core;
32425
32426-atomic_t cm_connects;
32427-atomic_t cm_accepts;
32428-atomic_t cm_disconnects;
32429-atomic_t cm_closes;
32430-atomic_t cm_connecteds;
32431-atomic_t cm_connect_reqs;
32432-atomic_t cm_rejects;
32433+atomic_unchecked_t cm_connects;
32434+atomic_unchecked_t cm_accepts;
32435+atomic_unchecked_t cm_disconnects;
32436+atomic_unchecked_t cm_closes;
32437+atomic_unchecked_t cm_connecteds;
32438+atomic_unchecked_t cm_connect_reqs;
32439+atomic_unchecked_t cm_rejects;
32440
4c928ab7
MT
32441 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
32442 {
c6e2a6c8 32443@@ -1279,7 +1279,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
8308f9c9
MT
32444 kfree(listener);
32445 listener = NULL;
32446 ret = 0;
32447- atomic_inc(&cm_listens_destroyed);
32448+ atomic_inc_unchecked(&cm_listens_destroyed);
32449 } else {
32450 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
32451 }
c6e2a6c8 32452@@ -1482,7 +1482,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
8308f9c9
MT
32453 cm_node->rem_mac);
32454
32455 add_hte_node(cm_core, cm_node);
32456- atomic_inc(&cm_nodes_created);
32457+ atomic_inc_unchecked(&cm_nodes_created);
32458
32459 return cm_node;
32460 }
c6e2a6c8 32461@@ -1540,7 +1540,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
8308f9c9
MT
32462 }
32463
32464 atomic_dec(&cm_core->node_cnt);
32465- atomic_inc(&cm_nodes_destroyed);
32466+ atomic_inc_unchecked(&cm_nodes_destroyed);
32467 nesqp = cm_node->nesqp;
32468 if (nesqp) {
32469 nesqp->cm_node = NULL;
c6e2a6c8 32470@@ -1604,7 +1604,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
8308f9c9
MT
32471
32472 static void drop_packet(struct sk_buff *skb)
32473 {
32474- atomic_inc(&cm_accel_dropped_pkts);
32475+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32476 dev_kfree_skb_any(skb);
32477 }
32478
c6e2a6c8 32479@@ -1667,7 +1667,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
8308f9c9
MT
32480 {
32481
32482 int reset = 0; /* whether to send reset in case of err.. */
32483- atomic_inc(&cm_resets_recvd);
32484+ atomic_inc_unchecked(&cm_resets_recvd);
32485 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
32486 " refcnt=%d\n", cm_node, cm_node->state,
32487 atomic_read(&cm_node->ref_count));
c6e2a6c8 32488@@ -2308,7 +2308,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
8308f9c9
MT
32489 rem_ref_cm_node(cm_node->cm_core, cm_node);
32490 return NULL;
32491 }
32492- atomic_inc(&cm_loopbacks);
32493+ atomic_inc_unchecked(&cm_loopbacks);
32494 loopbackremotenode->loopbackpartner = cm_node;
32495 loopbackremotenode->tcp_cntxt.rcv_wscale =
32496 NES_CM_DEFAULT_RCV_WND_SCALE;
c6e2a6c8 32497@@ -2583,7 +2583,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
4c928ab7
MT
32498 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
32499 else {
32500 rem_ref_cm_node(cm_core, cm_node);
32501- atomic_inc(&cm_accel_dropped_pkts);
32502+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
32503 dev_kfree_skb_any(skb);
32504 }
8308f9c9 32505 break;
c6e2a6c8 32506@@ -2890,7 +2890,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
8308f9c9
MT
32507
32508 if ((cm_id) && (cm_id->event_handler)) {
32509 if (issue_disconn) {
32510- atomic_inc(&cm_disconnects);
32511+ atomic_inc_unchecked(&cm_disconnects);
32512 cm_event.event = IW_CM_EVENT_DISCONNECT;
32513 cm_event.status = disconn_status;
32514 cm_event.local_addr = cm_id->local_addr;
c6e2a6c8 32515@@ -2912,7 +2912,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
8308f9c9
MT
32516 }
32517
32518 if (issue_close) {
32519- atomic_inc(&cm_closes);
32520+ atomic_inc_unchecked(&cm_closes);
32521 nes_disconnect(nesqp, 1);
32522
32523 cm_id->provider_data = nesqp;
c6e2a6c8 32524@@ -3048,7 +3048,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
8308f9c9
MT
32525
32526 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
32527 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
32528- atomic_inc(&cm_accepts);
32529+ atomic_inc_unchecked(&cm_accepts);
32530
32531 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
32532 netdev_refcnt_read(nesvnic->netdev));
c6e2a6c8 32533@@ -3250,7 +3250,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
8308f9c9 32534 struct nes_cm_core *cm_core;
4c928ab7 32535 u8 *start_buff;
8308f9c9
MT
32536
32537- atomic_inc(&cm_rejects);
32538+ atomic_inc_unchecked(&cm_rejects);
4c928ab7 32539 cm_node = (struct nes_cm_node *)cm_id->provider_data;
8308f9c9
MT
32540 loopback = cm_node->loopbackpartner;
32541 cm_core = cm_node->cm_core;
c6e2a6c8 32542@@ -3310,7 +3310,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
4c928ab7
MT
32543 ntohl(cm_id->local_addr.sin_addr.s_addr),
32544 ntohs(cm_id->local_addr.sin_port));
8308f9c9
MT
32545
32546- atomic_inc(&cm_connects);
32547+ atomic_inc_unchecked(&cm_connects);
32548 nesqp->active_conn = 1;
32549
32550 /* cache the cm_id in the qp */
c6e2a6c8 32551@@ -3416,7 +3416,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
8308f9c9
MT
32552 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
32553 return err;
32554 }
32555- atomic_inc(&cm_listens_created);
32556+ atomic_inc_unchecked(&cm_listens_created);
32557 }
32558
32559 cm_id->add_ref(cm_id);
c6e2a6c8 32560@@ -3517,7 +3517,7 @@ static void cm_event_connected(struct nes_cm_event *event)
4c928ab7
MT
32561
32562 if (nesqp->destroyed)
8308f9c9 32563 return;
8308f9c9
MT
32564- atomic_inc(&cm_connecteds);
32565+ atomic_inc_unchecked(&cm_connecteds);
32566 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
4c928ab7
MT
32567 " local port 0x%04X. jiffies = %lu.\n",
32568 nesqp->hwqp.qp_id,
c6e2a6c8 32569@@ -3704,7 +3704,7 @@ static void cm_event_reset(struct nes_cm_event *event)
8308f9c9
MT
32570
32571 cm_id->add_ref(cm_id);
32572 ret = cm_id->event_handler(cm_id, &cm_event);
32573- atomic_inc(&cm_closes);
32574+ atomic_inc_unchecked(&cm_closes);
32575 cm_event.event = IW_CM_EVENT_CLOSE;
15a11c5b 32576 cm_event.status = 0;
8308f9c9 32577 cm_event.provider_data = cm_id->provider_data;
c6e2a6c8 32578@@ -3740,7 +3740,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
8308f9c9
MT
32579 return;
32580 cm_id = cm_node->cm_id;
32581
32582- atomic_inc(&cm_connect_reqs);
32583+ atomic_inc_unchecked(&cm_connect_reqs);
32584 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
4c928ab7 32585 cm_node, cm_id, jiffies);
8308f9c9 32586
c6e2a6c8 32587@@ -3780,7 +3780,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
8308f9c9
MT
32588 return;
32589 cm_id = cm_node->cm_id;
32590
32591- atomic_inc(&cm_connect_reqs);
32592+ atomic_inc_unchecked(&cm_connect_reqs);
32593 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
4c928ab7
MT
32594 cm_node, cm_id, jiffies);
32595
32596diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
5e856224 32597index 3ba7be3..c81f6ff 100644
4c928ab7
MT
32598--- a/drivers/infiniband/hw/nes/nes_mgt.c
32599+++ b/drivers/infiniband/hw/nes/nes_mgt.c
32600@@ -40,8 +40,8 @@
32601 #include "nes.h"
32602 #include "nes_mgt.h"
32603
32604-atomic_t pau_qps_created;
32605-atomic_t pau_qps_destroyed;
32606+atomic_unchecked_t pau_qps_created;
32607+atomic_unchecked_t pau_qps_destroyed;
32608
32609 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
32610 {
32611@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
32612 {
32613 struct sk_buff *skb;
32614 unsigned long flags;
32615- atomic_inc(&pau_qps_destroyed);
32616+ atomic_inc_unchecked(&pau_qps_destroyed);
32617
32618 /* Free packets that have not yet been forwarded */
32619 /* Lock is acquired by skb_dequeue when removing the skb */
32620@@ -812,7 +812,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
32621 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
32622 skb_queue_head_init(&nesqp->pau_list);
32623 spin_lock_init(&nesqp->pau_lock);
32624- atomic_inc(&pau_qps_created);
32625+ atomic_inc_unchecked(&pau_qps_created);
32626 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
32627 }
8308f9c9 32628
fe2de317 32629diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
5e856224 32630index f3a3ecf..57d311d 100644
fe2de317
MT
32631--- a/drivers/infiniband/hw/nes/nes_nic.c
32632+++ b/drivers/infiniband/hw/nes/nes_nic.c
4c928ab7 32633@@ -1277,39 +1277,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
8308f9c9
MT
32634 target_stat_values[++index] = mh_detected;
32635 target_stat_values[++index] = mh_pauses_sent;
32636 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
32637- target_stat_values[++index] = atomic_read(&cm_connects);
32638- target_stat_values[++index] = atomic_read(&cm_accepts);
32639- target_stat_values[++index] = atomic_read(&cm_disconnects);
32640- target_stat_values[++index] = atomic_read(&cm_connecteds);
32641- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
32642- target_stat_values[++index] = atomic_read(&cm_rejects);
32643- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
32644- target_stat_values[++index] = atomic_read(&qps_created);
32645- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
32646- target_stat_values[++index] = atomic_read(&qps_destroyed);
32647- target_stat_values[++index] = atomic_read(&cm_closes);
32648+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
32649+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
32650+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
32651+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
32652+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
32653+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
32654+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
32655+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
32656+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
32657+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
32658+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
32659 target_stat_values[++index] = cm_packets_sent;
32660 target_stat_values[++index] = cm_packets_bounced;
32661 target_stat_values[++index] = cm_packets_created;
32662 target_stat_values[++index] = cm_packets_received;
32663 target_stat_values[++index] = cm_packets_dropped;
32664 target_stat_values[++index] = cm_packets_retrans;
32665- target_stat_values[++index] = atomic_read(&cm_listens_created);
32666- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
32667+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
32668+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
32669 target_stat_values[++index] = cm_backlog_drops;
32670- target_stat_values[++index] = atomic_read(&cm_loopbacks);
32671- target_stat_values[++index] = atomic_read(&cm_nodes_created);
32672- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
32673- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
32674- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
32675+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
32676+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
32677+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
32678+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
32679+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
32680 target_stat_values[++index] = nesadapter->free_4kpbl;
32681 target_stat_values[++index] = nesadapter->free_256pbl;
32682 target_stat_values[++index] = int_mod_timer_init;
4c928ab7
MT
32683 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
32684 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
32685 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
32686- target_stat_values[++index] = atomic_read(&pau_qps_created);
32687- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
32688+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
32689+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
32690 }
32691
32692 /**
fe2de317 32693diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
c6e2a6c8 32694index 8b8812d..a5e1133 100644
fe2de317
MT
32695--- a/drivers/infiniband/hw/nes/nes_verbs.c
32696+++ b/drivers/infiniband/hw/nes/nes_verbs.c
8308f9c9
MT
32697@@ -46,9 +46,9 @@
32698
32699 #include <rdma/ib_umem.h>
32700
32701-atomic_t mod_qp_timouts;
32702-atomic_t qps_created;
32703-atomic_t sw_qps_destroyed;
32704+atomic_unchecked_t mod_qp_timouts;
32705+atomic_unchecked_t qps_created;
32706+atomic_unchecked_t sw_qps_destroyed;
32707
32708 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
32709
fe2de317 32710@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
8308f9c9
MT
32711 if (init_attr->create_flags)
32712 return ERR_PTR(-EINVAL);
32713
32714- atomic_inc(&qps_created);
32715+ atomic_inc_unchecked(&qps_created);
32716 switch (init_attr->qp_type) {
32717 case IB_QPT_RC:
32718 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
fe2de317 32719@@ -1460,7 +1460,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
8308f9c9 32720 struct iw_cm_event cm_event;
4c928ab7 32721 int ret = 0;
8308f9c9
MT
32722
32723- atomic_inc(&sw_qps_destroyed);
32724+ atomic_inc_unchecked(&sw_qps_destroyed);
32725 nesqp->destroyed = 1;
32726
32727 /* Blow away the connection if it exists. */
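
Editor's note on the nes driver hunks above: they switch the driver's connection-management statistics from atomic_t to atomic_unchecked_t and use the matching *_unchecked accessors. Under PaX/grsecurity REFCOUNT hardening, a plain atomic_inc() traps on signed overflow; counters that are purely statistical are moved to the unchecked variant so a wrap-around is tolerated instead of being treated as a reference-count bug. A minimal sketch of such a wrapper, with simplified userspace-style definitions that are an assumption and not taken from the patch:

/* Illustrative sketch only -- simplified stand-ins for the kernel's
 * atomic_unchecked_t API added elsewhere in this patch. */
typedef struct {
	int counter;
} atomic_unchecked_t;

#define ATOMIC_INIT(i) { (i) }

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain wrapping increment, no overflow trap inserted */
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}
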
fe2de317 32728diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
c6e2a6c8 32729index 6b811e3..f8acf88 100644
fe2de317
MT
32730--- a/drivers/infiniband/hw/qib/qib.h
32731+++ b/drivers/infiniband/hw/qib/qib.h
6892158b 32732@@ -51,6 +51,7 @@
57199397
MT
32733 #include <linux/completion.h>
32734 #include <linux/kref.h>
32735 #include <linux/sched.h>
32736+#include <linux/slab.h>
32737
32738 #include "qib_common.h"
32739 #include "qib_verbs.h"
fe2de317 32740diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
c6e2a6c8 32741index da739d9..da1c7f4 100644
fe2de317
MT
32742--- a/drivers/input/gameport/gameport.c
32743+++ b/drivers/input/gameport/gameport.c
c6e2a6c8 32744@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
8308f9c9
MT
32745 */
32746 static void gameport_init_port(struct gameport *gameport)
32747 {
32748- static atomic_t gameport_no = ATOMIC_INIT(0);
32749+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
32750
32751 __module_get(THIS_MODULE);
32752
32753 mutex_init(&gameport->drv_mutex);
32754 device_initialize(&gameport->dev);
32755 dev_set_name(&gameport->dev, "gameport%lu",
32756- (unsigned long)atomic_inc_return(&gameport_no) - 1);
32757+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
32758 gameport->dev.bus = &gameport_bus;
32759 gameport->dev.release = gameport_release_port;
32760 if (gameport->parent)
fe2de317 32761diff --git a/drivers/input/input.c b/drivers/input/input.c
c6e2a6c8 32762index 8921c61..f5cd63d 100644
fe2de317
MT
32763--- a/drivers/input/input.c
32764+++ b/drivers/input/input.c
32765@@ -1814,7 +1814,7 @@ static void input_cleanse_bitmasks(struct input_dev *dev)
8308f9c9
MT
32766 */
32767 int input_register_device(struct input_dev *dev)
32768 {
32769- static atomic_t input_no = ATOMIC_INIT(0);
32770+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
32771 struct input_handler *handler;
32772 const char *path;
32773 int error;
fe2de317 32774@@ -1851,7 +1851,7 @@ int input_register_device(struct input_dev *dev)
66a7e928 32775 dev->setkeycode = input_default_setkeycode;
8308f9c9
MT
32776
32777 dev_set_name(&dev->dev, "input%ld",
32778- (unsigned long) atomic_inc_return(&input_no) - 1);
32779+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
32780
32781 error = device_add(&dev->dev);
32782 if (error)
fe2de317 32783diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
4c928ab7 32784index b8d8611..7a4a04b 100644
fe2de317
MT
32785--- a/drivers/input/joystick/sidewinder.c
32786+++ b/drivers/input/joystick/sidewinder.c
66a7e928
MT
32787@@ -30,6 +30,7 @@
32788 #include <linux/kernel.h>
32789 #include <linux/module.h>
32790 #include <linux/slab.h>
32791+#include <linux/sched.h>
32792 #include <linux/init.h>
32793 #include <linux/input.h>
32794 #include <linux/gameport.h>
fe2de317 32795diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
572b4308 32796index 42f7b25..09fcf46 100644
fe2de317
MT
32797--- a/drivers/input/joystick/xpad.c
32798+++ b/drivers/input/joystick/xpad.c
572b4308 32799@@ -714,7 +714,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
8308f9c9
MT
32800
32801 static int xpad_led_probe(struct usb_xpad *xpad)
32802 {
32803- static atomic_t led_seq = ATOMIC_INIT(0);
32804+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
32805 long led_no;
32806 struct xpad_led *led;
32807 struct led_classdev *led_cdev;
572b4308 32808@@ -727,7 +727,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
8308f9c9
MT
32809 if (!led)
32810 return -ENOMEM;
32811
32812- led_no = (long)atomic_inc_return(&led_seq) - 1;
32813+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
32814
32815 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
32816 led->xpad = xpad;
fe2de317
MT
32817diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
32818index 0110b5a..d3ad144 100644
32819--- a/drivers/input/mousedev.c
32820+++ b/drivers/input/mousedev.c
32821@@ -763,7 +763,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
ae4e228f
MT
32822
32823 spin_unlock_irq(&client->packet_lock);
32824
32825- if (copy_to_user(buffer, data, count))
32826+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
32827 return -EFAULT;
32828
32829 return count;
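
Editor's note on the mousedev hunk above (and the later b1.c, icn.c and radio-cadet.c hunks): an explicit length check is placed directly in front of copy_to_user()/copy_from_user(), so a caller-supplied count can never exceed the fixed buffer even if earlier clamping logic is wrong. A self-contained sketch of the read-side pattern, using a stub in place of the real uaccess helper (assumed names, not from the patch):

/* Illustrative sketch only, not part of the patch. */
#include <errno.h>
#include <string.h>

/* stand-in for copy_to_user(); returns the number of bytes NOT copied */
static unsigned long copy_to_user_stub(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

static long read_packet(char *ubuf, unsigned long count)
{
	unsigned char data[8] = { 0 };	/* fixed buffer, as in mousedev_read() */

	if (count > sizeof(data))	/* normal clamping done by the driver... */
		count = sizeof(data);

	/* ...but the hardened form re-checks instead of trusting it */
	if (count > sizeof(data) || copy_to_user_stub(ubuf, data, count))
		return -EFAULT;
	return (long)count;
}
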
fe2de317 32830diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
c6e2a6c8 32831index d0f7533..fb8215b 100644
fe2de317
MT
32832--- a/drivers/input/serio/serio.c
32833+++ b/drivers/input/serio/serio.c
c6e2a6c8 32834@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
8308f9c9
MT
32835 */
32836 static void serio_init_port(struct serio *serio)
32837 {
32838- static atomic_t serio_no = ATOMIC_INIT(0);
32839+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
32840
32841 __module_get(THIS_MODULE);
32842
c6e2a6c8 32843@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
8308f9c9
MT
32844 mutex_init(&serio->drv_mutex);
32845 device_initialize(&serio->dev);
32846 dev_set_name(&serio->dev, "serio%ld",
32847- (long)atomic_inc_return(&serio_no) - 1);
32848+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
32849 serio->dev.bus = &serio_bus;
32850 serio->dev.release = serio_release_port;
32851 serio->dev.groups = serio_device_attr_groups;
fe2de317 32852diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
c6e2a6c8 32853index b902794..fc7b85b 100644
fe2de317
MT
32854--- a/drivers/isdn/capi/capi.c
32855+++ b/drivers/isdn/capi/capi.c
15a11c5b 32856@@ -83,8 +83,8 @@ struct capiminor {
8308f9c9
MT
32857
32858 struct capi20_appl *ap;
32859 u32 ncci;
32860- atomic_t datahandle;
32861- atomic_t msgid;
32862+ atomic_unchecked_t datahandle;
32863+ atomic_unchecked_t msgid;
32864
32865 struct tty_port port;
32866 int ttyinstop;
fe2de317 32867@@ -397,7 +397,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
8308f9c9
MT
32868 capimsg_setu16(s, 2, mp->ap->applid);
32869 capimsg_setu8 (s, 4, CAPI_DATA_B3);
32870 capimsg_setu8 (s, 5, CAPI_RESP);
32871- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
32872+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
32873 capimsg_setu32(s, 8, mp->ncci);
32874 capimsg_setu16(s, 12, datahandle);
32875 }
fe2de317 32876@@ -518,14 +518,14 @@ static void handle_minor_send(struct capiminor *mp)
8308f9c9
MT
32877 mp->outbytes -= len;
32878 spin_unlock_bh(&mp->outlock);
32879
32880- datahandle = atomic_inc_return(&mp->datahandle);
32881+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
32882 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
32883 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32884 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
32885 capimsg_setu16(skb->data, 2, mp->ap->applid);
32886 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
32887 capimsg_setu8 (skb->data, 5, CAPI_REQ);
32888- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
32889+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
32890 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
32891 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
32892 capimsg_setu16(skb->data, 16, len); /* Data length */
fe2de317 32893diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
c6e2a6c8 32894index 821f7ac..28d4030 100644
fe2de317
MT
32895--- a/drivers/isdn/hardware/avm/b1.c
32896+++ b/drivers/isdn/hardware/avm/b1.c
c6e2a6c8 32897@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
ae4e228f
MT
32898 }
32899 if (left) {
32900 if (t4file->user) {
32901- if (copy_from_user(buf, dp, left))
bc901d79 32902+ if (left > sizeof buf || copy_from_user(buf, dp, left))
ae4e228f
MT
32903 return -EFAULT;
32904 } else {
32905 memcpy(buf, dp, left);
c6e2a6c8 32906@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
ae4e228f
MT
32907 }
32908 if (left) {
32909 if (config->user) {
32910- if (copy_from_user(buf, dp, left))
bc901d79 32911+ if (left > sizeof buf || copy_from_user(buf, dp, left))
ae4e228f
MT
32912 return -EFAULT;
32913 } else {
32914 memcpy(buf, dp, left);
fe2de317 32915diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
c6e2a6c8 32916index dd6b53a..19d9ee6 100644
fe2de317
MT
32917--- a/drivers/isdn/hardware/eicon/divasync.h
32918+++ b/drivers/isdn/hardware/eicon/divasync.h
15a11c5b
MT
32919@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
32920 } diva_didd_add_adapter_t;
32921 typedef struct _diva_didd_remove_adapter {
c6e2a6c8 32922 IDI_CALL p_request;
15a11c5b
MT
32923-} diva_didd_remove_adapter_t;
32924+} __no_const diva_didd_remove_adapter_t;
32925 typedef struct _diva_didd_read_adapter_array {
c6e2a6c8
MT
32926 void *buffer;
32927 dword length;
fe2de317 32928diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
c6e2a6c8 32929index d303e65..28bcb7b 100644
fe2de317
MT
32930--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
32931+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
15a11c5b
MT
32932@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
32933 typedef struct _diva_os_idi_adapter_interface {
32934 diva_init_card_proc_t cleanup_adapter_proc;
32935 diva_cmd_card_proc_t cmd_proc;
32936-} diva_os_idi_adapter_interface_t;
32937+} __no_const diva_os_idi_adapter_interface_t;
32938
32939 typedef struct _diva_os_xdi_adapter {
32940 struct list_head link;
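
Editor's note on the two Eicon header hunks above (and similar ones later, e.g. dvbdev.c and dm-space-map.h): structures made up of function pointers are tagged __no_const. grsecurity's constify gcc plugin makes such ops-style structures const by default, so an instance that really is written at run time has to opt out with this attribute. A sketch of the opt-out, with the plugin attribute assumed and simplified (not from the patch):

/* Illustrative sketch only, not part of the patch. */
#ifdef CONSTIFY_PLUGIN
#define __no_const __attribute__((no_const))
#else
#define __no_const
#endif

/* all-function-pointer struct: constified by the plugin by default */
struct adapter_ops {
	int (*cleanup)(void *adapter);
	int (*command)(void *adapter, int cmd);
};

/* this instance is filled in at probe time, so it must stay writable */
typedef struct adapter_ops __no_const adapter_ops_no_const;

struct adapter {
	adapter_ops_no_const ops;
	void *priv;
};
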
fe2de317 32941diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
c6e2a6c8 32942index e74df7c..03a03ba 100644
fe2de317
MT
32943--- a/drivers/isdn/icn/icn.c
32944+++ b/drivers/isdn/icn/icn.c
c6e2a6c8 32945@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
ae4e228f
MT
32946 if (count > len)
32947 count = len;
32948 if (user) {
32949- if (copy_from_user(msg, buf, count))
bc901d79 32950+ if (count > sizeof msg || copy_from_user(msg, buf, count))
ae4e228f
MT
32951 return -EFAULT;
32952 } else
32953 memcpy(msg, buf, count);
c6e2a6c8
MT
32954diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
32955index 8bc4915..4cc6a2e 100644
32956--- a/drivers/leds/leds-mc13783.c
32957+++ b/drivers/leds/leds-mc13783.c
32958@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
32959 return -EINVAL;
32960 }
32961
32962- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
32963+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
32964 if (led == NULL) {
32965 dev_err(&pdev->dev, "failed to alloc memory\n");
32966 return -ENOMEM;
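
Editor's note on the leds-mc13783 hunk above: kzalloc(sizeof(*led) * pdata->num_leds, ...) becomes kcalloc(pdata->num_leds, sizeof(*led), ...). The point is the multiplication: kcalloc checks num * size for overflow before allocating, while the open-coded product can wrap and silently under-allocate. A userspace sketch of the same guard (assumed helper name, not from the patch):

/* Illustrative sketch only, not part of the patch. */
#include <stdint.h>
#include <stdlib.h>

/* zeroed array allocation that refuses an overflowing n * size */
static void *calloc_checked(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* n * size would wrap */
	return calloc(n, size);		/* zeroed, like kzalloc/kcalloc */
}
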
fe2de317 32967diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
4c928ab7 32968index b5fdcb7..5b6c59f 100644
fe2de317
MT
32969--- a/drivers/lguest/core.c
32970+++ b/drivers/lguest/core.c
df50ba0c 32971@@ -92,9 +92,17 @@ static __init int map_switcher(void)
58c5fc13
MT
32972 * it's worked so far. The end address needs +1 because __get_vm_area
32973 * allocates an extra guard page, so we need space for that.
32974 */
32975+
32976+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
32977+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32978+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
32979+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32980+#else
32981 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
32982 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
32983 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
32984+#endif
32985+
32986 if (!switcher_vma) {
32987 err = -ENOMEM;
32988 printk("lguest: could not map switcher pages high\n");
bc901d79
MT
32989@@ -119,7 +127,7 @@ static __init int map_switcher(void)
32990 * Now the Switcher is mapped at the right address, we can't fail!
6e9df6a3 32991 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
bc901d79
MT
32992 */
32993- memcpy(switcher_vma->addr, start_switcher_text,
32994+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
32995 end_switcher_text - start_switcher_text);
32996
32997 printk(KERN_INFO "lguest: mapped switcher at %p\n",
fe2de317 32998diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
572b4308 32999index 39809035..ce25c5e 100644
fe2de317
MT
33000--- a/drivers/lguest/x86/core.c
33001+++ b/drivers/lguest/x86/core.c
bc901d79
MT
33002@@ -59,7 +59,7 @@ static struct {
33003 /* Offset from where switcher.S was compiled to where we've copied it */
33004 static unsigned long switcher_offset(void)
33005 {
33006- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
33007+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
33008 }
33009
33010 /* This cpu's struct lguest_pages. */
fe2de317 33011@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
bc901d79
MT
33012 * These copies are pretty cheap, so we do them unconditionally: */
33013 /* Save the current Host top-level page directory.
33014 */
33015+
33016+#ifdef CONFIG_PAX_PER_CPU_PGD
33017+ pages->state.host_cr3 = read_cr3();
33018+#else
33019 pages->state.host_cr3 = __pa(current->mm->pgd);
33020+#endif
33021+
33022 /*
33023 * Set up the Guest's page tables to see this CPU's pages (and no
33024 * other CPU's pages).
6e9df6a3 33025@@ -472,7 +478,7 @@ void __init lguest_arch_host_init(void)
bc901d79
MT
33026 * compiled-in switcher code and the high-mapped copy we just made.
33027 */
33028 for (i = 0; i < IDT_ENTRIES; i++)
33029- default_idt_entries[i] += switcher_offset();
33030+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
33031
33032 /*
33033 * Set up the Switcher's per-cpu areas.
6e9df6a3 33034@@ -555,7 +561,7 @@ void __init lguest_arch_host_init(void)
bc901d79
MT
33035 * it will be undisturbed when we switch. To change %cs and jump we
33036 * need this structure to feed to Intel's "lcall" instruction.
33037 */
33038- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
33039+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
33040 lguest_entry.segment = LGUEST_CS;
33041
33042 /*
fe2de317
MT
33043diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
33044index 40634b0..4f5855e 100644
33045--- a/drivers/lguest/x86/switcher_32.S
33046+++ b/drivers/lguest/x86/switcher_32.S
bc901d79
MT
33047@@ -87,6 +87,7 @@
33048 #include <asm/page.h>
33049 #include <asm/segment.h>
33050 #include <asm/lguest.h>
33051+#include <asm/processor-flags.h>
33052
33053 // We mark the start of the code to copy
33054 // It's placed in .text tho it's never run here
33055@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
33056 // Changes type when we load it: damn Intel!
33057 // For after we switch over our page tables
33058 // That entry will be read-only: we'd crash.
33059+
33060+#ifdef CONFIG_PAX_KERNEXEC
33061+ mov %cr0, %edx
33062+ xor $X86_CR0_WP, %edx
33063+ mov %edx, %cr0
33064+#endif
33065+
33066 movl $(GDT_ENTRY_TSS*8), %edx
33067 ltr %dx
33068
33069@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
33070 // Let's clear it again for our return.
33071 // The GDT descriptor of the Host
33072 // Points to the table after two "size" bytes
33073- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
33074+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
33075 // Clear "used" from type field (byte 5, bit 2)
33076- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
33077+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
33078+
33079+#ifdef CONFIG_PAX_KERNEXEC
33080+ mov %cr0, %eax
33081+ xor $X86_CR0_WP, %eax
33082+ mov %eax, %cr0
33083+#endif
33084
33085 // Once our page table's switched, the Guest is live!
33086 // The Host fades as we run this final step.
33087@@ -295,13 +309,12 @@ deliver_to_host:
33088 // I consulted gcc, and it gave
33089 // These instructions, which I gladly credit:
33090 leal (%edx,%ebx,8), %eax
33091- movzwl (%eax),%edx
33092- movl 4(%eax), %eax
33093- xorw %ax, %ax
33094- orl %eax, %edx
33095+ movl 4(%eax), %edx
33096+ movw (%eax), %dx
33097 // Now the address of the handler's in %edx
33098 // We call it now: its "iret" drops us home.
33099- jmp *%edx
33100+ ljmp $__KERNEL_CS, $1f
33101+1: jmp *%edx
33102
33103 // Every interrupt can come to us here
33104 // But we must truly tell each apart.
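
Editor's note on the switcher_32.S hunk above: under KERNEXEC the GDT/TSS entry being cleared is in read-only memory, so the store is bracketed by two CR0 updates; xor-ing the write-protect bit clears CR0.WP before the write and the second xor restores it. A C-with-inline-asm sketch of the same toggle, valid only in kernel (ring 0) context and using simplified helpers that are not taken from the patch:

/* Illustrative sketch only, not part of the patch. */
#define CR0_WP_MASK	(1UL << 16)	/* the WP bit of CR0 */

static inline unsigned long read_cr0_sketch(void)
{
	unsigned long cr0;
	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0_sketch(unsigned long cr0)
{
	asm volatile("mov %0, %%cr0" : : "r" (cr0));
}

static inline void flip_wp_sketch(void)
{
	/* called once to drop WP before the store, once to restore it */
	write_cr0_sketch(read_cr0_sketch() ^ CR0_WP_MASK);
}
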
fe2de317 33105diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
c6e2a6c8 33106index 20e5c2c..9e849a9 100644
fe2de317
MT
33107--- a/drivers/macintosh/macio_asic.c
33108+++ b/drivers/macintosh/macio_asic.c
33109@@ -748,7 +748,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
6e9df6a3
MT
33110 * MacIO is matched against any Apple ID, it's probe() function
33111 * will then decide wether it applies or not
33112 */
33113-static const struct pci_device_id __devinitdata pci_ids [] = { {
33114+static const struct pci_device_id __devinitconst pci_ids [] = { {
33115 .vendor = PCI_VENDOR_ID_APPLE,
33116 .device = PCI_ANY_ID,
33117 .subvendor = PCI_ANY_ID,
c6e2a6c8
MT
33118diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
33119index 17e2b47..bcbeec4 100644
33120--- a/drivers/md/bitmap.c
33121+++ b/drivers/md/bitmap.c
33122@@ -1823,7 +1823,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
33123 chunk_kb ? "KB" : "B");
33124 if (bitmap->file) {
33125 seq_printf(seq, ", file: ");
33126- seq_path(seq, &bitmap->file->f_path, " \t\n");
33127+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
33128 }
33129
33130 seq_printf(seq, "\n");
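
Editor's note on the bitmap.c hunk above: '\' is added to the characters seq_path() escapes when printing the bitmap file name. seq_path() replaces every character found in its escape string with a \ooo octal sequence, so escaping the backslash itself keeps a crafted file name from imitating an escape in the seq output. A small userspace sketch of that escaping rule (not the kernel implementation):

/* Illustrative sketch only, not part of the patch. */
#include <stdio.h>
#include <string.h>

/* print path, replacing any character found in esc with a \ooo escape */
static void print_escaped(const char *path, const char *esc)
{
	for (const char *p = path; *p; p++) {
		if (strchr(esc, *p))
			printf("\\%03o", (unsigned char)*p);
		else
			putchar((unsigned char)*p);
	}
	putchar('\n');
}

/* e.g. print_escaped("/mnt/md bitmap\\file", " \t\n\\"); */
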
fe2de317 33131diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
c6e2a6c8 33132index a1a3e6d..1918bfc 100644
fe2de317
MT
33133--- a/drivers/md/dm-ioctl.c
33134+++ b/drivers/md/dm-ioctl.c
c6e2a6c8 33135@@ -1590,7 +1590,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
16454cff
MT
33136 cmd == DM_LIST_VERSIONS_CMD)
33137 return 0;
33138
33139- if ((cmd == DM_DEV_CREATE_CMD)) {
33140+ if (cmd == DM_DEV_CREATE_CMD) {
33141 if (!*param->name) {
33142 DMWARN("name not supplied when creating device");
33143 return -EINVAL;
fe2de317 33144diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
c6e2a6c8 33145index d039de8..0cf5b87 100644
fe2de317
MT
33146--- a/drivers/md/dm-raid1.c
33147+++ b/drivers/md/dm-raid1.c
15a11c5b 33148@@ -40,7 +40,7 @@ enum dm_raid1_error {
8308f9c9
MT
33149
33150 struct mirror {
33151 struct mirror_set *ms;
33152- atomic_t error_count;
33153+ atomic_unchecked_t error_count;
33154 unsigned long error_type;
33155 struct dm_dev *dev;
33156 sector_t offset;
fe2de317 33157@@ -185,7 +185,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
8308f9c9
MT
33158 struct mirror *m;
33159
33160 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
33161- if (!atomic_read(&m->error_count))
33162+ if (!atomic_read_unchecked(&m->error_count))
33163 return m;
33164
33165 return NULL;
fe2de317 33166@@ -217,7 +217,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
8308f9c9
MT
33167 * simple way to tell if a device has encountered
33168 * errors.
33169 */
33170- atomic_inc(&m->error_count);
33171+ atomic_inc_unchecked(&m->error_count);
33172
33173 if (test_and_set_bit(error_type, &m->error_type))
33174 return;
fe2de317 33175@@ -408,7 +408,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
8308f9c9
MT
33176 struct mirror *m = get_default_mirror(ms);
33177
33178 do {
33179- if (likely(!atomic_read(&m->error_count)))
33180+ if (likely(!atomic_read_unchecked(&m->error_count)))
33181 return m;
33182
33183 if (m-- == ms->mirror)
15a11c5b 33184@@ -422,7 +422,7 @@ static int default_ok(struct mirror *m)
8308f9c9
MT
33185 {
33186 struct mirror *default_mirror = get_default_mirror(m->ms);
33187
33188- return !atomic_read(&default_mirror->error_count);
33189+ return !atomic_read_unchecked(&default_mirror->error_count);
33190 }
33191
33192 static int mirror_available(struct mirror_set *ms, struct bio *bio)
fe2de317 33193@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
8308f9c9
MT
33194 */
33195 if (likely(region_in_sync(ms, region, 1)))
33196 m = choose_mirror(ms, bio->bi_sector);
33197- else if (m && atomic_read(&m->error_count))
33198+ else if (m && atomic_read_unchecked(&m->error_count))
33199 m = NULL;
33200
33201 if (likely(m))
c6e2a6c8 33202@@ -938,7 +938,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
8308f9c9
MT
33203 }
33204
33205 ms->mirror[mirror].ms = ms;
33206- atomic_set(&(ms->mirror[mirror].error_count), 0);
33207+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
33208 ms->mirror[mirror].error_type = 0;
33209 ms->mirror[mirror].offset = offset;
33210
c6e2a6c8 33211@@ -1351,7 +1351,7 @@ static void mirror_resume(struct dm_target *ti)
8308f9c9
MT
33212 */
33213 static char device_status_char(struct mirror *m)
33214 {
33215- if (!atomic_read(&(m->error_count)))
33216+ if (!atomic_read_unchecked(&(m->error_count)))
33217 return 'A';
33218
33219 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
fe2de317 33220diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
c6e2a6c8 33221index 35c94ff..20d4c17 100644
fe2de317
MT
33222--- a/drivers/md/dm-stripe.c
33223+++ b/drivers/md/dm-stripe.c
8308f9c9
MT
33224@@ -20,7 +20,7 @@ struct stripe {
33225 struct dm_dev *dev;
33226 sector_t physical_start;
33227
33228- atomic_t error_count;
33229+ atomic_unchecked_t error_count;
33230 };
33231
33232 struct stripe_c {
c6e2a6c8 33233@@ -193,7 +193,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
8308f9c9
MT
33234 kfree(sc);
33235 return r;
33236 }
33237- atomic_set(&(sc->stripe[i].error_count), 0);
33238+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
33239 }
33240
33241 ti->private = sc;
c6e2a6c8 33242@@ -315,7 +315,7 @@ static int stripe_status(struct dm_target *ti,
8308f9c9
MT
33243 DMEMIT("%d ", sc->stripes);
33244 for (i = 0; i < sc->stripes; i++) {
33245 DMEMIT("%s ", sc->stripe[i].dev->name);
33246- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
33247+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
33248 'D' : 'A';
33249 }
33250 buffer[i] = '\0';
c6e2a6c8 33251@@ -362,8 +362,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
8308f9c9
MT
33252 */
33253 for (i = 0; i < sc->stripes; i++)
33254 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
33255- atomic_inc(&(sc->stripe[i].error_count));
33256- if (atomic_read(&(sc->stripe[i].error_count)) <
33257+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
33258+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
33259 DM_IO_ERROR_THRESHOLD)
33260 schedule_work(&sc->trigger_event);
33261 }
fe2de317 33262diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
c6e2a6c8 33263index 2e227fb..44ead1f 100644
fe2de317
MT
33264--- a/drivers/md/dm-table.c
33265+++ b/drivers/md/dm-table.c
c6e2a6c8 33266@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
58c5fc13
MT
33267 if (!dev_size)
33268 return 0;
33269
33270- if ((start >= dev_size) || (start + len > dev_size)) {
33271+ if ((start >= dev_size) || (len > dev_size - start)) {
33272 DMWARN("%s: %s too small for target: "
33273 "start=%llu, len=%llu, dev_size=%llu",
33274 dm_device_name(ti->table->md), bdevname(bdev, b),
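
Editor's note on the dm-table.c hunk above: the target-bounds test is rewritten from (start + len > dev_size) to (len > dev_size - start). Once the first clause has established start < dev_size, the two forms agree, but the rewritten one cannot wrap when start + len overflows the sector type. A sketch of the check (assumed function name, not from the patch):

/* Illustrative sketch only, not part of the patch. */
#include <stdbool.h>
#include <stdint.h>

/* true if [start, start + len) does not fit inside dev_size sectors */
static bool area_is_invalid(uint64_t start, uint64_t len, uint64_t dev_size)
{
	if (!dev_size)
		return false;	/* unknown size: nothing to check, as above */

	/* len > dev_size - start cannot wrap, unlike start + len > dev_size */
	return start >= dev_size || len > dev_size - start;
}
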
4c928ab7 33275diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
c6e2a6c8 33276index 737d388..811ad5a 100644
4c928ab7
MT
33277--- a/drivers/md/dm-thin-metadata.c
33278+++ b/drivers/md/dm-thin-metadata.c
33279@@ -432,7 +432,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33280
33281 pmd->info.tm = tm;
33282 pmd->info.levels = 2;
33283- pmd->info.value_type.context = pmd->data_sm;
33284+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33285 pmd->info.value_type.size = sizeof(__le64);
33286 pmd->info.value_type.inc = data_block_inc;
33287 pmd->info.value_type.dec = data_block_dec;
33288@@ -451,7 +451,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
33289
33290 pmd->bl_info.tm = tm;
33291 pmd->bl_info.levels = 1;
33292- pmd->bl_info.value_type.context = pmd->data_sm;
33293+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
33294 pmd->bl_info.value_type.size = sizeof(__le64);
33295 pmd->bl_info.value_type.inc = data_block_inc;
33296 pmd->bl_info.value_type.dec = data_block_dec;
fe2de317 33297diff --git a/drivers/md/dm.c b/drivers/md/dm.c
c6e2a6c8 33298index e24143c..ce2f21a1 100644
fe2de317
MT
33299--- a/drivers/md/dm.c
33300+++ b/drivers/md/dm.c
5e856224 33301@@ -176,9 +176,9 @@ struct mapped_device {
fe2de317
MT
33302 /*
33303 * Event handling.
33304 */
33305- atomic_t event_nr;
33306+ atomic_unchecked_t event_nr;
33307 wait_queue_head_t eventq;
33308- atomic_t uevent_seq;
33309+ atomic_unchecked_t uevent_seq;
33310 struct list_head uevent_list;
33311 spinlock_t uevent_lock; /* Protect access to uevent_list */
33312
c6e2a6c8 33313@@ -1845,8 +1845,8 @@ static struct mapped_device *alloc_dev(int minor)
fe2de317
MT
33314 rwlock_init(&md->map_lock);
33315 atomic_set(&md->holders, 1);
33316 atomic_set(&md->open_count, 0);
33317- atomic_set(&md->event_nr, 0);
33318- atomic_set(&md->uevent_seq, 0);
33319+ atomic_set_unchecked(&md->event_nr, 0);
33320+ atomic_set_unchecked(&md->uevent_seq, 0);
33321 INIT_LIST_HEAD(&md->uevent_list);
33322 spin_lock_init(&md->uevent_lock);
33323
c6e2a6c8 33324@@ -1980,7 +1980,7 @@ static void event_callback(void *context)
fe2de317
MT
33325
33326 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
33327
33328- atomic_inc(&md->event_nr);
33329+ atomic_inc_unchecked(&md->event_nr);
33330 wake_up(&md->eventq);
33331 }
33332
c6e2a6c8 33333@@ -2622,18 +2622,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
fe2de317
MT
33334
33335 uint32_t dm_next_uevent_seq(struct mapped_device *md)
33336 {
33337- return atomic_add_return(1, &md->uevent_seq);
33338+ return atomic_add_return_unchecked(1, &md->uevent_seq);
33339 }
33340
33341 uint32_t dm_get_event_nr(struct mapped_device *md)
33342 {
33343- return atomic_read(&md->event_nr);
33344+ return atomic_read_unchecked(&md->event_nr);
33345 }
33346
33347 int dm_wait_event(struct mapped_device *md, int event_nr)
33348 {
33349 return wait_event_interruptible(md->eventq,
33350- (event_nr != atomic_read(&md->event_nr)));
33351+ (event_nr != atomic_read_unchecked(&md->event_nr)));
33352 }
33353
33354 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
33355diff --git a/drivers/md/md.c b/drivers/md/md.c
572b4308 33356index 2b30ffd..362b519 100644
fe2de317
MT
33357--- a/drivers/md/md.c
33358+++ b/drivers/md/md.c
5e856224 33359@@ -277,10 +277,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
8308f9c9
MT
33360 * start build, activate spare
33361 */
33362 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
33363-static atomic_t md_event_count;
33364+static atomic_unchecked_t md_event_count;
4c928ab7 33365 void md_new_event(struct mddev *mddev)
8308f9c9
MT
33366 {
33367- atomic_inc(&md_event_count);
33368+ atomic_inc_unchecked(&md_event_count);
33369 wake_up(&md_event_waiters);
33370 }
33371 EXPORT_SYMBOL_GPL(md_new_event);
5e856224 33372@@ -290,7 +290,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
8308f9c9 33373 */
4c928ab7 33374 static void md_new_event_inintr(struct mddev *mddev)
8308f9c9
MT
33375 {
33376- atomic_inc(&md_event_count);
33377+ atomic_inc_unchecked(&md_event_count);
33378 wake_up(&md_event_waiters);
33379 }
33380
c6e2a6c8 33381@@ -1526,7 +1526,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
8308f9c9
MT
33382
33383 rdev->preferred_minor = 0xffff;
33384 rdev->data_offset = le64_to_cpu(sb->data_offset);
33385- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33386+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
33387
33388 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
33389 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
c6e2a6c8 33390@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
8308f9c9
MT
33391 else
33392 sb->resync_offset = cpu_to_le64(0);
33393
33394- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
33395+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
33396
33397 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
33398 sb->size = cpu_to_le64(mddev->dev_sectors);
c6e2a6c8 33399@@ -2691,7 +2691,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
8308f9c9 33400 static ssize_t
4c928ab7 33401 errors_show(struct md_rdev *rdev, char *page)
8308f9c9
MT
33402 {
33403- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
33404+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
33405 }
33406
33407 static ssize_t
c6e2a6c8 33408@@ -2700,7 +2700,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
8308f9c9
MT
33409 char *e;
33410 unsigned long n = simple_strtoul(buf, &e, 10);
33411 if (*buf && (*e == 0 || *e == '\n')) {
33412- atomic_set(&rdev->corrected_errors, n);
33413+ atomic_set_unchecked(&rdev->corrected_errors, n);
33414 return len;
33415 }
33416 return -EINVAL;
c6e2a6c8 33417@@ -3086,8 +3086,8 @@ int md_rdev_init(struct md_rdev *rdev)
6e9df6a3
MT
33418 rdev->sb_loaded = 0;
33419 rdev->bb_page = NULL;
8308f9c9
MT
33420 atomic_set(&rdev->nr_pending, 0);
33421- atomic_set(&rdev->read_errors, 0);
33422- atomic_set(&rdev->corrected_errors, 0);
33423+ atomic_set_unchecked(&rdev->read_errors, 0);
33424+ atomic_set_unchecked(&rdev->corrected_errors, 0);
33425
33426 INIT_LIST_HEAD(&rdev->same_set);
33427 init_waitqueue_head(&rdev->blocked_wait);
572b4308
MT
33428@@ -3744,8 +3744,8 @@ array_state_show(struct mddev *mddev, char *page)
33429 return sprintf(page, "%s\n", array_states[st]);
33430 }
33431
33432-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
33433-static int md_set_readonly(struct mddev * mddev, int is_open);
33434+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
33435+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
33436 static int do_md_run(struct mddev * mddev);
33437 static int restart_array(struct mddev *mddev);
33438
33439@@ -3761,14 +3761,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33440 /* stopping an active array */
33441 if (atomic_read(&mddev->openers) > 0)
33442 return -EBUSY;
33443- err = do_md_stop(mddev, 0, 0);
33444+ err = do_md_stop(mddev, 0, NULL);
33445 break;
33446 case inactive:
33447 /* stopping an active array */
33448 if (mddev->pers) {
33449 if (atomic_read(&mddev->openers) > 0)
33450 return -EBUSY;
33451- err = do_md_stop(mddev, 2, 0);
33452+ err = do_md_stop(mddev, 2, NULL);
33453 } else
33454 err = 0; /* already inactive */
33455 break;
33456@@ -3776,7 +3776,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33457 break; /* not supported yet */
33458 case readonly:
33459 if (mddev->pers)
33460- err = md_set_readonly(mddev, 0);
33461+ err = md_set_readonly(mddev, NULL);
33462 else {
33463 mddev->ro = 1;
33464 set_disk_ro(mddev->gendisk, 1);
33465@@ -3786,7 +3786,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
33466 case read_auto:
33467 if (mddev->pers) {
33468 if (mddev->ro == 0)
33469- err = md_set_readonly(mddev, 0);
33470+ err = md_set_readonly(mddev, NULL);
33471 else if (mddev->ro == 1)
33472 err = restart_array(mddev);
33473 if (err == 0) {
33474@@ -5124,15 +5124,17 @@ void md_stop(struct mddev *mddev)
33475 }
33476 EXPORT_SYMBOL_GPL(md_stop);
33477
33478-static int md_set_readonly(struct mddev *mddev, int is_open)
33479+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
33480 {
33481 int err = 0;
33482 mutex_lock(&mddev->open_mutex);
33483- if (atomic_read(&mddev->openers) > is_open) {
33484+ if (atomic_read(&mddev->openers) > !!bdev) {
33485 printk("md: %s still in use.\n",mdname(mddev));
33486 err = -EBUSY;
33487 goto out;
33488 }
33489+ if (bdev)
33490+ sync_blockdev(bdev);
33491 if (mddev->pers) {
33492 __md_stop_writes(mddev);
33493
33494@@ -5154,18 +5156,26 @@ out:
33495 * 0 - completely stop and dis-assemble array
33496 * 2 - stop but do not disassemble array
33497 */
33498-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
33499+static int do_md_stop(struct mddev * mddev, int mode,
33500+ struct block_device *bdev)
33501 {
33502 struct gendisk *disk = mddev->gendisk;
33503 struct md_rdev *rdev;
33504
33505 mutex_lock(&mddev->open_mutex);
33506- if (atomic_read(&mddev->openers) > is_open ||
33507+ if (atomic_read(&mddev->openers) > !!bdev ||
33508 mddev->sysfs_active) {
33509 printk("md: %s still in use.\n",mdname(mddev));
33510 mutex_unlock(&mddev->open_mutex);
33511 return -EBUSY;
33512 }
33513+ if (bdev)
33514+ /* It is possible IO was issued on some other
33515+ * open file which was closed before we took ->open_mutex.
33516+ * As that was not the last close __blkdev_put will not
33517+ * have called sync_blockdev, so we must.
33518+ */
33519+ sync_blockdev(bdev);
33520
33521 if (mddev->pers) {
33522 if (mddev->ro)
33523@@ -5239,7 +5249,7 @@ static void autorun_array(struct mddev *mddev)
33524 err = do_md_run(mddev);
33525 if (err) {
33526 printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
33527- do_md_stop(mddev, 0, 0);
33528+ do_md_stop(mddev, 0, NULL);
33529 }
33530 }
33531
33532@@ -6237,11 +6247,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
33533 goto done_unlock;
33534
33535 case STOP_ARRAY:
33536- err = do_md_stop(mddev, 0, 1);
33537+ err = do_md_stop(mddev, 0, bdev);
33538 goto done_unlock;
33539
33540 case STOP_ARRAY_RO:
33541- err = md_set_readonly(mddev, 1);
33542+ err = md_set_readonly(mddev, bdev);
33543 goto done_unlock;
33544
33545 case BLKROSET:
33546@@ -6738,7 +6748,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
8308f9c9
MT
33547
33548 spin_unlock(&pers_lock);
33549 seq_printf(seq, "\n");
6e9df6a3
MT
33550- seq->poll_event = atomic_read(&md_event_count);
33551+ seq->poll_event = atomic_read_unchecked(&md_event_count);
8308f9c9
MT
33552 return 0;
33553 }
33554 if (v == (void*)2) {
572b4308 33555@@ -6841,7 +6851,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
6e9df6a3
MT
33556 return error;
33557
33558 seq = file->private_data;
33559- seq->poll_event = atomic_read(&md_event_count);
33560+ seq->poll_event = atomic_read_unchecked(&md_event_count);
8308f9c9
MT
33561 return error;
33562 }
6e9df6a3 33563
572b4308 33564@@ -6855,7 +6865,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
8308f9c9
MT
33565 /* always allow read */
33566 mask = POLLIN | POLLRDNORM;
33567
6e9df6a3
MT
33568- if (seq->poll_event != atomic_read(&md_event_count))
33569+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
8308f9c9
MT
33570 mask |= POLLERR | POLLPRI;
33571 return mask;
33572 }
572b4308 33573@@ -6899,7 +6909,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
58c5fc13
MT
33574 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
33575 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
33576 (int)part_stat_read(&disk->part0, sectors[1]) -
33577- atomic_read(&disk->sync_io);
33578+ atomic_read_unchecked(&disk->sync_io);
33579 /* sync IO will cause sync_io to increase before the disk_stats
33580 * as sync_io is counted when a request starts, and
33581 * disk_stats is counted when it completes.
fe2de317 33582diff --git a/drivers/md/md.h b/drivers/md/md.h
c6e2a6c8 33583index 1c2063c..9639970 100644
fe2de317
MT
33584--- a/drivers/md/md.h
33585+++ b/drivers/md/md.h
5e856224 33586@@ -93,13 +93,13 @@ struct md_rdev {
8308f9c9
MT
33587 * only maintained for arrays that
33588 * support hot removal
33589 */
33590- atomic_t read_errors; /* number of consecutive read errors that
33591+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
33592 * we have tried to ignore.
33593 */
33594 struct timespec last_read_error; /* monotonic time since our
33595 * last read error
33596 */
33597- atomic_t corrected_errors; /* number of corrected read errors,
33598+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
33599 * for reporting to userspace and storing
33600 * in superblock.
33601 */
c6e2a6c8 33602@@ -429,7 +429,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
58c5fc13
MT
33603
33604 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
33605 {
33606- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33607+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
33608 }
33609
4c928ab7
MT
33610 struct md_personality
33611diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
572b4308 33612index fc90c11..c8cd9a9 100644
4c928ab7
MT
33613--- a/drivers/md/persistent-data/dm-space-map-checker.c
33614+++ b/drivers/md/persistent-data/dm-space-map-checker.c
572b4308 33615@@ -167,7 +167,7 @@ static int ca_commit(struct count_array *old, struct count_array *new)
4c928ab7
MT
33616 /*----------------------------------------------------------------*/
33617
33618 struct sm_checker {
33619- struct dm_space_map sm;
33620+ dm_space_map_no_const sm;
33621
33622 struct count_array old_counts;
33623 struct count_array counts;
33624diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
572b4308 33625index 3d0ed53..35dc592 100644
4c928ab7
MT
33626--- a/drivers/md/persistent-data/dm-space-map-disk.c
33627+++ b/drivers/md/persistent-data/dm-space-map-disk.c
33628@@ -23,7 +23,7 @@
33629 * Space map interface.
33630 */
33631 struct sm_disk {
33632- struct dm_space_map sm;
33633+ dm_space_map_no_const sm;
33634
33635 struct ll_disk ll;
33636 struct ll_disk old_ll;
33637diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
33638index e89ae5e..062e4c2 100644
33639--- a/drivers/md/persistent-data/dm-space-map-metadata.c
33640+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
33641@@ -43,7 +43,7 @@ struct block_op {
33642 };
33643
33644 struct sm_metadata {
33645- struct dm_space_map sm;
33646+ dm_space_map_no_const sm;
33647
33648 struct ll_disk ll;
33649 struct ll_disk old_ll;
33650diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
33651index 1cbfc6b..56e1dbb 100644
33652--- a/drivers/md/persistent-data/dm-space-map.h
33653+++ b/drivers/md/persistent-data/dm-space-map.h
33654@@ -60,6 +60,7 @@ struct dm_space_map {
33655 int (*root_size)(struct dm_space_map *sm, size_t *result);
33656 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
33657 };
33658+typedef struct dm_space_map __no_const dm_space_map_no_const;
33659
33660 /*----------------------------------------------------------------*/
33661
fe2de317 33662diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
572b4308 33663index d1f74ab..d1b24fd 100644
fe2de317
MT
33664--- a/drivers/md/raid1.c
33665+++ b/drivers/md/raid1.c
c6e2a6c8 33666@@ -1688,7 +1688,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
fe2de317
MT
33667 if (r1_sync_page_io(rdev, sect, s,
33668 bio->bi_io_vec[idx].bv_page,
33669 READ) != 0)
33670- atomic_add(s, &rdev->corrected_errors);
33671+ atomic_add_unchecked(s, &rdev->corrected_errors);
33672 }
33673 sectors -= s;
33674 sect += s;
c6e2a6c8 33675@@ -1902,7 +1902,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
fe2de317
MT
33676 test_bit(In_sync, &rdev->flags)) {
33677 if (r1_sync_page_io(rdev, sect, s,
33678 conf->tmppage, READ)) {
33679- atomic_add(s, &rdev->corrected_errors);
33680+ atomic_add_unchecked(s, &rdev->corrected_errors);
33681 printk(KERN_INFO
33682 "md/raid1:%s: read error corrected "
33683 "(%d sectors at %llu on %s)\n",
33684diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
572b4308 33685index a954c95..6e7a21c 100644
fe2de317
MT
33686--- a/drivers/md/raid10.c
33687+++ b/drivers/md/raid10.c
c6e2a6c8 33688@@ -1684,7 +1684,7 @@ static void end_sync_read(struct bio *bio, int error)
6e9df6a3
MT
33689 /* The write handler will notice the lack of
33690 * R10BIO_Uptodate and record any errors etc
33691 */
8308f9c9
MT
33692- atomic_add(r10_bio->sectors,
33693+ atomic_add_unchecked(r10_bio->sectors,
33694 &conf->mirrors[d].rdev->corrected_errors);
6e9df6a3
MT
33695
33696 /* for reconstruct, we always reschedule after a read.
c6e2a6c8 33697@@ -2033,7 +2033,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
8308f9c9
MT
33698 {
33699 struct timespec cur_time_mon;
33700 unsigned long hours_since_last;
33701- unsigned int read_errors = atomic_read(&rdev->read_errors);
33702+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
33703
33704 ktime_get_ts(&cur_time_mon);
33705
c6e2a6c8 33706@@ -2055,9 +2055,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
8308f9c9
MT
33707 * overflowing the shift of read_errors by hours_since_last.
33708 */
33709 if (hours_since_last >= 8 * sizeof(read_errors))
33710- atomic_set(&rdev->read_errors, 0);
33711+ atomic_set_unchecked(&rdev->read_errors, 0);
33712 else
33713- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
33714+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
33715 }
33716
4c928ab7 33717 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
c6e2a6c8 33718@@ -2111,8 +2111,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
15a11c5b 33719 return;
8308f9c9 33720
15a11c5b
MT
33721 check_decay_read_errors(mddev, rdev);
33722- atomic_inc(&rdev->read_errors);
33723- if (atomic_read(&rdev->read_errors) > max_read_errors) {
33724+ atomic_inc_unchecked(&rdev->read_errors);
33725+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
33726 char b[BDEVNAME_SIZE];
33727 bdevname(rdev->bdev, b);
33728
c6e2a6c8 33729@@ -2120,7 +2120,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
15a11c5b
MT
33730 "md/raid10:%s: %s: Raid device exceeded "
33731 "read_error threshold [cur %d:max %d]\n",
33732 mdname(mddev), b,
33733- atomic_read(&rdev->read_errors), max_read_errors);
33734+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
33735 printk(KERN_NOTICE
33736 "md/raid10:%s: %s: Failing raid device\n",
33737 mdname(mddev), b);
c6e2a6c8 33738@@ -2271,7 +2271,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
6e9df6a3
MT
33739 (unsigned long long)(
33740 sect + rdev->data_offset),
33741 bdevname(rdev->bdev, b));
8308f9c9
MT
33742- atomic_add(s, &rdev->corrected_errors);
33743+ atomic_add_unchecked(s, &rdev->corrected_errors);
6e9df6a3
MT
33744 }
33745
33746 rdev_dec_pending(rdev, mddev);
fe2de317 33747diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
572b4308 33748index 73a5800..2b0e3b1 100644
fe2de317
MT
33749--- a/drivers/md/raid5.c
33750+++ b/drivers/md/raid5.c
572b4308 33751@@ -1694,18 +1694,18 @@ static void raid5_end_read_request(struct bio * bi, int error)
6e9df6a3
MT
33752 (unsigned long long)(sh->sector
33753 + rdev->data_offset),
33754 bdevname(rdev->bdev, b));
33755- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
33756+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
8308f9c9
MT
33757 clear_bit(R5_ReadError, &sh->dev[i].flags);
33758 clear_bit(R5_ReWrite, &sh->dev[i].flags);
33759 }
5e856224
MT
33760- if (atomic_read(&rdev->read_errors))
33761- atomic_set(&rdev->read_errors, 0);
33762+ if (atomic_read_unchecked(&rdev->read_errors))
33763+ atomic_set_unchecked(&rdev->read_errors, 0);
8308f9c9 33764 } else {
5e856224 33765 const char *bdn = bdevname(rdev->bdev, b);
8308f9c9 33766 int retry = 0;
8308f9c9
MT
33767
33768 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
33769- atomic_inc(&rdev->read_errors);
33770+ atomic_inc_unchecked(&rdev->read_errors);
5e856224 33771 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
6e9df6a3
MT
33772 printk_ratelimited(
33773 KERN_WARNING
572b4308 33774@@ -1734,7 +1734,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
6e9df6a3
MT
33775 (unsigned long long)(sh->sector
33776 + rdev->data_offset),
33777 bdn);
8308f9c9
MT
33778- else if (atomic_read(&rdev->read_errors)
33779+ else if (atomic_read_unchecked(&rdev->read_errors)
33780 > conf->max_nr_stripes)
33781 printk(KERN_WARNING
33782 "md/raid:%s: Too many read errors, failing device %s.\n",
fe2de317 33783diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
c6e2a6c8 33784index d88c4aa..17c80b1 100644
fe2de317
MT
33785--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
33786+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
c6e2a6c8 33787@@ -1679,7 +1679,7 @@ static struct ddb_info ddb_v6 = {
6e9df6a3
MT
33788 .subvendor = _subvend, .subdevice = _subdev, \
33789 .driver_data = (unsigned long)&_driverdata }
33790
33791-static const struct pci_device_id ddb_id_tbl[] __devinitdata = {
33792+static const struct pci_device_id ddb_id_tbl[] __devinitconst = {
33793 DDB_ID(DDVID, 0x0002, DDVID, 0x0001, ddb_octopus),
33794 DDB_ID(DDVID, 0x0003, DDVID, 0x0001, ddb_octopus),
33795 DDB_ID(DDVID, 0x0003, DDVID, 0x0002, ddb_octopus_le),
fe2de317
MT
33796diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
33797index a7d876f..8c21b61 100644
33798--- a/drivers/media/dvb/dvb-core/dvb_demux.h
33799+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
6e9df6a3 33800@@ -73,7 +73,7 @@ struct dvb_demux_feed {
15a11c5b
MT
33801 union {
33802 dmx_ts_cb ts;
33803 dmx_section_cb sec;
33804- } cb;
33805+ } __no_const cb;
33806
33807 struct dvb_demux *demux;
33808 void *priv;
fe2de317 33809diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
572b4308 33810index 39eab73..60033e7 100644
fe2de317
MT
33811--- a/drivers/media/dvb/dvb-core/dvbdev.c
33812+++ b/drivers/media/dvb/dvb-core/dvbdev.c
33813@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
ae4e228f
MT
33814 const struct dvb_device *template, void *priv, int type)
33815 {
33816 struct dvb_device *dvbdev;
16454cff 33817- struct file_operations *dvbdevfops;
15a11c5b 33818+ file_operations_no_const *dvbdevfops;
ae4e228f
MT
33819 struct device *clsdev;
33820 int minor;
16454cff 33821 int id;
fe2de317 33822diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
5e856224 33823index 3940bb0..fb3952a 100644
fe2de317
MT
33824--- a/drivers/media/dvb/dvb-usb/cxusb.c
33825+++ b/drivers/media/dvb/dvb-usb/cxusb.c
5e856224
MT
33826@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
33827
15a11c5b 33828 struct dib0700_adapter_state {
5e856224 33829 int (*set_param_save) (struct dvb_frontend *);
15a11c5b
MT
33830-};
33831+} __no_const;
33832
5e856224
MT
33833 static int dib7070_set_param_override(struct dvb_frontend *fe)
33834 {
fe2de317 33835diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
5e856224 33836index 451c5a7..649f711 100644
fe2de317
MT
33837--- a/drivers/media/dvb/dvb-usb/dw2102.c
33838+++ b/drivers/media/dvb/dvb-usb/dw2102.c
15a11c5b
MT
33839@@ -95,7 +95,7 @@ struct su3000_state {
33840
33841 struct s6x0_state {
33842 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
33843-};
33844+} __no_const;
33845
33846 /* debug */
33847 static int dvb_usb_dw2102_debug;
fe2de317 33848diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
4c928ab7 33849index 404f63a..4796533 100644
fe2de317
MT
33850--- a/drivers/media/dvb/frontends/dib3000.h
33851+++ b/drivers/media/dvb/frontends/dib3000.h
6e9df6a3
MT
33852@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
33853 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
15a11c5b
MT
33854 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
33855 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
6e9df6a3
MT
33856-};
33857+} __no_const;
15a11c5b
MT
33858
33859 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
33860 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
fe2de317 33861diff --git a/drivers/media/dvb/ngene/ngene-cards.c b/drivers/media/dvb/ngene/ngene-cards.c
c6e2a6c8 33862index 7539a5d..06531a6 100644
fe2de317
MT
33863--- a/drivers/media/dvb/ngene/ngene-cards.c
33864+++ b/drivers/media/dvb/ngene/ngene-cards.c
c6e2a6c8 33865@@ -478,7 +478,7 @@ static struct ngene_info ngene_info_m780 = {
6e9df6a3
MT
33866
33867 /****************************************************************************/
33868
33869-static const struct pci_device_id ngene_id_tbl[] __devinitdata = {
33870+static const struct pci_device_id ngene_id_tbl[] __devinitconst = {
33871 NGENE_ID(0x18c3, 0xabc3, ngene_info_cineS2),
33872 NGENE_ID(0x18c3, 0xabc4, ngene_info_cineS2),
33873 NGENE_ID(0x18c3, 0xdb01, ngene_info_satixS2),
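
Editor's note on the ngene hunk above (like the earlier macio_asic.c, ddbridge and cx88-alsa ones): the PCI device ID table moves from __devinitdata to __devinitconst, since it is never written and can live in a read-only init section. A sketch with the section attributes spelled out by hand, as an assumption rather than the real kernel macros:

/* Illustrative sketch only, not part of the patch; simplified stand-ins
 * for the kernel's init-section annotations. */
#define __devinitdata_sketch  __attribute__((section(".devinit.data")))
#define __devinitconst_sketch __attribute__((section(".devinit.rodata")))

struct pci_id_sketch {
	unsigned int vendor;
	unsigned int device;
};

/* const + read-only section: the table cannot be modified at run time */
static const struct pci_id_sketch ids[] __devinitconst_sketch = {
	{ 0x18c3, 0xabc3 },
	{ 0, 0 },
};
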
fe2de317 33874diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
c6e2a6c8 33875index 16a089f..1661b11 100644
fe2de317
MT
33876--- a/drivers/media/radio/radio-cadet.c
33877+++ b/drivers/media/radio/radio-cadet.c
33878@@ -326,6 +326,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
6e9df6a3
MT
33879 unsigned char readbuf[RDS_BUFFER];
33880 int i = 0;
33881
33882+ if (count > RDS_BUFFER)
33883+ return -EFAULT;
33884 mutex_lock(&dev->lock);
33885 if (dev->rdsstat == 0) {
33886 dev->rdsstat = 1;
c6e2a6c8
MT
33887@@ -347,7 +349,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
33888 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
33889 mutex_unlock(&dev->lock);
33890
33891- if (copy_to_user(data, readbuf, i))
33892+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
33893 return -EFAULT;
33894 return i;
33895 }
fe2de317
MT
33896diff --git a/drivers/media/video/au0828/au0828.h b/drivers/media/video/au0828/au0828.h
33897index 9cde353..8c6a1c3 100644
33898--- a/drivers/media/video/au0828/au0828.h
33899+++ b/drivers/media/video/au0828/au0828.h
6e9df6a3
MT
33900@@ -191,7 +191,7 @@ struct au0828_dev {
33901
33902 /* I2C */
33903 struct i2c_adapter i2c_adap;
33904- struct i2c_algorithm i2c_algo;
33905+ i2c_algorithm_no_const i2c_algo;
33906 struct i2c_client i2c_client;
33907 u32 i2c_rc;
33908
572b4308
MT
33909diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
33910index 7930ca5..235bf7d 100644
33911--- a/drivers/media/video/cx25821/cx25821-core.c
33912+++ b/drivers/media/video/cx25821/cx25821-core.c
33913@@ -912,9 +912,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
33914 list_add_tail(&dev->devlist, &cx25821_devlist);
33915 mutex_unlock(&cx25821_devlist_mutex);
33916
33917- strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
33918- strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
33919-
33920 if (dev->pci->device != 0x8210) {
33921 pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
33922 __func__, dev->pci->device);
33923diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
33924index b9aa801..029f293 100644
33925--- a/drivers/media/video/cx25821/cx25821.h
33926+++ b/drivers/media/video/cx25821/cx25821.h
33927@@ -187,7 +187,7 @@ enum port {
33928 };
33929
33930 struct cx25821_board {
33931- char *name;
33932+ const char *name;
33933 enum port porta;
33934 enum port portb;
33935 enum port portc;
fe2de317 33936diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
5e856224 33937index 04bf662..e0ac026 100644
fe2de317
MT
33938--- a/drivers/media/video/cx88/cx88-alsa.c
33939+++ b/drivers/media/video/cx88/cx88-alsa.c
33940@@ -766,7 +766,7 @@ static struct snd_kcontrol_new snd_cx88_alc_switch = {
6e9df6a3
MT
33941 * Only boards with eeprom and byte 1 at eeprom=1 have it
33942 */
33943
33944-static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitdata = {
33945+static const struct pci_device_id const cx88_audio_pci_tbl[] __devinitconst = {
33946 {0x14f1,0x8801,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33947 {0x14f1,0x8811,PCI_ANY_ID,PCI_ANY_ID,0,0,0},
33948 {0, }
4c928ab7 33949diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
c6e2a6c8 33950index 88cf9d9..bbc4b2c 100644
4c928ab7
MT
33951--- a/drivers/media/video/omap/omap_vout.c
33952+++ b/drivers/media/video/omap/omap_vout.c
33953@@ -64,7 +64,6 @@ enum omap_vout_channels {
33954 OMAP_VIDEO2,
33955 };
66a7e928 33956
4c928ab7
MT
33957-static struct videobuf_queue_ops video_vbq_ops;
33958 /* Variables configurable through module params*/
33959 static u32 video1_numbuffers = 3;
33960 static u32 video2_numbuffers = 3;
5e856224 33961@@ -1000,6 +999,12 @@ static int omap_vout_open(struct file *file)
4c928ab7
MT
33962 {
33963 struct videobuf_queue *q;
33964 struct omap_vout_device *vout = NULL;
33965+ static struct videobuf_queue_ops video_vbq_ops = {
33966+ .buf_setup = omap_vout_buffer_setup,
33967+ .buf_prepare = omap_vout_buffer_prepare,
33968+ .buf_release = omap_vout_buffer_release,
33969+ .buf_queue = omap_vout_buffer_queue,
33970+ };
33971
33972 vout = video_drvdata(file);
33973 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
5e856224 33974@@ -1017,10 +1022,6 @@ static int omap_vout_open(struct file *file)
4c928ab7 33975 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
66a7e928 33976
4c928ab7
MT
33977 q = &vout->vbq;
33978- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
33979- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
33980- video_vbq_ops.buf_release = omap_vout_buffer_release;
33981- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
33982 spin_lock_init(&vout->vbq_lock);
33983
33984 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
fe2de317
MT
33985diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33986index 305e6aa..0143317 100644
33987--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
33988+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
6e9df6a3
MT
33989@@ -196,7 +196,7 @@ struct pvr2_hdw {
33990
33991 /* I2C stuff */
33992 struct i2c_adapter i2c_adap;
33993- struct i2c_algorithm i2c_algo;
33994+ i2c_algorithm_no_const i2c_algo;
33995 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
33996 int i2c_cx25840_hack_state;
33997 int i2c_linked;
fe2de317 33998diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
c6e2a6c8 33999index 02194c0..091733b 100644
fe2de317
MT
34000--- a/drivers/media/video/timblogiw.c
34001+++ b/drivers/media/video/timblogiw.c
4c928ab7 34002@@ -745,7 +745,7 @@ static int timblogiw_mmap(struct file *file, struct vm_area_struct *vma)
15a11c5b
MT
34003
34004 /* Platform device functions */
34005
34006-static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
34007+static __devinitconst v4l2_ioctl_ops_no_const timblogiw_ioctl_ops = {
34008 .vidioc_querycap = timblogiw_querycap,
34009 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
34010 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
4c928ab7 34011@@ -767,7 +767,7 @@ static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
6e9df6a3
MT
34012 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
34013 };
34014
34015-static __devinitconst struct v4l2_file_operations timblogiw_fops = {
34016+static __devinitconst v4l2_file_operations_no_const timblogiw_fops = {
34017 .owner = THIS_MODULE,
34018 .open = timblogiw_open,
34019 .release = timblogiw_close,
fe2de317 34020diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
c6e2a6c8 34021index a5c591f..db692a3 100644
fe2de317
MT
34022--- a/drivers/message/fusion/mptbase.c
34023+++ b/drivers/message/fusion/mptbase.c
5e856224 34024@@ -6754,8 +6754,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
6892158b
MT
34025 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
34026 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
57199397
MT
34027
34028+#ifdef CONFIG_GRKERNSEC_HIDESYM
6892158b 34029+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
57199397 34030+#else
6892158b 34031 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
57199397
MT
34032 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
34033+#endif
34034+
34035 /*
34036 * Rounding UP to nearest 4-kB boundary here...
34037 */
fe2de317 34038diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
5e856224 34039index 551262e..7551198 100644
fe2de317
MT
34040--- a/drivers/message/fusion/mptsas.c
34041+++ b/drivers/message/fusion/mptsas.c
4c928ab7 34042@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
df50ba0c
MT
34043 return 0;
34044 }
34045
34046+static inline void
34047+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34048+{
34049+ if (phy_info->port_details) {
34050+ phy_info->port_details->rphy = rphy;
34051+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34052+ ioc->name, rphy));
34053+ }
34054+
34055+ if (rphy) {
34056+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34057+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34058+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34059+ ioc->name, rphy, rphy->dev.release));
34060+ }
34061+}
34062+
34063 /* no mutex */
34064 static void
34065 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
4c928ab7 34066@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
df50ba0c
MT
34067 return NULL;
34068 }
34069
34070-static inline void
34071-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
34072-{
34073- if (phy_info->port_details) {
34074- phy_info->port_details->rphy = rphy;
34075- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
34076- ioc->name, rphy));
34077- }
34078-
34079- if (rphy) {
34080- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
34081- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
34082- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
34083- ioc->name, rphy, rphy->dev.release));
34084- }
34085-}
34086-
34087 static inline struct sas_port *
34088 mptsas_get_port(struct mptsas_phyinfo *phy_info)
34089 {
fe2de317 34090diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
4c928ab7 34091index 0c3ced7..1fe34ec 100644
fe2de317
MT
34092--- a/drivers/message/fusion/mptscsih.c
34093+++ b/drivers/message/fusion/mptscsih.c
4c928ab7 34094@@ -1270,15 +1270,16 @@ mptscsih_info(struct Scsi_Host *SChost)
6892158b
MT
34095
34096 h = shost_priv(SChost);
34097
34098- if (h) {
34099- if (h->info_kbuf == NULL)
34100- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34101- return h->info_kbuf;
34102- h->info_kbuf[0] = '\0';
34103+ if (!h)
34104+ return NULL;
34105
34106- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34107- h->info_kbuf[size-1] = '\0';
34108- }
34109+ if (h->info_kbuf == NULL)
34110+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
34111+ return h->info_kbuf;
34112+ h->info_kbuf[0] = '\0';
34113+
34114+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
34115+ h->info_kbuf[size-1] = '\0';
34116
34117 return h->info_kbuf;
34118 }
fe2de317 34119diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
5e856224 34120index 6d115c7..58ff7fd 100644
fe2de317
MT
34121--- a/drivers/message/i2o/i2o_proc.c
34122+++ b/drivers/message/i2o/i2o_proc.c
df50ba0c 34123@@ -255,13 +255,6 @@ static char *scsi_devices[] = {
58c5fc13
MT
34124 "Array Controller Device"
34125 };
34126
34127-static char *chtostr(u8 * chars, int n)
34128-{
34129- char tmp[256];
34130- tmp[0] = 0;
34131- return strncat(tmp, (char *)chars, n);
34132-}
34133-
34134 static int i2o_report_query_status(struct seq_file *seq, int block_status,
34135 char *group)
34136 {
fe2de317 34137@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
58c5fc13
MT
34138
34139 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
34140 seq_printf(seq, "%-#8x", ddm_table.module_id);
34141- seq_printf(seq, "%-29s",
34142- chtostr(ddm_table.module_name_version, 28));
34143+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
34144 seq_printf(seq, "%9d ", ddm_table.data_size);
34145 seq_printf(seq, "%8d", ddm_table.code_size);
34146
fe2de317 34147@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
58c5fc13
MT
34148
34149 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
34150 seq_printf(seq, "%-#8x", dst->module_id);
34151- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
34152- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
34153+ seq_printf(seq, "%-.28s", dst->module_name_version);
34154+ seq_printf(seq, "%-.8s", dst->date);
34155 seq_printf(seq, "%8d ", dst->module_size);
34156 seq_printf(seq, "%8d ", dst->mpb_size);
34157 seq_printf(seq, "0x%04x", dst->module_flags);
fe2de317 34158@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
58c5fc13
MT
34159 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
34160 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
34161 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
34162- seq_printf(seq, "Vendor info : %s\n",
34163- chtostr((u8 *) (work32 + 2), 16));
34164- seq_printf(seq, "Product info : %s\n",
34165- chtostr((u8 *) (work32 + 6), 16));
34166- seq_printf(seq, "Description : %s\n",
34167- chtostr((u8 *) (work32 + 10), 16));
34168- seq_printf(seq, "Product rev. : %s\n",
34169- chtostr((u8 *) (work32 + 14), 8));
34170+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
34171+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
34172+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
34173+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
34174
34175 seq_printf(seq, "Serial number : ");
34176 print_serial_number(seq, (u8 *) (work32 + 16),
fe2de317 34177@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
58c5fc13
MT
34178 }
34179
34180 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
34181- seq_printf(seq, "Module name : %s\n",
34182- chtostr(result.module_name, 24));
34183- seq_printf(seq, "Module revision : %s\n",
34184- chtostr(result.module_rev, 8));
34185+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
34186+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
34187
34188 seq_printf(seq, "Serial number : ");
34189 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
fe2de317 34190@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
58c5fc13
MT
34191 return 0;
34192 }
34193
34194- seq_printf(seq, "Device name : %s\n",
34195- chtostr(result.device_name, 64));
34196- seq_printf(seq, "Service name : %s\n",
34197- chtostr(result.service_name, 64));
34198- seq_printf(seq, "Physical name : %s\n",
34199- chtostr(result.physical_location, 64));
34200- seq_printf(seq, "Instance number : %s\n",
34201- chtostr(result.instance_number, 4));
34202+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
34203+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
34204+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
34205+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
34206
34207 return 0;
34208 }
fe2de317
MT
34209diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
34210index a8c08f3..155fe3d 100644
34211--- a/drivers/message/i2o/iop.c
34212+++ b/drivers/message/i2o/iop.c
34213@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
8308f9c9
MT
34214
34215 spin_lock_irqsave(&c->context_list_lock, flags);
34216
34217- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
34218- atomic_inc(&c->context_list_counter);
34219+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
34220+ atomic_inc_unchecked(&c->context_list_counter);
34221
34222- entry->context = atomic_read(&c->context_list_counter);
34223+ entry->context = atomic_read_unchecked(&c->context_list_counter);
34224
34225 list_add(&entry->list, &c->context_list);
34226
fe2de317 34227@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
8308f9c9
MT
34228
34229 #if BITS_PER_LONG == 64
34230 spin_lock_init(&c->context_list_lock);
34231- atomic_set(&c->context_list_counter, 0);
34232+ atomic_set_unchecked(&c->context_list_counter, 0);
34233 INIT_LIST_HEAD(&c->context_list);
34234 #endif
34235
fe2de317 34236diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
4c928ab7 34237index 7ce65f4..e66e9bc 100644
fe2de317
MT
34238--- a/drivers/mfd/abx500-core.c
34239+++ b/drivers/mfd/abx500-core.c
4c928ab7 34240@@ -15,7 +15,7 @@ static LIST_HEAD(abx500_list);
66a7e928 34241
15a11c5b
MT
34242 struct abx500_device_entry {
34243 struct list_head list;
34244- struct abx500_ops ops;
34245+ abx500_ops_no_const ops;
66a7e928
MT
34246 struct device *dev;
34247 };
34248
fe2de317 34249diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
5e856224 34250index a9223ed..4127b13 100644
fe2de317
MT
34251--- a/drivers/mfd/janz-cmodio.c
34252+++ b/drivers/mfd/janz-cmodio.c
57199397
MT
34253@@ -13,6 +13,7 @@
34254
34255 #include <linux/kernel.h>
34256 #include <linux/module.h>
34257+#include <linux/slab.h>
34258 #include <linux/init.h>
34259 #include <linux/pci.h>
34260 #include <linux/interrupt.h>
fe2de317 34261diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
5e856224 34262index a981e2a..5ca0c8b 100644
fe2de317
MT
34263--- a/drivers/misc/lis3lv02d/lis3lv02d.c
34264+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
5e856224 34265@@ -466,7 +466,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
66a7e928
MT
34266 * the lid is closed. This leads to interrupts as soon as a little move
34267 * is done.
34268 */
4c928ab7
MT
34269- atomic_inc(&lis3->count);
34270+ atomic_inc_unchecked(&lis3->count);
66a7e928 34271
4c928ab7
MT
34272 wake_up_interruptible(&lis3->misc_wait);
34273 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
5e856224 34274@@ -552,7 +552,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
4c928ab7
MT
34275 if (lis3->pm_dev)
34276 pm_runtime_get_sync(lis3->pm_dev);
66a7e928 34277
4c928ab7
MT
34278- atomic_set(&lis3->count, 0);
34279+ atomic_set_unchecked(&lis3->count, 0);
66a7e928
MT
34280 return 0;
34281 }
34282
5e856224 34283@@ -585,7 +585,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
4c928ab7 34284 add_wait_queue(&lis3->misc_wait, &wait);
66a7e928
MT
34285 while (true) {
34286 set_current_state(TASK_INTERRUPTIBLE);
4c928ab7
MT
34287- data = atomic_xchg(&lis3->count, 0);
34288+ data = atomic_xchg_unchecked(&lis3->count, 0);
66a7e928
MT
34289 if (data)
34290 break;
34291
5e856224 34292@@ -626,7 +626,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
4c928ab7
MT
34293 struct lis3lv02d, miscdev);
34294
34295 poll_wait(file, &lis3->misc_wait, wait);
34296- if (atomic_read(&lis3->count))
34297+ if (atomic_read_unchecked(&lis3->count))
66a7e928
MT
34298 return POLLIN | POLLRDNORM;
34299 return 0;
34300 }
fe2de317 34301diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
4c928ab7 34302index 2b1482a..5d33616 100644
fe2de317
MT
34303--- a/drivers/misc/lis3lv02d/lis3lv02d.h
34304+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
4c928ab7 34305@@ -266,7 +266,7 @@ struct lis3lv02d {
66a7e928
MT
34306 struct input_polled_dev *idev; /* input device */
34307 struct platform_device *pdev; /* platform device */
34308 struct regulator_bulk_data regulators[2];
34309- atomic_t count; /* interrupt count after last read */
34310+ atomic_unchecked_t count; /* interrupt count after last read */
34311 union axis_conversion ac; /* hw -> logical axis */
34312 int mapped_btns[3];
34313
fe2de317
MT
34314diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
34315index 2f30bad..c4c13d0 100644
34316--- a/drivers/misc/sgi-gru/gruhandles.c
34317+++ b/drivers/misc/sgi-gru/gruhandles.c
34318@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
ae4e228f
MT
34319 unsigned long nsec;
34320
34321 nsec = CLKS2NSEC(clks);
34322- atomic_long_inc(&mcs_op_statistics[op].count);
34323- atomic_long_add(nsec, &mcs_op_statistics[op].total);
34324+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
34325+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
34326 if (mcs_op_statistics[op].max < nsec)
34327 mcs_op_statistics[op].max = nsec;
34328 }
fe2de317 34329diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
5e856224 34330index 950dbe9..eeef0f8 100644
fe2de317
MT
34331--- a/drivers/misc/sgi-gru/gruprocfs.c
34332+++ b/drivers/misc/sgi-gru/gruprocfs.c
ae4e228f
MT
34333@@ -32,9 +32,9 @@
34334
34335 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
34336
34337-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
34338+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
34339 {
34340- unsigned long val = atomic_long_read(v);
34341+ unsigned long val = atomic_long_read_unchecked(v);
34342
34343 seq_printf(s, "%16lu %s\n", val, id);
34344 }
fe2de317 34345@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
ae4e228f
MT
34346
34347 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
34348 for (op = 0; op < mcsop_last; op++) {
34349- count = atomic_long_read(&mcs_op_statistics[op].count);
34350- total = atomic_long_read(&mcs_op_statistics[op].total);
34351+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
34352+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
34353 max = mcs_op_statistics[op].max;
34354 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
34355 count ? total / count : 0, max);
fe2de317
MT
34356diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
34357index 5c3ce24..4915ccb 100644
34358--- a/drivers/misc/sgi-gru/grutables.h
34359+++ b/drivers/misc/sgi-gru/grutables.h
ae4e228f
MT
34360@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
34361 * GRU statistics.
34362 */
34363 struct gru_stats_s {
34364- atomic_long_t vdata_alloc;
34365- atomic_long_t vdata_free;
34366- atomic_long_t gts_alloc;
34367- atomic_long_t gts_free;
34368- atomic_long_t gms_alloc;
34369- atomic_long_t gms_free;
34370- atomic_long_t gts_double_allocate;
34371- atomic_long_t assign_context;
34372- atomic_long_t assign_context_failed;
34373- atomic_long_t free_context;
34374- atomic_long_t load_user_context;
34375- atomic_long_t load_kernel_context;
34376- atomic_long_t lock_kernel_context;
34377- atomic_long_t unlock_kernel_context;
34378- atomic_long_t steal_user_context;
34379- atomic_long_t steal_kernel_context;
34380- atomic_long_t steal_context_failed;
34381- atomic_long_t nopfn;
34382- atomic_long_t asid_new;
34383- atomic_long_t asid_next;
34384- atomic_long_t asid_wrap;
34385- atomic_long_t asid_reuse;
34386- atomic_long_t intr;
34387- atomic_long_t intr_cbr;
34388- atomic_long_t intr_tfh;
34389- atomic_long_t intr_spurious;
34390- atomic_long_t intr_mm_lock_failed;
34391- atomic_long_t call_os;
34392- atomic_long_t call_os_wait_queue;
34393- atomic_long_t user_flush_tlb;
34394- atomic_long_t user_unload_context;
34395- atomic_long_t user_exception;
34396- atomic_long_t set_context_option;
34397- atomic_long_t check_context_retarget_intr;
34398- atomic_long_t check_context_unload;
34399- atomic_long_t tlb_dropin;
34400- atomic_long_t tlb_preload_page;
34401- atomic_long_t tlb_dropin_fail_no_asid;
34402- atomic_long_t tlb_dropin_fail_upm;
34403- atomic_long_t tlb_dropin_fail_invalid;
34404- atomic_long_t tlb_dropin_fail_range_active;
34405- atomic_long_t tlb_dropin_fail_idle;
34406- atomic_long_t tlb_dropin_fail_fmm;
34407- atomic_long_t tlb_dropin_fail_no_exception;
34408- atomic_long_t tfh_stale_on_fault;
34409- atomic_long_t mmu_invalidate_range;
34410- atomic_long_t mmu_invalidate_page;
34411- atomic_long_t flush_tlb;
34412- atomic_long_t flush_tlb_gru;
34413- atomic_long_t flush_tlb_gru_tgh;
34414- atomic_long_t flush_tlb_gru_zero_asid;
ae4e228f
MT
34415+ atomic_long_unchecked_t vdata_alloc;
34416+ atomic_long_unchecked_t vdata_free;
34417+ atomic_long_unchecked_t gts_alloc;
34418+ atomic_long_unchecked_t gts_free;
34419+ atomic_long_unchecked_t gms_alloc;
34420+ atomic_long_unchecked_t gms_free;
34421+ atomic_long_unchecked_t gts_double_allocate;
34422+ atomic_long_unchecked_t assign_context;
34423+ atomic_long_unchecked_t assign_context_failed;
34424+ atomic_long_unchecked_t free_context;
34425+ atomic_long_unchecked_t load_user_context;
34426+ atomic_long_unchecked_t load_kernel_context;
34427+ atomic_long_unchecked_t lock_kernel_context;
34428+ atomic_long_unchecked_t unlock_kernel_context;
34429+ atomic_long_unchecked_t steal_user_context;
34430+ atomic_long_unchecked_t steal_kernel_context;
34431+ atomic_long_unchecked_t steal_context_failed;
34432+ atomic_long_unchecked_t nopfn;
34433+ atomic_long_unchecked_t asid_new;
34434+ atomic_long_unchecked_t asid_next;
34435+ atomic_long_unchecked_t asid_wrap;
34436+ atomic_long_unchecked_t asid_reuse;
34437+ atomic_long_unchecked_t intr;
34438+ atomic_long_unchecked_t intr_cbr;
34439+ atomic_long_unchecked_t intr_tfh;
34440+ atomic_long_unchecked_t intr_spurious;
34441+ atomic_long_unchecked_t intr_mm_lock_failed;
34442+ atomic_long_unchecked_t call_os;
34443+ atomic_long_unchecked_t call_os_wait_queue;
34444+ atomic_long_unchecked_t user_flush_tlb;
34445+ atomic_long_unchecked_t user_unload_context;
34446+ atomic_long_unchecked_t user_exception;
34447+ atomic_long_unchecked_t set_context_option;
34448+ atomic_long_unchecked_t check_context_retarget_intr;
34449+ atomic_long_unchecked_t check_context_unload;
34450+ atomic_long_unchecked_t tlb_dropin;
34451+ atomic_long_unchecked_t tlb_preload_page;
34452+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
34453+ atomic_long_unchecked_t tlb_dropin_fail_upm;
34454+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
34455+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
34456+ atomic_long_unchecked_t tlb_dropin_fail_idle;
34457+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
34458+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
34459+ atomic_long_unchecked_t tfh_stale_on_fault;
34460+ atomic_long_unchecked_t mmu_invalidate_range;
34461+ atomic_long_unchecked_t mmu_invalidate_page;
34462+ atomic_long_unchecked_t flush_tlb;
34463+ atomic_long_unchecked_t flush_tlb_gru;
34464+ atomic_long_unchecked_t flush_tlb_gru_tgh;
34465+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
fe2de317
MT
34466
34467- atomic_long_t copy_gpa;
34468- atomic_long_t read_gpa;
ae4e228f
MT
34469+ atomic_long_unchecked_t copy_gpa;
34470+ atomic_long_unchecked_t read_gpa;
fe2de317
MT
34471
34472- atomic_long_t mesq_receive;
34473- atomic_long_t mesq_receive_none;
34474- atomic_long_t mesq_send;
34475- atomic_long_t mesq_send_failed;
34476- atomic_long_t mesq_noop;
34477- atomic_long_t mesq_send_unexpected_error;
34478- atomic_long_t mesq_send_lb_overflow;
34479- atomic_long_t mesq_send_qlimit_reached;
34480- atomic_long_t mesq_send_amo_nacked;
34481- atomic_long_t mesq_send_put_nacked;
34482- atomic_long_t mesq_page_overflow;
34483- atomic_long_t mesq_qf_locked;
34484- atomic_long_t mesq_qf_noop_not_full;
34485- atomic_long_t mesq_qf_switch_head_failed;
34486- atomic_long_t mesq_qf_unexpected_error;
34487- atomic_long_t mesq_noop_unexpected_error;
34488- atomic_long_t mesq_noop_lb_overflow;
34489- atomic_long_t mesq_noop_qlimit_reached;
34490- atomic_long_t mesq_noop_amo_nacked;
34491- atomic_long_t mesq_noop_put_nacked;
34492- atomic_long_t mesq_noop_page_overflow;
ae4e228f
MT
34493+ atomic_long_unchecked_t mesq_receive;
34494+ atomic_long_unchecked_t mesq_receive_none;
34495+ atomic_long_unchecked_t mesq_send;
34496+ atomic_long_unchecked_t mesq_send_failed;
34497+ atomic_long_unchecked_t mesq_noop;
34498+ atomic_long_unchecked_t mesq_send_unexpected_error;
34499+ atomic_long_unchecked_t mesq_send_lb_overflow;
34500+ atomic_long_unchecked_t mesq_send_qlimit_reached;
34501+ atomic_long_unchecked_t mesq_send_amo_nacked;
34502+ atomic_long_unchecked_t mesq_send_put_nacked;
34503+ atomic_long_unchecked_t mesq_page_overflow;
34504+ atomic_long_unchecked_t mesq_qf_locked;
34505+ atomic_long_unchecked_t mesq_qf_noop_not_full;
34506+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
34507+ atomic_long_unchecked_t mesq_qf_unexpected_error;
34508+ atomic_long_unchecked_t mesq_noop_unexpected_error;
34509+ atomic_long_unchecked_t mesq_noop_lb_overflow;
34510+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
34511+ atomic_long_unchecked_t mesq_noop_amo_nacked;
34512+ atomic_long_unchecked_t mesq_noop_put_nacked;
34513+ atomic_long_unchecked_t mesq_noop_page_overflow;
58c5fc13 34514
58c5fc13 34515 };
58c5fc13 34516
fe2de317 34517@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
ae4e228f 34518 tghop_invalidate, mcsop_last};
58c5fc13 34519
ae4e228f
MT
34520 struct mcs_op_statistic {
34521- atomic_long_t count;
34522- atomic_long_t total;
34523+ atomic_long_unchecked_t count;
34524+ atomic_long_unchecked_t total;
34525 unsigned long max;
58c5fc13
MT
34526 };
34527
fe2de317 34528@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
58c5fc13 34529
ae4e228f
MT
34530 #define STAT(id) do { \
34531 if (gru_options & OPT_STATS) \
34532- atomic_long_inc(&gru_stats.id); \
34533+ atomic_long_inc_unchecked(&gru_stats.id); \
34534 } while (0)
58c5fc13 34535
ae4e228f 34536 #ifdef CONFIG_SGI_GRU_DEBUG
fe2de317 34537diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
c6e2a6c8 34538index c862cd4..0d176fe 100644
fe2de317
MT
34539--- a/drivers/misc/sgi-xp/xp.h
34540+++ b/drivers/misc/sgi-xp/xp.h
c6e2a6c8 34541@@ -288,7 +288,7 @@ struct xpc_interface {
fe2de317
MT
34542 xpc_notify_func, void *);
34543 void (*received) (short, int, void *);
34544 enum xp_retval (*partid_to_nasids) (short, void *);
34545-};
34546+} __no_const;
34547
34548 extern struct xpc_interface xpc_interface;
34549
34550diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
34551index b94d5f7..7f494c5 100644
34552--- a/drivers/misc/sgi-xp/xpc.h
34553+++ b/drivers/misc/sgi-xp/xpc.h
6e9df6a3
MT
34554@@ -835,6 +835,7 @@ struct xpc_arch_operations {
34555 void (*received_payload) (struct xpc_channel *, void *);
34556 void (*notify_senders_of_disconnect) (struct xpc_channel *);
34557 };
34558+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
34559
34560 /* struct xpc_partition act_state values (for XPC HB) */
34561
fe2de317 34562@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
6e9df6a3
MT
34563 /* found in xpc_main.c */
34564 extern struct device *xpc_part;
34565 extern struct device *xpc_chan;
34566-extern struct xpc_arch_operations xpc_arch_ops;
34567+extern xpc_arch_operations_no_const xpc_arch_ops;
34568 extern int xpc_disengage_timelimit;
34569 extern int xpc_disengage_timedout;
34570 extern int xpc_activate_IRQ_rcvd;
fe2de317
MT
34571diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
34572index 8d082b4..aa749ae 100644
34573--- a/drivers/misc/sgi-xp/xpc_main.c
34574+++ b/drivers/misc/sgi-xp/xpc_main.c
34575@@ -162,7 +162,7 @@ static struct notifier_block xpc_die_notifier = {
6e9df6a3
MT
34576 .notifier_call = xpc_system_die,
34577 };
34578
34579-struct xpc_arch_operations xpc_arch_ops;
34580+xpc_arch_operations_no_const xpc_arch_ops;
34581
34582 /*
34583 * Timer function to enforce the timelimit on the partition disengage.
fe2de317 34584diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
c6e2a6c8 34585index 69ef0be..f3ef91e 100644
fe2de317
MT
34586--- a/drivers/mmc/host/sdhci-pci.c
34587+++ b/drivers/mmc/host/sdhci-pci.c
c6e2a6c8 34588@@ -652,7 +652,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
6e9df6a3
MT
34589 .probe = via_probe,
34590 };
34591
34592-static const struct pci_device_id pci_ids[] __devinitdata = {
34593+static const struct pci_device_id pci_ids[] __devinitconst = {
34594 {
34595 .vendor = PCI_VENDOR_ID_RICOH,
34596 .device = PCI_DEVICE_ID_RICOH_R5C822,
fe2de317 34597diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
c6e2a6c8 34598index a4eb8b5..8c0628f 100644
fe2de317
MT
34599--- a/drivers/mtd/devices/doc2000.c
34600+++ b/drivers/mtd/devices/doc2000.c
c6e2a6c8 34601@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
58c5fc13
MT
34602
34603 /* The ECC will not be calculated correctly if less than 512 is written */
34604 /* DBB-
34605- if (len != 0x200 && eccbuf)
34606+ if (len != 0x200)
34607 printk(KERN_WARNING
34608 "ECC needs a full sector write (adr: %lx size %lx)\n",
34609 (long) to, (long) len);
fe2de317 34610diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
c6e2a6c8 34611index a9e57d6..c6d8731 100644
fe2de317
MT
34612--- a/drivers/mtd/nand/denali.c
34613+++ b/drivers/mtd/nand/denali.c
15a11c5b 34614@@ -26,6 +26,7 @@
57199397
MT
34615 #include <linux/pci.h>
34616 #include <linux/mtd/mtd.h>
34617 #include <linux/module.h>
34618+#include <linux/slab.h>
34619
34620 #include "denali.h"
34621
fe2de317 34622diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
5e856224 34623index 51b9d6a..52af9a7 100644
fe2de317
MT
34624--- a/drivers/mtd/nftlmount.c
34625+++ b/drivers/mtd/nftlmount.c
66a7e928
MT
34626@@ -24,6 +24,7 @@
34627 #include <asm/errno.h>
34628 #include <linux/delay.h>
34629 #include <linux/slab.h>
34630+#include <linux/sched.h>
34631 #include <linux/mtd/mtd.h>
34632 #include <linux/mtd/nand.h>
34633 #include <linux/mtd/nftl.h>
4c928ab7 34634diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
c6e2a6c8 34635index 6762dc4..9956862 100644
4c928ab7
MT
34636--- a/drivers/net/ethernet/atheros/atlx/atl2.c
34637+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
c6e2a6c8 34638@@ -2859,7 +2859,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
6e9df6a3
MT
34639 */
34640
34641 #define ATL2_PARAM(X, desc) \
34642- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34643+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
34644 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
34645 MODULE_PARM_DESC(X, desc);
34646 #else
4c928ab7 34647diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
c6e2a6c8 34648index 61a7670..7da6e34 100644
4c928ab7
MT
34649--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
34650+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
c6e2a6c8 34651@@ -483,7 +483,7 @@ struct bnx2x_rx_mode_obj {
6e9df6a3
MT
34652
34653 int (*wait_comp)(struct bnx2x *bp,
34654 struct bnx2x_rx_mode_ramrod_params *p);
34655-};
34656+} __no_const;
34657
34658 /********************** Set multicast group ***********************************/
34659
4c928ab7 34660diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
c6e2a6c8 34661index 93865f8..5448741 100644
4c928ab7
MT
34662--- a/drivers/net/ethernet/broadcom/tg3.h
34663+++ b/drivers/net/ethernet/broadcom/tg3.h
5e856224 34664@@ -140,6 +140,7 @@
4c928ab7
MT
34665 #define CHIPREV_ID_5750_A0 0x4000
34666 #define CHIPREV_ID_5750_A1 0x4001
34667 #define CHIPREV_ID_5750_A3 0x4003
34668+#define CHIPREV_ID_5750_C1 0x4201
34669 #define CHIPREV_ID_5750_C2 0x4202
34670 #define CHIPREV_ID_5752_A0_HW 0x5000
34671 #define CHIPREV_ID_5752_A0 0x6000
34672diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
5e856224 34673index c4e8643..0979484 100644
4c928ab7
MT
34674--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
34675+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
fe2de317 34676@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
15a11c5b
MT
34677 */
34678 struct l2t_skb_cb {
34679 arp_failure_handler_func arp_failure_handler;
34680-};
34681+} __no_const;
34682
34683 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
34684
4c928ab7 34685diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
c6e2a6c8 34686index 18b106c..2b38d36 100644
4c928ab7
MT
34687--- a/drivers/net/ethernet/dec/tulip/de4x5.c
34688+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
c6e2a6c8 34689@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4c928ab7
MT
34690 for (i=0; i<ETH_ALEN; i++) {
34691 tmp.addr[i] = dev->dev_addr[i];
34692 }
34693- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34694+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
34695 break;
66a7e928 34696
4c928ab7 34697 case DE4X5_SET_HWADDR: /* Set the hardware address */
c6e2a6c8 34698@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4c928ab7
MT
34699 spin_lock_irqsave(&lp->lock, flags);
34700 memcpy(&statbuf, &lp->pktStats, ioc->len);
34701 spin_unlock_irqrestore(&lp->lock, flags);
34702- if (copy_to_user(ioc->data, &statbuf, ioc->len))
34703+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
34704 return -EFAULT;
34705 break;
34706 }
34707diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
c6e2a6c8 34708index ed7d1dc..d426748 100644
4c928ab7
MT
34709--- a/drivers/net/ethernet/dec/tulip/eeprom.c
34710+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
34711@@ -79,7 +79,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
34712 {NULL}};
66a7e928 34713
66a7e928 34714
4c928ab7
MT
34715-static const char *block_name[] __devinitdata = {
34716+static const char *block_name[] __devinitconst = {
34717 "21140 non-MII",
34718 "21140 MII PHY",
34719 "21142 Serial PHY",
34720diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
c6e2a6c8 34721index 2ac6fff..2d127d0 100644
4c928ab7
MT
34722--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
34723+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
34724@@ -236,7 +236,7 @@ struct pci_id_info {
34725 int drv_flags; /* Driver use, intended as capability flags. */
34726 };
34727
34728-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34729+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34730 { /* Sometime a Level-One switch card. */
34731 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
34732 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
34733diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
c6e2a6c8 34734index d783f4f..97fa1b0 100644
4c928ab7
MT
34735--- a/drivers/net/ethernet/dlink/sundance.c
34736+++ b/drivers/net/ethernet/dlink/sundance.c
34737@@ -218,7 +218,7 @@ enum {
34738 struct pci_id_info {
34739 const char *name;
34740 };
34741-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
34742+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
34743 {"D-Link DFE-550TX FAST Ethernet Adapter"},
34744 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
34745 {"D-Link DFE-580TX 4 port Server Adapter"},
34746diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
572b4308 34747index 1bbf6b3..430dcd0 100644
4c928ab7
MT
34748--- a/drivers/net/ethernet/emulex/benet/be_main.c
34749+++ b/drivers/net/ethernet/emulex/benet/be_main.c
c6e2a6c8 34750@@ -403,7 +403,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
4c928ab7
MT
34751
34752 if (wrapped)
34753 newacc += 65536;
34754- ACCESS_ONCE(*acc) = newacc;
34755+ ACCESS_ONCE_RW(*acc) = newacc;
34756 }
34757
34758 void be_parse_stats(struct be_adapter *adapter)
34759diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
c6e2a6c8 34760index 16b0704..d2c07d7 100644
4c928ab7
MT
34761--- a/drivers/net/ethernet/faraday/ftgmac100.c
34762+++ b/drivers/net/ethernet/faraday/ftgmac100.c
5e856224 34763@@ -31,6 +31,8 @@
4c928ab7
MT
34764 #include <linux/netdevice.h>
34765 #include <linux/phy.h>
34766 #include <linux/platform_device.h>
34767+#include <linux/interrupt.h>
34768+#include <linux/irqreturn.h>
34769 #include <net/ip.h>
34770
34771 #include "ftgmac100.h"
34772diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
c6e2a6c8 34773index 829b109..4ae5f6a 100644
4c928ab7
MT
34774--- a/drivers/net/ethernet/faraday/ftmac100.c
34775+++ b/drivers/net/ethernet/faraday/ftmac100.c
5e856224 34776@@ -31,6 +31,8 @@
4c928ab7
MT
34777 #include <linux/module.h>
34778 #include <linux/netdevice.h>
34779 #include <linux/platform_device.h>
34780+#include <linux/interrupt.h>
34781+#include <linux/irqreturn.h>
34782
34783 #include "ftmac100.h"
34784
34785diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
c6e2a6c8 34786index 1637b98..c42f87b 100644
4c928ab7
MT
34787--- a/drivers/net/ethernet/fealnx.c
34788+++ b/drivers/net/ethernet/fealnx.c
34789@@ -150,7 +150,7 @@ struct chip_info {
34790 int flags;
34791 };
34792
34793-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
34794+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
34795 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
34796 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
34797 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
c6e2a6c8
MT
34798diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
34799index b83897f..b2d970f 100644
34800--- a/drivers/net/ethernet/intel/e1000e/e1000.h
34801+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
34802@@ -181,7 +181,7 @@ struct e1000_info;
34803 #define E1000_TXDCTL_DMA_BURST_ENABLE \
34804 (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
34805 E1000_TXDCTL_COUNT_DESC | \
34806- (5 << 16) | /* wthresh must be +1 more than desired */\
34807+ (1 << 16) | /* wthresh must be +1 more than desired */\
34808 (1 << 8) | /* hthresh */ \
34809 0x1f) /* pthresh */
34810
4c928ab7 34811diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
c6e2a6c8 34812index f82ecf5..7d59ecb 100644
4c928ab7
MT
34813--- a/drivers/net/ethernet/intel/e1000e/hw.h
34814+++ b/drivers/net/ethernet/intel/e1000e/hw.h
c6e2a6c8
MT
34815@@ -784,6 +784,7 @@ struct e1000_mac_operations {
34816 void (*config_collision_dist)(struct e1000_hw *);
15a11c5b 34817 s32 (*read_mac_addr)(struct e1000_hw *);
ae4e228f 34818 };
15a11c5b 34819+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
ae4e228f 34820
6e9df6a3
MT
34821 /*
34822 * When to use various PHY register access functions:
c6e2a6c8 34823@@ -824,6 +825,7 @@ struct e1000_phy_operations {
15a11c5b
MT
34824 void (*power_up)(struct e1000_hw *);
34825 void (*power_down)(struct e1000_hw *);
ae4e228f 34826 };
15a11c5b 34827+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
ae4e228f 34828
15a11c5b
MT
34829 /* Function pointers for the NVM. */
34830 struct e1000_nvm_operations {
c6e2a6c8 34831@@ -836,9 +838,10 @@ struct e1000_nvm_operations {
15a11c5b
MT
34832 s32 (*validate)(struct e1000_hw *);
34833 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
ae4e228f 34834 };
15a11c5b 34835+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
ae4e228f
MT
34836
34837 struct e1000_mac_info {
15a11c5b
MT
34838- struct e1000_mac_operations ops;
34839+ e1000_mac_operations_no_const ops;
66a7e928
MT
34840 u8 addr[ETH_ALEN];
34841 u8 perm_addr[ETH_ALEN];
15a11c5b 34842
c6e2a6c8 34843@@ -879,7 +882,7 @@ struct e1000_mac_info {
bc901d79
MT
34844 };
34845
34846 struct e1000_phy_info {
15a11c5b
MT
34847- struct e1000_phy_operations ops;
34848+ e1000_phy_operations_no_const ops;
bc901d79
MT
34849
34850 enum e1000_phy_type type;
15a11c5b 34851
c6e2a6c8 34852@@ -913,7 +916,7 @@ struct e1000_phy_info {
ae4e228f
MT
34853 };
34854
34855 struct e1000_nvm_info {
15a11c5b
MT
34856- struct e1000_nvm_operations ops;
34857+ e1000_nvm_operations_no_const ops;
ae4e228f
MT
34858
34859 enum e1000_nvm_type type;
15a11c5b 34860 enum e1000_nvm_override override;
4c928ab7 34861diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
5e856224 34862index f67cbd3..cef9e3d 100644
4c928ab7
MT
34863--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
34864+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
15a11c5b
MT
34865@@ -314,6 +314,7 @@ struct e1000_mac_operations {
34866 s32 (*read_mac_addr)(struct e1000_hw *);
34867 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
66a7e928 34868 };
15a11c5b 34869+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
66a7e928 34870
15a11c5b
MT
34871 struct e1000_phy_operations {
34872 s32 (*acquire)(struct e1000_hw *);
34873@@ -330,6 +331,7 @@ struct e1000_phy_operations {
34874 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34875 s32 (*write_reg)(struct e1000_hw *, u32, u16);
66a7e928 34876 };
15a11c5b 34877+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
66a7e928 34878
15a11c5b
MT
34879 struct e1000_nvm_operations {
34880 s32 (*acquire)(struct e1000_hw *);
34881@@ -339,6 +341,7 @@ struct e1000_nvm_operations {
34882 s32 (*update)(struct e1000_hw *);
34883 s32 (*validate)(struct e1000_hw *);
66a7e928 34884 };
15a11c5b 34885+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
ae4e228f
MT
34886
34887 struct e1000_info {
34888 s32 (*get_invariants)(struct e1000_hw *);
15a11c5b 34889@@ -350,7 +353,7 @@ struct e1000_info {
ae4e228f 34890 extern const struct e1000_info e1000_82575_info;
bc901d79
MT
34891
34892 struct e1000_mac_info {
15a11c5b
MT
34893- struct e1000_mac_operations ops;
34894+ e1000_mac_operations_no_const ops;
bc901d79
MT
34895
34896 u8 addr[6];
15a11c5b
MT
34897 u8 perm_addr[6];
34898@@ -388,7 +391,7 @@ struct e1000_mac_info {
bc901d79
MT
34899 };
34900
34901 struct e1000_phy_info {
15a11c5b
MT
34902- struct e1000_phy_operations ops;
34903+ e1000_phy_operations_no_const ops;
bc901d79
MT
34904
34905 enum e1000_phy_type type;
15a11c5b
MT
34906
34907@@ -423,7 +426,7 @@ struct e1000_phy_info {
ae4e228f
MT
34908 };
34909
34910 struct e1000_nvm_info {
15a11c5b
MT
34911- struct e1000_nvm_operations ops;
34912+ e1000_nvm_operations_no_const ops;
ae4e228f 34913 enum e1000_nvm_type type;
66a7e928 34914 enum e1000_nvm_override override;
15a11c5b
MT
34915
34916@@ -468,6 +471,7 @@ struct e1000_mbx_operations {
34917 s32 (*check_for_ack)(struct e1000_hw *, u16);
34918 s32 (*check_for_rst)(struct e1000_hw *, u16);
34919 };
34920+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
34921
34922 struct e1000_mbx_stats {
34923 u32 msgs_tx;
34924@@ -479,7 +483,7 @@ struct e1000_mbx_stats {
bc901d79
MT
34925 };
34926
15a11c5b
MT
34927 struct e1000_mbx_info {
34928- struct e1000_mbx_operations ops;
34929+ e1000_mbx_operations_no_const ops;
34930 struct e1000_mbx_stats stats;
34931 u32 timeout;
34932 u32 usec_delay;
4c928ab7 34933diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
5e856224 34934index 57db3c6..aa825fc 100644
4c928ab7
MT
34935--- a/drivers/net/ethernet/intel/igbvf/vf.h
34936+++ b/drivers/net/ethernet/intel/igbvf/vf.h
15a11c5b
MT
34937@@ -189,9 +189,10 @@ struct e1000_mac_operations {
34938 s32 (*read_mac_addr)(struct e1000_hw *);
34939 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
34940 };
34941+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
34942
bc901d79 34943 struct e1000_mac_info {
15a11c5b
MT
34944- struct e1000_mac_operations ops;
34945+ e1000_mac_operations_no_const ops;
bc901d79
MT
34946 u8 addr[6];
34947 u8 perm_addr[6];
66a7e928 34948
15a11c5b
MT
34949@@ -213,6 +214,7 @@ struct e1000_mbx_operations {
34950 s32 (*check_for_ack)(struct e1000_hw *);
34951 s32 (*check_for_rst)(struct e1000_hw *);
34952 };
34953+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
66a7e928 34954
15a11c5b
MT
34955 struct e1000_mbx_stats {
34956 u32 msgs_tx;
34957@@ -224,7 +226,7 @@ struct e1000_mbx_stats {
34958 };
66a7e928 34959
15a11c5b
MT
34960 struct e1000_mbx_info {
34961- struct e1000_mbx_operations ops;
34962+ e1000_mbx_operations_no_const ops;
34963 struct e1000_mbx_stats stats;
34964 u32 timeout;
34965 u32 usec_delay;
4c928ab7 34966diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
c6e2a6c8 34967index 8636e83..ab9bbc3 100644
4c928ab7
MT
34968--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
34969+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
c6e2a6c8 34970@@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations {
15a11c5b
MT
34971 s32 (*update_checksum)(struct ixgbe_hw *);
34972 u16 (*calc_checksum)(struct ixgbe_hw *);
66a7e928 34973 };
15a11c5b 34974+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
66a7e928 34975
15a11c5b
MT
34976 struct ixgbe_mac_operations {
34977 s32 (*init_hw)(struct ixgbe_hw *);
c6e2a6c8 34978@@ -2773,6 +2774,7 @@ struct ixgbe_mac_operations {
6e9df6a3
MT
34979 /* Manageability interface */
34980 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
66a7e928 34981 };
15a11c5b 34982+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
66a7e928 34983
15a11c5b
MT
34984 struct ixgbe_phy_operations {
34985 s32 (*identify)(struct ixgbe_hw *);
c6e2a6c8 34986@@ -2792,9 +2794,10 @@ struct ixgbe_phy_operations {
15a11c5b
MT
34987 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
34988 s32 (*check_overtemp)(struct ixgbe_hw *);
66a7e928 34989 };
15a11c5b 34990+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
66a7e928
MT
34991
34992 struct ixgbe_eeprom_info {
34993- struct ixgbe_eeprom_operations ops;
15a11c5b 34994+ ixgbe_eeprom_operations_no_const ops;
66a7e928
MT
34995 enum ixgbe_eeprom_type type;
34996 u32 semaphore_delay;
34997 u16 word_size;
c6e2a6c8 34998@@ -2804,7 +2807,7 @@ struct ixgbe_eeprom_info {
66a7e928
MT
34999
35000 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
35001 struct ixgbe_mac_info {
35002- struct ixgbe_mac_operations ops;
15a11c5b 35003+ ixgbe_mac_operations_no_const ops;
66a7e928 35004 enum ixgbe_mac_type type;
5e856224
MT
35005 u8 addr[ETH_ALEN];
35006 u8 perm_addr[ETH_ALEN];
c6e2a6c8 35007@@ -2832,7 +2835,7 @@ struct ixgbe_mac_info {
66a7e928
MT
35008 };
35009
35010 struct ixgbe_phy_info {
35011- struct ixgbe_phy_operations ops;
15a11c5b 35012+ ixgbe_phy_operations_no_const ops;
66a7e928
MT
35013 struct mdio_if_info mdio;
35014 enum ixgbe_phy_type type;
35015 u32 id;
c6e2a6c8 35016@@ -2860,6 +2863,7 @@ struct ixgbe_mbx_operations {
15a11c5b
MT
35017 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
35018 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
66a7e928 35019 };
15a11c5b 35020+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
66a7e928 35021
15a11c5b
MT
35022 struct ixgbe_mbx_stats {
35023 u32 msgs_tx;
c6e2a6c8 35024@@ -2871,7 +2875,7 @@ struct ixgbe_mbx_stats {
15a11c5b 35025 };
66a7e928 35026
15a11c5b
MT
35027 struct ixgbe_mbx_info {
35028- struct ixgbe_mbx_operations ops;
35029+ ixgbe_mbx_operations_no_const ops;
35030 struct ixgbe_mbx_stats stats;
35031 u32 timeout;
35032 u32 usec_delay;
572b4308
MT
35033diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35034index 307611a..d8e4562 100644
35035--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35036+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
35037@@ -969,8 +969,6 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
35038 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
35039 for (i = 0; i < q_vector->txr_count; i++) {
35040 tx_ring = &(adapter->tx_ring[r_idx]);
35041- tx_ring->total_bytes = 0;
35042- tx_ring->total_packets = 0;
35043 ixgbevf_clean_tx_irq(adapter, tx_ring);
35044 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
35045 r_idx + 1);
35046@@ -994,16 +992,6 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
35047 struct ixgbe_hw *hw = &adapter->hw;
35048 struct ixgbevf_ring *rx_ring;
35049 int r_idx;
35050- int i;
35051-
35052- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
35053- for (i = 0; i < q_vector->rxr_count; i++) {
35054- rx_ring = &(adapter->rx_ring[r_idx]);
35055- rx_ring->total_bytes = 0;
35056- rx_ring->total_packets = 0;
35057- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
35058- r_idx + 1);
35059- }
35060
35061 if (!q_vector->rxr_count)
35062 return IRQ_HANDLED;
4c928ab7 35063diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
5e856224 35064index 25c951d..cc7cf33 100644
4c928ab7
MT
35065--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
35066+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
15a11c5b
MT
35067@@ -70,6 +70,7 @@ struct ixgbe_mac_operations {
35068 s32 (*clear_vfta)(struct ixgbe_hw *);
35069 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
66a7e928 35070 };
15a11c5b 35071+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
66a7e928 35072
15a11c5b
MT
35073 enum ixgbe_mac_type {
35074 ixgbe_mac_unknown = 0,
35075@@ -79,7 +80,7 @@ enum ixgbe_mac_type {
66a7e928
MT
35076 };
35077
15a11c5b
MT
35078 struct ixgbe_mac_info {
35079- struct ixgbe_mac_operations ops;
35080+ ixgbe_mac_operations_no_const ops;
35081 u8 addr[6];
35082 u8 perm_addr[6];
35083
35084@@ -103,6 +104,7 @@ struct ixgbe_mbx_operations {
35085 s32 (*check_for_ack)(struct ixgbe_hw *);
35086 s32 (*check_for_rst)(struct ixgbe_hw *);
66a7e928 35087 };
15a11c5b
MT
35088+typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
35089
35090 struct ixgbe_mbx_stats {
35091 u32 msgs_tx;
35092@@ -114,7 +116,7 @@ struct ixgbe_mbx_stats {
66a7e928
MT
35093 };
35094
15a11c5b
MT
35095 struct ixgbe_mbx_info {
35096- struct ixgbe_mbx_operations ops;
35097+ ixgbe_mbx_operations_no_const ops;
35098 struct ixgbe_mbx_stats stats;
35099 u32 timeout;
35100 u32 udelay;
4c928ab7 35101diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
c6e2a6c8 35102index 8bb05b4..074796f 100644
4c928ab7
MT
35103--- a/drivers/net/ethernet/mellanox/mlx4/main.c
35104+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
5e856224 35105@@ -41,6 +41,7 @@
66a7e928
MT
35106 #include <linux/slab.h>
35107 #include <linux/io-mapping.h>
5e856224 35108 #include <linux/delay.h>
66a7e928
MT
35109+#include <linux/sched.h>
35110
35111 #include <linux/mlx4/device.h>
35112 #include <linux/mlx4/doorbell.h>
4c928ab7
MT
35113diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35114index 5046a64..71ca936 100644
35115--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
35116+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
35117@@ -514,7 +514,7 @@ struct vxge_hw_uld_cbs {
35118 void (*link_down)(struct __vxge_hw_device *devh);
35119 void (*crit_err)(struct __vxge_hw_device *devh,
35120 enum vxge_hw_event type, u64 ext_data);
35121-};
35122+} __no_const;
15a11c5b 35123
4c928ab7
MT
35124 /*
35125 * struct __vxge_hw_blockpool_entry - Block private data structure
35126diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35127index 4a518a3..936b334 100644
35128--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35129+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.h
35130@@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
35131 struct vxge_hw_mempool_dma *dma_object,
35132 u32 index,
35133 u32 is_last);
35134-};
35135+} __no_const;
15a11c5b 35136
4c928ab7
MT
35137 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
35138 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
35139diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
572b4308 35140index 161e045..0bb5b86 100644
4c928ab7
MT
35141--- a/drivers/net/ethernet/realtek/r8169.c
35142+++ b/drivers/net/ethernet/realtek/r8169.c
c6e2a6c8 35143@@ -708,17 +708,17 @@ struct rtl8169_private {
4c928ab7
MT
35144 struct mdio_ops {
35145 void (*write)(void __iomem *, int, int);
35146 int (*read)(void __iomem *, int);
35147- } mdio_ops;
35148+ } __no_const mdio_ops;
15a11c5b 35149
4c928ab7
MT
35150 struct pll_power_ops {
35151 void (*down)(struct rtl8169_private *);
35152 void (*up)(struct rtl8169_private *);
35153- } pll_power_ops;
35154+ } __no_const pll_power_ops;
15a11c5b 35155
4c928ab7
MT
35156 struct jumbo_ops {
35157 void (*enable)(struct rtl8169_private *);
35158 void (*disable)(struct rtl8169_private *);
35159- } jumbo_ops;
35160+ } __no_const jumbo_ops;
15a11c5b 35161
4c928ab7
MT
35162 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
35163 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
35164diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
c6e2a6c8 35165index a9deda8..5507c31 100644
4c928ab7
MT
35166--- a/drivers/net/ethernet/sis/sis190.c
35167+++ b/drivers/net/ethernet/sis/sis190.c
c6e2a6c8 35168@@ -1620,7 +1620,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
4c928ab7
MT
35169 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
35170 struct net_device *dev)
15a11c5b 35171 {
4c928ab7
MT
35172- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
35173+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
35174 struct sis190_private *tp = netdev_priv(dev);
35175 struct pci_dev *isa_bridge;
35176 u8 reg, tmp8;
35177diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
5e856224 35178index c07cfe9..81cbf7e 100644
4c928ab7
MT
35179--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
35180+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
5e856224 35181@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
15a11c5b 35182
4c928ab7 35183 writel(value, ioaddr + MMC_CNTRL);
15a11c5b 35184
4c928ab7
MT
35185- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35186- MMC_CNTRL, value);
35187+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
35188+// MMC_CNTRL, value);
15a11c5b
MT
35189 }
35190
4c928ab7 35191 /* To mask all all interrupts.*/
c6e2a6c8 35192diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
572b4308 35193index 9bdfaba..3d8f8d4 100644
c6e2a6c8
MT
35194--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
35195+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
572b4308 35196@@ -1587,7 +1587,7 @@ static const struct file_operations stmmac_rings_status_fops = {
c6e2a6c8
MT
35197 .open = stmmac_sysfs_ring_open,
35198 .read = seq_read,
35199 .llseek = seq_lseek,
35200- .release = seq_release,
35201+ .release = single_release,
35202 };
35203
35204 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
572b4308 35205@@ -1659,7 +1659,7 @@ static const struct file_operations stmmac_dma_cap_fops = {
c6e2a6c8
MT
35206 .open = stmmac_sysfs_dma_cap_open,
35207 .read = seq_read,
35208 .llseek = seq_lseek,
35209- .release = seq_release,
35210+ .release = single_release,
35211 };
35212
35213 static int stmmac_init_fs(struct net_device *dev)
5e856224 35214diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
c6e2a6c8 35215index c358245..8c1de63 100644
5e856224
MT
35216--- a/drivers/net/hyperv/hyperv_net.h
35217+++ b/drivers/net/hyperv/hyperv_net.h
c6e2a6c8 35218@@ -98,7 +98,7 @@ struct rndis_device {
5e856224
MT
35219
35220 enum rndis_device_state state;
35221 bool link_state;
35222- atomic_t new_req_id;
35223+ atomic_unchecked_t new_req_id;
35224
35225 spinlock_t request_lock;
35226 struct list_head req_list;
35227diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
c6e2a6c8 35228index d6be64b..5d97e3b 100644
5e856224
MT
35229--- a/drivers/net/hyperv/rndis_filter.c
35230+++ b/drivers/net/hyperv/rndis_filter.c
c6e2a6c8 35231@@ -97,7 +97,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
5e856224
MT
35232 * template
35233 */
35234 set = &rndis_msg->msg.set_req;
35235- set->req_id = atomic_inc_return(&dev->new_req_id);
35236+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35237
35238 /* Add to the request list */
35239 spin_lock_irqsave(&dev->request_lock, flags);
c6e2a6c8 35240@@ -648,7 +648,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
5e856224
MT
35241
35242 /* Setup the rndis set */
35243 halt = &request->request_msg.msg.halt_req;
35244- halt->req_id = atomic_inc_return(&dev->new_req_id);
35245+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
35246
35247 /* Ignore return since this msg is optional. */
35248 rndis_filter_send_request(dev, request);
4c928ab7 35249diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
c6e2a6c8 35250index 21d7151..8034208 100644
4c928ab7
MT
35251--- a/drivers/net/ppp/ppp_generic.c
35252+++ b/drivers/net/ppp/ppp_generic.c
5e856224 35253@@ -986,7 +986,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
15a11c5b
MT
35254 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
35255 struct ppp_stats stats;
35256 struct ppp_comp_stats cstats;
35257- char *vers;
35258
35259 switch (cmd) {
35260 case SIOCGPPPSTATS:
5e856224 35261@@ -1008,8 +1007,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
15a11c5b 35262 break;
66a7e928 35263
15a11c5b
MT
35264 case SIOCGPPPVER:
35265- vers = PPP_VERSION;
35266- if (copy_to_user(addr, vers, strlen(vers) + 1))
35267+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
35268 break;
35269 err = 0;
35270 break;
fe2de317 35271diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
c6e2a6c8 35272index b715e6b..6d2490f 100644
fe2de317
MT
35273--- a/drivers/net/tokenring/abyss.c
35274+++ b/drivers/net/tokenring/abyss.c
c6e2a6c8 35275@@ -450,10 +450,12 @@ static struct pci_driver abyss_driver = {
15a11c5b
MT
35276
35277 static int __init abyss_init (void)
35278 {
35279- abyss_netdev_ops = tms380tr_netdev_ops;
35280+ pax_open_kernel();
35281+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35282
35283- abyss_netdev_ops.ndo_open = abyss_open;
35284- abyss_netdev_ops.ndo_stop = abyss_close;
35285+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35286+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35287+ pax_close_kernel();
35288
35289 return pci_register_driver(&abyss_driver);
35290 }
fe2de317 35291diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
c6e2a6c8 35292index 28adcdf..ae82f35 100644
fe2de317
MT
35293--- a/drivers/net/tokenring/madgemc.c
35294+++ b/drivers/net/tokenring/madgemc.c
c6e2a6c8 35295@@ -742,9 +742,11 @@ static struct mca_driver madgemc_driver = {
15a11c5b
MT
35296
35297 static int __init madgemc_init (void)
35298 {
35299- madgemc_netdev_ops = tms380tr_netdev_ops;
35300- madgemc_netdev_ops.ndo_open = madgemc_open;
35301- madgemc_netdev_ops.ndo_stop = madgemc_close;
35302+ pax_open_kernel();
35303+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35304+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35305+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35306+ pax_close_kernel();
35307
35308 return mca_register_driver (&madgemc_driver);
35309 }
fe2de317 35310diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
c6e2a6c8 35311index 62d90e4..9d84237 100644
fe2de317
MT
35312--- a/drivers/net/tokenring/proteon.c
35313+++ b/drivers/net/tokenring/proteon.c
c6e2a6c8 35314@@ -352,9 +352,11 @@ static int __init proteon_init(void)
15a11c5b
MT
35315 struct platform_device *pdev;
35316 int i, num = 0, err = 0;
35317
35318- proteon_netdev_ops = tms380tr_netdev_ops;
35319- proteon_netdev_ops.ndo_open = proteon_open;
35320- proteon_netdev_ops.ndo_stop = tms380tr_close;
35321+ pax_open_kernel();
35322+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35323+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35324+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35325+ pax_close_kernel();
35326
35327 err = platform_driver_register(&proteon_driver);
35328 if (err)
fe2de317 35329diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
c6e2a6c8 35330index ee11e93..c8f19c7 100644
fe2de317
MT
35331--- a/drivers/net/tokenring/skisa.c
35332+++ b/drivers/net/tokenring/skisa.c
c6e2a6c8 35333@@ -362,9 +362,11 @@ static int __init sk_isa_init(void)
15a11c5b
MT
35334 struct platform_device *pdev;
35335 int i, num = 0, err = 0;
35336
35337- sk_isa_netdev_ops = tms380tr_netdev_ops;
35338- sk_isa_netdev_ops.ndo_open = sk_isa_open;
35339- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35340+ pax_open_kernel();
35341+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35342+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35343+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35344+ pax_close_kernel();
35345
35346 err = platform_driver_register(&sk_isa_driver);
35347 if (err)
fe2de317 35348diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
c6e2a6c8 35349index 2d2a688..35f2372 100644
fe2de317
MT
35350--- a/drivers/net/usb/hso.c
35351+++ b/drivers/net/usb/hso.c
c52201e0
MT
35352@@ -71,7 +71,7 @@
35353 #include <asm/byteorder.h>
35354 #include <linux/serial_core.h>
35355 #include <linux/serial.h>
35356-
35357+#include <asm/local.h>
35358
35359 #define MOD_AUTHOR "Option Wireless"
35360 #define MOD_DESCRIPTION "USB High Speed Option driver"
6892158b 35361@@ -257,7 +257,7 @@ struct hso_serial {
58c5fc13
MT
35362
35363 /* from usb_serial_port */
35364 struct tty_struct *tty;
35365- int open_count;
c52201e0 35366+ local_t open_count;
58c5fc13
MT
35367 spinlock_t serial_lock;
35368
35369 int (*write_data) (struct hso_serial *serial);
fe2de317 35370@@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
58c5fc13
MT
35371 struct urb *urb;
35372
35373 urb = serial->rx_urb[0];
35374- if (serial->open_count > 0) {
c52201e0 35375+ if (local_read(&serial->open_count) > 0) {
58c5fc13
MT
35376 count = put_rxbuf_data(urb, serial);
35377 if (count == -1)
35378 return;
fe2de317 35379@@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
58c5fc13
MT
35380 DUMP1(urb->transfer_buffer, urb->actual_length);
35381
35382 /* Anyone listening? */
35383- if (serial->open_count == 0)
c52201e0 35384+ if (local_read(&serial->open_count) == 0)
58c5fc13
MT
35385 return;
35386
35387 if (status == 0) {
fe2de317 35388@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
35389 spin_unlock_irq(&serial->serial_lock);
35390
35391 /* check for port already opened, if not set the termios */
35392- serial->open_count++;
35393- if (serial->open_count == 1) {
c52201e0 35394+ if (local_inc_return(&serial->open_count) == 1) {
58c5fc13
MT
35395 serial->rx_state = RX_IDLE;
35396 /* Force default termio settings */
57199397 35397 _hso_serial_set_termios(tty, NULL);
fe2de317 35398@@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
35399 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35400 if (result) {
35401 hso_stop_serial_device(serial->parent);
35402- serial->open_count--;
c52201e0 35403+ local_dec(&serial->open_count);
58c5fc13
MT
35404 kref_put(&serial->parent->ref, hso_serial_ref_free);
35405 }
35406 } else {
fe2de317 35407@@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
58c5fc13
MT
35408
35409 /* reset the rts and dtr */
35410 /* do the actual close */
35411- serial->open_count--;
c52201e0 35412+ local_dec(&serial->open_count);
ae4e228f 35413
58c5fc13
MT
35414- if (serial->open_count <= 0) {
35415- serial->open_count = 0;
c52201e0
MT
35416+ if (local_read(&serial->open_count) <= 0) {
35417+ local_set(&serial->open_count, 0);
58c5fc13
MT
35418 spin_lock_irq(&serial->serial_lock);
35419 if (serial->tty == tty) {
35420 serial->tty->driver_data = NULL;
fe2de317 35421@@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
58c5fc13
MT
35422
35423 /* the actual setup */
35424 spin_lock_irqsave(&serial->serial_lock, flags);
35425- if (serial->open_count)
c52201e0 35426+ if (local_read(&serial->open_count))
58c5fc13
MT
35427 _hso_serial_set_termios(tty, old);
35428 else
35429 tty->termios = old;
fe2de317 35430@@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *urb)
ae4e228f
MT
35431 D1("Pending read interrupt on port %d\n", i);
35432 spin_lock(&serial->serial_lock);
35433 if (serial->rx_state == RX_IDLE &&
35434- serial->open_count > 0) {
c52201e0 35435+ local_read(&serial->open_count) > 0) {
ae4e228f
MT
35436 /* Setup and send a ctrl req read on
35437 * port i */
35438 if (!serial->rx_urb_filled[0]) {
fe2de317 35439@@ -3098,7 +3097,7 @@ static int hso_resume(struct usb_interface *iface)
58c5fc13
MT
35440 /* Start all serial ports */
35441 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35442 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35443- if (dev2ser(serial_table[i])->open_count) {
c52201e0 35444+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
58c5fc13
MT
35445 result =
35446 hso_start_serial_device(serial_table[i], GFP_NOIO);
35447 hso_kick_transmit(dev2ser(serial_table[i]));
fe2de317 35448diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
572b4308 35449index 420d69b..74f90a2 100644
fe2de317
MT
35450--- a/drivers/net/wireless/ath/ath.h
35451+++ b/drivers/net/wireless/ath/ath.h
4c928ab7 35452@@ -119,6 +119,7 @@ struct ath_ops {
fe2de317
MT
35453 void (*write_flush) (void *);
35454 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
35455 };
35456+typedef struct ath_ops __no_const ath_ops_no_const;
35457
35458 struct ath_common;
35459 struct ath_bus_ops;
4c928ab7 35460diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
c6e2a6c8 35461index aa2abaf..5f5152d 100644
4c928ab7
MT
35462--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35463+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
35464@@ -183,8 +183,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35465 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
35466 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
35467
35468- ACCESS_ONCE(ads->ds_link) = i->link;
35469- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
35470+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
35471+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
35472
35473 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
35474 ctl6 = SM(i->keytype, AR_EncrType);
35475@@ -198,26 +198,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35476
35477 if ((i->is_first || i->is_last) &&
35478 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
35479- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
35480+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
35481 | set11nTries(i->rates, 1)
35482 | set11nTries(i->rates, 2)
35483 | set11nTries(i->rates, 3)
35484 | (i->dur_update ? AR_DurUpdateEna : 0)
35485 | SM(0, AR_BurstDur);
35486
35487- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
35488+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
35489 | set11nRate(i->rates, 1)
35490 | set11nRate(i->rates, 2)
35491 | set11nRate(i->rates, 3);
35492 } else {
35493- ACCESS_ONCE(ads->ds_ctl2) = 0;
35494- ACCESS_ONCE(ads->ds_ctl3) = 0;
35495+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
35496+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
35497 }
35498
35499 if (!i->is_first) {
35500- ACCESS_ONCE(ads->ds_ctl0) = 0;
35501- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35502- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35503+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
35504+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35505+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
35506 return;
35507 }
66a7e928 35508
4c928ab7
MT
35509@@ -242,7 +242,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35510 break;
35511 }
66a7e928 35512
4c928ab7
MT
35513- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35514+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
35515 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35516 | SM(i->txpower, AR_XmitPower)
35517 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
35518@@ -252,19 +252,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35519 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
35520 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
66a7e928 35521
4c928ab7
MT
35522- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
35523- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
35524+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
35525+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
15a11c5b 35526
4c928ab7
MT
35527 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
35528 return;
15a11c5b 35529
4c928ab7
MT
35530- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35531+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
35532 | set11nPktDurRTSCTS(i->rates, 1);
35533
35534- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35535+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
35536 | set11nPktDurRTSCTS(i->rates, 3);
35537
35538- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35539+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
35540 | set11nRateFlags(i->rates, 1)
35541 | set11nRateFlags(i->rates, 2)
35542 | set11nRateFlags(i->rates, 3)
35543diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
c6e2a6c8 35544index a66a13b..0ef399e 100644
4c928ab7
MT
35545--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
35546+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
c6e2a6c8
MT
35547@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
35548 (i->qcu << AR_TxQcuNum_S) | desc_len;
4c928ab7
MT
35549
35550 checksum += val;
35551- ACCESS_ONCE(ads->info) = val;
35552+ ACCESS_ONCE_RW(ads->info) = val;
35553
35554 checksum += i->link;
35555- ACCESS_ONCE(ads->link) = i->link;
35556+ ACCESS_ONCE_RW(ads->link) = i->link;
35557
35558 checksum += i->buf_addr[0];
35559- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
35560+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
35561 checksum += i->buf_addr[1];
35562- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
35563+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
35564 checksum += i->buf_addr[2];
35565- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
35566+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
35567 checksum += i->buf_addr[3];
35568- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
35569+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
35570
35571 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
35572- ACCESS_ONCE(ads->ctl3) = val;
35573+ ACCESS_ONCE_RW(ads->ctl3) = val;
35574 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
35575- ACCESS_ONCE(ads->ctl5) = val;
35576+ ACCESS_ONCE_RW(ads->ctl5) = val;
35577 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
35578- ACCESS_ONCE(ads->ctl7) = val;
35579+ ACCESS_ONCE_RW(ads->ctl7) = val;
35580 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
35581- ACCESS_ONCE(ads->ctl9) = val;
35582+ ACCESS_ONCE_RW(ads->ctl9) = val;
35583
35584 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
35585- ACCESS_ONCE(ads->ctl10) = checksum;
35586+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
35587
35588 if (i->is_first || i->is_last) {
35589- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
35590+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
35591 | set11nTries(i->rates, 1)
35592 | set11nTries(i->rates, 2)
35593 | set11nTries(i->rates, 3)
35594 | (i->dur_update ? AR_DurUpdateEna : 0)
35595 | SM(0, AR_BurstDur);
35596
35597- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
35598+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
35599 | set11nRate(i->rates, 1)
35600 | set11nRate(i->rates, 2)
35601 | set11nRate(i->rates, 3);
35602 } else {
35603- ACCESS_ONCE(ads->ctl13) = 0;
35604- ACCESS_ONCE(ads->ctl14) = 0;
35605+ ACCESS_ONCE_RW(ads->ctl13) = 0;
35606+ ACCESS_ONCE_RW(ads->ctl14) = 0;
35607 }
35608
35609 ads->ctl20 = 0;
c6e2a6c8 35610@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
4c928ab7
MT
35611
35612 ctl17 = SM(i->keytype, AR_EncrType);
35613 if (!i->is_first) {
35614- ACCESS_ONCE(ads->ctl11) = 0;
35615- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35616- ACCESS_ONCE(ads->ctl15) = 0;
35617- ACCESS_ONCE(ads->ctl16) = 0;
35618- ACCESS_ONCE(ads->ctl17) = ctl17;
35619- ACCESS_ONCE(ads->ctl18) = 0;
35620- ACCESS_ONCE(ads->ctl19) = 0;
35621+ ACCESS_ONCE_RW(ads->ctl11) = 0;
35622+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
35623+ ACCESS_ONCE_RW(ads->ctl15) = 0;
35624+ ACCESS_ONCE_RW(ads->ctl16) = 0;
35625+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
35626+ ACCESS_ONCE_RW(ads->ctl18) = 0;
35627+ ACCESS_ONCE_RW(ads->ctl19) = 0;
35628 return;
35629 }
66a7e928 35630
4c928ab7
MT
35631- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35632+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
35633 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
35634 | SM(i->txpower, AR_XmitPower)
35635 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
c6e2a6c8 35636@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
4c928ab7
MT
35637 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
35638 ctl12 |= SM(val, AR_PAPRDChainMask);
66a7e928 35639
4c928ab7
MT
35640- ACCESS_ONCE(ads->ctl12) = ctl12;
35641- ACCESS_ONCE(ads->ctl17) = ctl17;
35642+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
35643+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
15a11c5b 35644
4c928ab7
MT
35645- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35646+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
35647 | set11nPktDurRTSCTS(i->rates, 1);
15a11c5b 35648
4c928ab7
MT
35649- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35650+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
35651 | set11nPktDurRTSCTS(i->rates, 3);
66a7e928 35652
4c928ab7
MT
35653- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
35654+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
35655 | set11nRateFlags(i->rates, 1)
35656 | set11nRateFlags(i->rates, 2)
35657 | set11nRateFlags(i->rates, 3)
35658 | SM(i->rtscts_rate, AR_RTSCTSRate);
66a7e928 35659
4c928ab7
MT
35660- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
35661+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
35662 }
15a11c5b 35663
4c928ab7 35664 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
fe2de317 35665diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
c6e2a6c8 35666index e88f182..4e57f5d 100644
fe2de317
MT
35667--- a/drivers/net/wireless/ath/ath9k/hw.h
35668+++ b/drivers/net/wireless/ath/ath9k/hw.h
c6e2a6c8 35669@@ -614,7 +614,7 @@ struct ath_hw_private_ops {
15a11c5b
MT
35670
35671 /* ANI */
35672 void (*ani_cache_ini_regs)(struct ath_hw *ah);
35673-};
35674+} __no_const;
35675
35676 /**
35677 * struct ath_hw_ops - callbacks used by hardware code and driver code
c6e2a6c8 35678@@ -644,7 +644,7 @@ struct ath_hw_ops {
15a11c5b
MT
35679 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
35680 struct ath_hw_antcomb_conf *antconf);
35681
35682-};
35683+} __no_const;
35684
35685 struct ath_nf_limits {
35686 s16 max;
c6e2a6c8 35687@@ -664,7 +664,7 @@ enum ath_cal_list {
4c928ab7 35688 #define AH_FASTCC 0x4
15a11c5b
MT
35689
35690 struct ath_hw {
35691- struct ath_ops reg_ops;
35692+ ath_ops_no_const reg_ops;
35693
35694 struct ieee80211_hw *hw;
35695 struct ath_common common;
4c928ab7 35696diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
5e856224 35697index af00e2c..ab04d34 100644
4c928ab7
MT
35698--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
35699+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
5e856224 35700@@ -545,7 +545,7 @@ struct phy_func_ptr {
4c928ab7
MT
35701 void (*carrsuppr)(struct brcms_phy *);
35702 s32 (*rxsigpwr)(struct brcms_phy *, s32);
35703 void (*detach)(struct brcms_phy *);
35704-};
35705+} __no_const;
66a7e928 35706
4c928ab7
MT
35707 struct brcms_phy {
35708 struct brcms_phy_pub pubpi_ro;
5e856224 35709diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
c6e2a6c8 35710index faec404..a5277f1 100644
5e856224
MT
35711--- a/drivers/net/wireless/iwlegacy/3945-mac.c
35712+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
c6e2a6c8 35713@@ -3611,7 +3611,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
15a11c5b 35714 */
5e856224
MT
35715 if (il3945_mod_params.disable_hw_scan) {
35716 D_INFO("Disabling hw_scan\n");
c6e2a6c8 35717- il3945_mac_ops.hw_scan = NULL;
15a11c5b 35718+ pax_open_kernel();
c6e2a6c8 35719+ *(void **)&il3945_mac_ops.hw_scan = NULL;
15a11c5b 35720+ pax_close_kernel();
66a7e928 35721 }
66a7e928 35722
5e856224 35723 D_INFO("*** LOAD DRIVER ***\n");
fe2de317 35724diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
c6e2a6c8 35725index b7ce6a6..5649756 100644
fe2de317
MT
35726--- a/drivers/net/wireless/mac80211_hwsim.c
35727+++ b/drivers/net/wireless/mac80211_hwsim.c
c6e2a6c8 35728@@ -1721,9 +1721,11 @@ static int __init init_mac80211_hwsim(void)
15a11c5b 35729 return -EINVAL;
66a7e928 35730
15a11c5b
MT
35731 if (fake_hw_scan) {
35732- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35733- mac80211_hwsim_ops.sw_scan_start = NULL;
35734- mac80211_hwsim_ops.sw_scan_complete = NULL;
35735+ pax_open_kernel();
35736+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
35737+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
35738+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
35739+ pax_close_kernel();
35740 }
ae4e228f 35741
15a11c5b 35742 spin_lock_init(&hwsim_radio_lock);
fe2de317 35743diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
c6e2a6c8 35744index 35225e9..95e6bf9 100644
fe2de317
MT
35745--- a/drivers/net/wireless/mwifiex/main.h
35746+++ b/drivers/net/wireless/mwifiex/main.h
c6e2a6c8 35747@@ -537,7 +537,7 @@ struct mwifiex_if_ops {
6e9df6a3 35748 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
4c928ab7
MT
35749 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
35750 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
6e9df6a3
MT
35751-};
35752+} __no_const;
35753
35754 struct mwifiex_adapter {
4c928ab7 35755 u8 iface_type;
fe2de317 35756diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
c6e2a6c8 35757index d66e298..55b0a89 100644
fe2de317
MT
35758--- a/drivers/net/wireless/rndis_wlan.c
35759+++ b/drivers/net/wireless/rndis_wlan.c
5e856224 35760@@ -1278,7 +1278,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
df50ba0c
MT
35761
35762 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
35763
35764- if (rts_threshold < 0 || rts_threshold > 2347)
35765+ if (rts_threshold > 2347)
35766 rts_threshold = 2347;
35767
35768 tmp = cpu_to_le32(rts_threshold);
c1e3898a
MT
35769diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
35770index c264dfa..08ee30e 100644
35771--- a/drivers/net/wireless/rt2x00/rt2x00.h
35772+++ b/drivers/net/wireless/rt2x00/rt2x00.h
35773@@ -396,7 +396,7 @@ struct rt2x00_intf {
35774 * for hardware which doesn't support hardware
35775 * sequence counting.
35776 */
35777- atomic_t seqno;
35778+ atomic_unchecked_t seqno;
35779 };
35780
35781 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
35782diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
35783index 50f92d5..f3afc41 100644
35784--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
35785+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
35786@@ -229,9 +229,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
35787 * sequence counter given by mac80211.
35788 */
35789 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
35790- seqno = atomic_add_return(0x10, &intf->seqno);
35791+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
35792 else
35793- seqno = atomic_read(&intf->seqno);
35794+ seqno = atomic_read_unchecked(&intf->seqno);
35795
35796 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
35797 hdr->seq_ctrl |= cpu_to_le16(seqno);
fe2de317 35798diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
c6e2a6c8 35799index 9d8f581..0f6589e 100644
fe2de317
MT
35800--- a/drivers/net/wireless/wl1251/wl1251.h
35801+++ b/drivers/net/wireless/wl1251/wl1251.h
15a11c5b
MT
35802@@ -266,7 +266,7 @@ struct wl1251_if_operations {
35803 void (*reset)(struct wl1251 *wl);
35804 void (*enable_irq)(struct wl1251 *wl);
35805 void (*disable_irq)(struct wl1251 *wl);
35806-};
35807+} __no_const;
35808
35809 struct wl1251 {
35810 struct ieee80211_hw *hw;
fe2de317
MT
35811diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
35812index f34b5b2..b5abb9f 100644
35813--- a/drivers/oprofile/buffer_sync.c
35814+++ b/drivers/oprofile/buffer_sync.c
35815@@ -343,7 +343,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
58c5fc13
MT
35816 if (cookie == NO_COOKIE)
35817 offset = pc;
35818 if (cookie == INVALID_COOKIE) {
35819- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35820+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35821 offset = pc;
35822 }
35823 if (cookie != last_cookie) {
fe2de317 35824@@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
58c5fc13
MT
35825 /* add userspace sample */
35826
35827 if (!mm) {
35828- atomic_inc(&oprofile_stats.sample_lost_no_mm);
35829+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
35830 return 0;
35831 }
35832
35833 cookie = lookup_dcookie(mm, s->eip, &offset);
35834
35835 if (cookie == INVALID_COOKIE) {
35836- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
35837+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
35838 return 0;
35839 }
35840
15a11c5b 35841@@ -563,7 +563,7 @@ void sync_buffer(int cpu)
58c5fc13
MT
35842 /* ignore backtraces if failed to add a sample */
35843 if (state == sb_bt_start) {
35844 state = sb_bt_ignore;
35845- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
35846+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
35847 }
35848 }
35849 release_mm(mm);
fe2de317 35850diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
4c928ab7 35851index c0cc4e7..44d4e54 100644
fe2de317
MT
35852--- a/drivers/oprofile/event_buffer.c
35853+++ b/drivers/oprofile/event_buffer.c
35854@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
ae4e228f
MT
35855 }
35856
58c5fc13
MT
35857 if (buffer_pos == buffer_size) {
35858- atomic_inc(&oprofile_stats.event_lost_overflow);
35859+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
35860 return;
35861 }
35862
fe2de317 35863diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
5e856224 35864index ed2c3ec..deda85a 100644
fe2de317
MT
35865--- a/drivers/oprofile/oprof.c
35866+++ b/drivers/oprofile/oprof.c
35867@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
ae4e228f
MT
35868 if (oprofile_ops.switch_events())
35869 return;
58c5fc13 35870
ae4e228f
MT
35871- atomic_inc(&oprofile_stats.multiplex_counter);
35872+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
35873 start_switch_worker();
35874 }
58c5fc13 35875
fe2de317
MT
35876diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
35877index 917d28e..d62d981 100644
35878--- a/drivers/oprofile/oprofile_stats.c
35879+++ b/drivers/oprofile/oprofile_stats.c
ae4e228f 35880@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
58c5fc13
MT
35881 cpu_buf->sample_invalid_eip = 0;
35882 }
35883
35884- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
35885- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
35886- atomic_set(&oprofile_stats.event_lost_overflow, 0);
35887- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
ae4e228f 35888- atomic_set(&oprofile_stats.multiplex_counter, 0);
58c5fc13
MT
35889+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
35890+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
35891+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
35892+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
ae4e228f 35893+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
58c5fc13
MT
35894 }
35895
35896
fe2de317
MT
35897diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
35898index 38b6fc0..b5cbfce 100644
35899--- a/drivers/oprofile/oprofile_stats.h
35900+++ b/drivers/oprofile/oprofile_stats.h
ae4e228f 35901@@ -13,11 +13,11 @@
6e9df6a3 35902 #include <linux/atomic.h>
58c5fc13
MT
35903
35904 struct oprofile_stat_struct {
35905- atomic_t sample_lost_no_mm;
35906- atomic_t sample_lost_no_mapping;
35907- atomic_t bt_lost_no_mapping;
35908- atomic_t event_lost_overflow;
ae4e228f 35909- atomic_t multiplex_counter;
58c5fc13
MT
35910+ atomic_unchecked_t sample_lost_no_mm;
35911+ atomic_unchecked_t sample_lost_no_mapping;
35912+ atomic_unchecked_t bt_lost_no_mapping;
35913+ atomic_unchecked_t event_lost_overflow;
ae4e228f 35914+ atomic_unchecked_t multiplex_counter;
58c5fc13
MT
35915 };
35916
35917 extern struct oprofile_stat_struct oprofile_stats;
fe2de317 35918diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
c6e2a6c8 35919index 849357c..b83c1e0 100644
fe2de317
MT
35920--- a/drivers/oprofile/oprofilefs.c
35921+++ b/drivers/oprofile/oprofilefs.c
c6e2a6c8 35922@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
fe2de317
MT
35923
35924
35925 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
35926- char const *name, atomic_t *val)
35927+ char const *name, atomic_unchecked_t *val)
35928 {
35929 return __oprofilefs_create_file(sb, root, name,
35930 &atomic_ro_fops, 0444, val);
35931diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
35932index 3f56bc0..707d642 100644
35933--- a/drivers/parport/procfs.c
35934+++ b/drivers/parport/procfs.c
35935@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
ae4e228f
MT
35936
35937 *ppos += len;
35938
35939- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
bc901d79 35940+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
ae4e228f
MT
35941 }
35942
35943 #ifdef CONFIG_PARPORT_1284
fe2de317 35944@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
ae4e228f
MT
35945
35946 *ppos += len;
35947
35948- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
bc901d79 35949+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
ae4e228f
MT
35950 }
35951 #endif /* IEEE1284.3 support. */
35952
fe2de317
MT
35953diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
35954index 9fff878..ad0ad53 100644
35955--- a/drivers/pci/hotplug/cpci_hotplug.h
35956+++ b/drivers/pci/hotplug/cpci_hotplug.h
15a11c5b
MT
35957@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
35958 int (*hardware_test) (struct slot* slot, u32 value);
35959 u8 (*get_power) (struct slot* slot);
35960 int (*set_power) (struct slot* slot, int value);
35961-};
35962+} __no_const;
35963
35964 struct cpci_hp_controller {
35965 unsigned int irq;
fe2de317
MT
35966diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
35967index 76ba8a1..20ca857 100644
35968--- a/drivers/pci/hotplug/cpqphp_nvram.c
35969+++ b/drivers/pci/hotplug/cpqphp_nvram.c
35970@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
58c5fc13
MT
35971
35972 void compaq_nvram_init (void __iomem *rom_start)
35973 {
35974+
35975+#ifndef CONFIG_PAX_KERNEXEC
35976 if (rom_start) {
35977 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
35978 }
35979+#endif
35980+
35981 dbg("int15 entry = %p\n", compaq_int15_entry_point);
35982
35983 /* initialize our int15 lock */
fe2de317 35984diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
c6e2a6c8 35985index b500840..d7159d3 100644
fe2de317
MT
35986--- a/drivers/pci/pcie/aspm.c
35987+++ b/drivers/pci/pcie/aspm.c
16454cff
MT
35988@@ -27,9 +27,9 @@
35989 #define MODULE_PARAM_PREFIX "pcie_aspm."
35990
35991 /* Note: those are not register definitions */
35992-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
35993-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
35994-#define ASPM_STATE_L1 (4) /* L1 state */
35995+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
35996+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
35997+#define ASPM_STATE_L1 (4U) /* L1 state */
35998 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
35999 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36000
fe2de317 36001diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
c6e2a6c8 36002index 5e1ca3c..08082fe 100644
fe2de317
MT
36003--- a/drivers/pci/probe.c
36004+++ b/drivers/pci/probe.c
c6e2a6c8 36005@@ -215,7 +215,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
bc901d79 36006 u16 orig_cmd;
c6e2a6c8 36007 struct pci_bus_region region;
bc901d79
MT
36008
36009- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
36010+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
36011
36012 if (!dev->mmio_always_on) {
36013 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
fe2de317
MT
36014diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
36015index 27911b5..5b6db88 100644
36016--- a/drivers/pci/proc.c
36017+++ b/drivers/pci/proc.c
36018@@ -476,7 +476,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
58c5fc13
MT
36019 static int __init pci_proc_init(void)
36020 {
36021 struct pci_dev *dev = NULL;
36022+
36023+#ifdef CONFIG_GRKERNSEC_PROC_ADD
36024+#ifdef CONFIG_GRKERNSEC_PROC_USER
36025+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36026+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36027+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36028+#endif
36029+#else
36030 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36031+#endif
36032 proc_create("devices", 0, proc_bus_pci_dir,
36033 &proc_bus_pci_dev_operations);
36034 proc_initialized = 1;
fe2de317 36035diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
c6e2a6c8 36036index d68c000..f6094ca 100644
fe2de317
MT
36037--- a/drivers/platform/x86/thinkpad_acpi.c
36038+++ b/drivers/platform/x86/thinkpad_acpi.c
15a11c5b 36039@@ -2094,7 +2094,7 @@ static int hotkey_mask_get(void)
66a7e928
MT
36040 return 0;
36041 }
36042
15a11c5b
MT
36043-void static hotkey_mask_warn_incomplete_mask(void)
36044+static void hotkey_mask_warn_incomplete_mask(void)
66a7e928 36045 {
15a11c5b
MT
36046 /* log only what the user can fix... */
36047 const u32 wantedmask = hotkey_driver_mask &
fe2de317
MT
36048@@ -2325,11 +2325,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
36049 }
36050 }
36051
36052-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36053- struct tp_nvram_state *newn,
36054- const u32 event_mask)
36055-{
36056-
36057 #define TPACPI_COMPARE_KEY(__scancode, __member) \
36058 do { \
36059 if ((event_mask & (1 << __scancode)) && \
36060@@ -2343,36 +2338,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36061 tpacpi_hotkey_send_key(__scancode); \
36062 } while (0)
36063
36064- void issue_volchange(const unsigned int oldvol,
36065- const unsigned int newvol)
36066- {
36067- unsigned int i = oldvol;
36068+static void issue_volchange(const unsigned int oldvol,
36069+ const unsigned int newvol,
36070+ const u32 event_mask)
36071+{
36072+ unsigned int i = oldvol;
36073
36074- while (i > newvol) {
36075- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36076- i--;
36077- }
36078- while (i < newvol) {
36079- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36080- i++;
36081- }
36082+ while (i > newvol) {
36083+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
36084+ i--;
36085 }
36086+ while (i < newvol) {
36087+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36088+ i++;
36089+ }
36090+}
36091
36092- void issue_brightnesschange(const unsigned int oldbrt,
36093- const unsigned int newbrt)
36094- {
36095- unsigned int i = oldbrt;
36096+static void issue_brightnesschange(const unsigned int oldbrt,
36097+ const unsigned int newbrt,
36098+ const u32 event_mask)
36099+{
36100+ unsigned int i = oldbrt;
36101
36102- while (i > newbrt) {
36103- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36104- i--;
36105- }
36106- while (i < newbrt) {
36107- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36108- i++;
36109- }
36110+ while (i > newbrt) {
36111+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
36112+ i--;
36113+ }
36114+ while (i < newbrt) {
36115+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36116+ i++;
36117 }
36118+}
36119
36120+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36121+ struct tp_nvram_state *newn,
36122+ const u32 event_mask)
36123+{
36124 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
36125 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
36126 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
36127@@ -2406,7 +2407,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36128 oldn->volume_level != newn->volume_level) {
36129 /* recently muted, or repeated mute keypress, or
36130 * multiple presses ending in mute */
36131- issue_volchange(oldn->volume_level, newn->volume_level);
36132+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36133 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
36134 }
36135 } else {
36136@@ -2416,7 +2417,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36137 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
36138 }
36139 if (oldn->volume_level != newn->volume_level) {
36140- issue_volchange(oldn->volume_level, newn->volume_level);
36141+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
36142 } else if (oldn->volume_toggle != newn->volume_toggle) {
36143 /* repeated vol up/down keypress at end of scale ? */
36144 if (newn->volume_level == 0)
36145@@ -2429,7 +2430,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36146 /* handle brightness */
36147 if (oldn->brightness_level != newn->brightness_level) {
36148 issue_brightnesschange(oldn->brightness_level,
36149- newn->brightness_level);
36150+ newn->brightness_level,
36151+ event_mask);
36152 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
36153 /* repeated key presses that didn't change state */
36154 if (newn->brightness_level == 0)
36155@@ -2438,10 +2440,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
36156 && !tp_features.bright_unkfw)
36157 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
36158 }
36159+}
36160
36161 #undef TPACPI_COMPARE_KEY
36162 #undef TPACPI_MAY_SEND_KEY
36163-}
36164
36165 /*
36166 * Polling driver
36167diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
c6e2a6c8 36168index 769d265..a3a05ca 100644
fe2de317
MT
36169--- a/drivers/pnp/pnpbios/bioscalls.c
36170+++ b/drivers/pnp/pnpbios/bioscalls.c
c6e2a6c8 36171@@ -58,7 +58,7 @@ do { \
ae4e228f 36172 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
58c5fc13
MT
36173 } while(0)
36174
ae4e228f
MT
36175-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36176+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36177 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
58c5fc13
MT
36178
36179 /*
c6e2a6c8 36180@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
58c5fc13
MT
36181
36182 cpu = get_cpu();
36183 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36184+
ae4e228f 36185+ pax_open_kernel();
58c5fc13 36186 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
ae4e228f 36187+ pax_close_kernel();
58c5fc13 36188
58c5fc13
MT
36189 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36190 spin_lock_irqsave(&pnp_bios_lock, flags);
c6e2a6c8 36191@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
58c5fc13
MT
36192 :"memory");
36193 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36194
ae4e228f 36195+ pax_open_kernel();
58c5fc13 36196 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
ae4e228f 36197+ pax_close_kernel();
58c5fc13
MT
36198+
36199 put_cpu();
36200
36201 /* If we get here and this is set then the PnP BIOS faulted on us. */
c6e2a6c8 36202@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
58c5fc13
MT
36203 return status;
36204 }
36205
36206-void pnpbios_calls_init(union pnp_bios_install_struct *header)
36207+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36208 {
36209 int i;
36210
c6e2a6c8 36211@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
58c5fc13
MT
36212 pnp_bios_callpoint.offset = header->fields.pm16offset;
36213 pnp_bios_callpoint.segment = PNP_CS16;
36214
ae4e228f 36215+ pax_open_kernel();
58c5fc13 36216+
ae4e228f
MT
36217 for_each_possible_cpu(i) {
36218 struct desc_struct *gdt = get_cpu_gdt_table(i);
36219 if (!gdt)
c6e2a6c8 36220@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
ae4e228f
MT
36221 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36222 (unsigned long)__va(header->fields.pm16dseg));
58c5fc13
MT
36223 }
36224+
ae4e228f 36225+ pax_close_kernel();
58c5fc13 36226 }
fe2de317
MT
36227diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
36228index b0ecacb..7c9da2e 100644
36229--- a/drivers/pnp/resource.c
36230+++ b/drivers/pnp/resource.c
36231@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
58c5fc13
MT
36232 return 1;
36233
36234 /* check if the resource is valid */
36235- if (*irq < 0 || *irq > 15)
36236+ if (*irq > 15)
36237 return 0;
36238
36239 /* check if the resource is reserved */
fe2de317 36240@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
58c5fc13
MT
36241 return 1;
36242
36243 /* check if the resource is valid */
36244- if (*dma < 0 || *dma == 4 || *dma > 7)
36245+ if (*dma == 4 || *dma > 7)
36246 return 0;
36247
36248 /* check if the resource is reserved */
fe2de317 36249diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
c6e2a6c8 36250index 222ccd8..6275fa5 100644
fe2de317
MT
36251--- a/drivers/power/bq27x00_battery.c
36252+++ b/drivers/power/bq27x00_battery.c
5e856224 36253@@ -72,7 +72,7 @@
15a11c5b
MT
36254 struct bq27x00_device_info;
36255 struct bq27x00_access_methods {
36256 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
36257-};
36258+} __no_const;
36259
36260 enum bq27x00_chip { BQ27000, BQ27500 };
36261
fe2de317 36262diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
c6e2a6c8 36263index 4c5b053..104263e 100644
fe2de317
MT
36264--- a/drivers/regulator/max8660.c
36265+++ b/drivers/regulator/max8660.c
c6e2a6c8 36266@@ -385,8 +385,10 @@ static int __devinit max8660_probe(struct i2c_client *client,
15a11c5b
MT
36267 max8660->shadow_regs[MAX8660_OVER1] = 5;
36268 } else {
36269 /* Otherwise devices can be toggled via software */
36270- max8660_dcdc_ops.enable = max8660_dcdc_enable;
36271- max8660_dcdc_ops.disable = max8660_dcdc_disable;
36272+ pax_open_kernel();
36273+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
36274+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
36275+ pax_close_kernel();
36276 }
66a7e928 36277
15a11c5b 36278 /*
fe2de317 36279diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
c6e2a6c8 36280index 845aa22..99ec402 100644
fe2de317
MT
36281--- a/drivers/regulator/mc13892-regulator.c
36282+++ b/drivers/regulator/mc13892-regulator.c
5e856224 36283@@ -574,10 +574,12 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
15a11c5b
MT
36284 }
36285 mc13xxx_unlock(mc13892);
66a7e928 36286
15a11c5b
MT
36287- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36288+ pax_open_kernel();
36289+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
36290 = mc13892_vcam_set_mode;
36291- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36292+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
36293 = mc13892_vcam_get_mode;
36294+ pax_close_kernel();
5e856224
MT
36295
36296 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
36297 ARRAY_SIZE(mc13892_regulators));
fe2de317
MT
36298diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
36299index cace6d3..f623fda 100644
36300--- a/drivers/rtc/rtc-dev.c
36301+++ b/drivers/rtc/rtc-dev.c
bc901d79
MT
36302@@ -14,6 +14,7 @@
36303 #include <linux/module.h>
36304 #include <linux/rtc.h>
36305 #include <linux/sched.h>
36306+#include <linux/grsecurity.h>
36307 #include "rtc-core.h"
36308
36309 static dev_t rtc_devt;
fe2de317 36310@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
bc901d79
MT
36311 if (copy_from_user(&tm, uarg, sizeof(tm)))
36312 return -EFAULT;
36313
36314+ gr_log_timechange();
36315+
36316 return rtc_set_time(rtc, &tm);
36317
36318 case RTC_PIE_ON:
fe2de317 36319diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
c6e2a6c8 36320index 3fcf627..f334910 100644
fe2de317
MT
36321--- a/drivers/scsi/aacraid/aacraid.h
36322+++ b/drivers/scsi/aacraid/aacraid.h
15a11c5b
MT
36323@@ -492,7 +492,7 @@ struct adapter_ops
36324 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36325 /* Administrative operations */
36326 int (*adapter_comm)(struct aac_dev * dev, int comm);
36327-};
36328+} __no_const;
66a7e928
MT
36329
36330 /*
15a11c5b 36331 * Define which interrupt handler needs to be installed
fe2de317 36332diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
c6e2a6c8 36333index 0d279c44..3d25a97 100644
fe2de317
MT
36334--- a/drivers/scsi/aacraid/linit.c
36335+++ b/drivers/scsi/aacraid/linit.c
36336@@ -93,7 +93,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
6e9df6a3
MT
36337 #elif defined(__devinitconst)
36338 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36339 #else
36340-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
36341+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
36342 #endif
36343 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
36344 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
fe2de317 36345diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
c6e2a6c8 36346index ff80552..1c4120c 100644
fe2de317
MT
36347--- a/drivers/scsi/aic94xx/aic94xx_init.c
36348+++ b/drivers/scsi/aic94xx/aic94xx_init.c
36349@@ -1012,7 +1012,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
c6e2a6c8 36350 .lldd_ata_set_dmamode = asd_set_dmamode,
6e9df6a3
MT
36351 };
36352
36353-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
36354+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
36355 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
36356 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
36357 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
fe2de317 36358diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
c6e2a6c8 36359index 4ad7e36..d004679 100644
fe2de317
MT
36360--- a/drivers/scsi/bfa/bfa.h
36361+++ b/drivers/scsi/bfa/bfa.h
36362@@ -196,7 +196,7 @@ struct bfa_hwif_s {
36363 u32 *end);
36364 int cpe_vec_q0;
36365 int rme_vec_q0;
36366-};
36367+} __no_const;
36368 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
66a7e928 36369
fe2de317
MT
36370 struct bfa_faa_cbfn_s {
36371diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
5e856224 36372index f0f80e2..8ec946b 100644
fe2de317
MT
36373--- a/drivers/scsi/bfa/bfa_fcpim.c
36374+++ b/drivers/scsi/bfa/bfa_fcpim.c
5e856224 36375@@ -3715,7 +3715,7 @@ bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
4c928ab7
MT
36376
36377 bfa_iotag_attach(fcp);
36378
36379- fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
36380+ fcp->itn_arr = (bfa_itn_s_no_const *) bfa_mem_kva_curp(fcp);
36381 bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
36382 (fcp->num_itns * sizeof(struct bfa_itn_s));
36383 memset(fcp->itn_arr, 0,
5e856224 36384@@ -3773,7 +3773,7 @@ bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
6e9df6a3
MT
36385 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
36386 {
36387 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
36388- struct bfa_itn_s *itn;
36389+ bfa_itn_s_no_const *itn;
36390
36391 itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
36392 itn->isr = isr;
fe2de317 36393diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
5e856224 36394index 36f26da..38a34a8 100644
fe2de317
MT
36395--- a/drivers/scsi/bfa/bfa_fcpim.h
36396+++ b/drivers/scsi/bfa/bfa_fcpim.h
6e9df6a3
MT
36397@@ -37,6 +37,7 @@ struct bfa_iotag_s {
36398 struct bfa_itn_s {
36399 bfa_isr_func_t isr;
36400 };
36401+typedef struct bfa_itn_s __no_const bfa_itn_s_no_const;
36402
36403 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
36404 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
5e856224 36405@@ -147,7 +148,7 @@ struct bfa_fcp_mod_s {
6e9df6a3
MT
36406 struct list_head iotag_tio_free_q; /* free IO resources */
36407 struct list_head iotag_unused_q; /* unused IO resources*/
36408 struct bfa_iotag_s *iotag_arr;
36409- struct bfa_itn_s *itn_arr;
36410+ bfa_itn_s_no_const *itn_arr;
36411 int num_ioim_reqs;
36412 int num_fwtio_reqs;
36413 int num_itns;
fe2de317 36414diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
c6e2a6c8 36415index 1a99d4b..e85d64b 100644
fe2de317
MT
36416--- a/drivers/scsi/bfa/bfa_ioc.h
36417+++ b/drivers/scsi/bfa/bfa_ioc.h
6e9df6a3 36418@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
15a11c5b
MT
36419 bfa_ioc_disable_cbfn_t disable_cbfn;
36420 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36421 bfa_ioc_reset_cbfn_t reset_cbfn;
36422-};
36423+} __no_const;
8308f9c9 36424
15a11c5b 36425 /*
6e9df6a3
MT
36426 * IOC event notification mechanism.
36427@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
15a11c5b
MT
36428 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
36429 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
6e9df6a3 36430 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
15a11c5b
MT
36431-};
36432+} __no_const;
36433
6e9df6a3
MT
36434 /*
36435 * Queue element to wait for room in request queue. FIFO order is
fe2de317 36436diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
c6e2a6c8 36437index a3a056a..b9bbc2f 100644
fe2de317
MT
36438--- a/drivers/scsi/hosts.c
36439+++ b/drivers/scsi/hosts.c
66a7e928
MT
36440@@ -42,7 +42,7 @@
36441 #include "scsi_logging.h"
36442
36443
36444-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36445+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36446
36447
36448 static void scsi_host_cls_release(struct device *dev)
c6e2a6c8 36449@@ -360,7 +360,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
66a7e928
MT
36450 * subtract one because we increment first then return, but we need to
36451 * know what the next host number was before increment
36452 */
36453- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36454+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36455 shost->dma_channel = 0xff;
36456
36457 /* These three are default values which can be overridden */
fe2de317 36458diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
c6e2a6c8 36459index 500e20d..ebd3059 100644
fe2de317
MT
36460--- a/drivers/scsi/hpsa.c
36461+++ b/drivers/scsi/hpsa.c
c6e2a6c8 36462@@ -521,7 +521,7 @@ static inline u32 next_command(struct ctlr_info *h)
15a11c5b
MT
36463 u32 a;
36464
36465 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36466- return h->access.command_completed(h);
36467+ return h->access->command_completed(h);
36468
36469 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36470 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
c6e2a6c8 36471@@ -3002,7 +3002,7 @@ static void start_io(struct ctlr_info *h)
15a11c5b
MT
36472 while (!list_empty(&h->reqQ)) {
36473 c = list_entry(h->reqQ.next, struct CommandList, list);
36474 /* can't do anything if fifo is full */
36475- if ((h->access.fifo_full(h))) {
36476+ if ((h->access->fifo_full(h))) {
36477 dev_warn(&h->pdev->dev, "fifo full\n");
36478 break;
36479 }
c6e2a6c8 36480@@ -3012,7 +3012,7 @@ static void start_io(struct ctlr_info *h)
15a11c5b 36481 h->Qdepth--;
66a7e928 36482
15a11c5b
MT
36483 /* Tell the controller execute command */
36484- h->access.submit_command(h, c);
36485+ h->access->submit_command(h, c);
66a7e928 36486
15a11c5b
MT
36487 /* Put job onto the completed Q */
36488 addQ(&h->cmpQ, c);
c6e2a6c8 36489@@ -3021,17 +3021,17 @@ static void start_io(struct ctlr_info *h)
66a7e928 36490
15a11c5b
MT
36491 static inline unsigned long get_next_completion(struct ctlr_info *h)
36492 {
36493- return h->access.command_completed(h);
36494+ return h->access->command_completed(h);
36495 }
66a7e928 36496
15a11c5b
MT
36497 static inline bool interrupt_pending(struct ctlr_info *h)
36498 {
36499- return h->access.intr_pending(h);
36500+ return h->access->intr_pending(h);
36501 }
66a7e928 36502
15a11c5b
MT
36503 static inline long interrupt_not_for_us(struct ctlr_info *h)
36504 {
36505- return (h->access.intr_pending(h) == 0) ||
36506+ return (h->access->intr_pending(h) == 0) ||
36507 (h->interrupts_enabled == 0);
36508 }
66a7e928 36509
c6e2a6c8 36510@@ -3930,7 +3930,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
15a11c5b
MT
36511 if (prod_index < 0)
36512 return -ENODEV;
36513 h->product_name = products[prod_index].product_name;
36514- h->access = *(products[prod_index].access);
36515+ h->access = products[prod_index].access;
66a7e928 36516
15a11c5b
MT
36517 if (hpsa_board_disabled(h->pdev)) {
36518 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
c6e2a6c8 36519@@ -4175,7 +4175,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
4c928ab7
MT
36520
36521 assert_spin_locked(&lockup_detector_lock);
36522 remove_ctlr_from_lockup_detector_list(h);
36523- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36524+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36525 spin_lock_irqsave(&h->lock, flags);
36526 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
36527 spin_unlock_irqrestore(&h->lock, flags);
c6e2a6c8 36528@@ -4355,7 +4355,7 @@ reinit_after_soft_reset:
66a7e928 36529 }
66a7e928 36530
15a11c5b
MT
36531 /* make sure the board interrupts are off */
36532- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36533+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
66a7e928 36534
15a11c5b
MT
36535 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
36536 goto clean2;
c6e2a6c8 36537@@ -4389,7 +4389,7 @@ reinit_after_soft_reset:
15a11c5b
MT
36538 * fake ones to scoop up any residual completions.
36539 */
36540 spin_lock_irqsave(&h->lock, flags);
36541- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36542+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36543 spin_unlock_irqrestore(&h->lock, flags);
36544 free_irq(h->intr[h->intr_mode], h);
36545 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
c6e2a6c8 36546@@ -4408,9 +4408,9 @@ reinit_after_soft_reset:
15a11c5b
MT
36547 dev_info(&h->pdev->dev, "Board READY.\n");
36548 dev_info(&h->pdev->dev,
36549 "Waiting for stale completions to drain.\n");
36550- h->access.set_intr_mask(h, HPSA_INTR_ON);
36551+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36552 msleep(10000);
36553- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36554+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36555
36556 rc = controller_reset_failed(h->cfgtable);
36557 if (rc)
c6e2a6c8 36558@@ -4431,7 +4431,7 @@ reinit_after_soft_reset:
15a11c5b
MT
36559 }
36560
36561 /* Turn the interrupts on so we can service requests */
36562- h->access.set_intr_mask(h, HPSA_INTR_ON);
36563+ h->access->set_intr_mask(h, HPSA_INTR_ON);
36564
36565 hpsa_hba_inquiry(h);
36566 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
c6e2a6c8 36567@@ -4483,7 +4483,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
15a11c5b
MT
36568 * To write all data in the battery backed cache to disks
36569 */
36570 hpsa_flush_cache(h);
36571- h->access.set_intr_mask(h, HPSA_INTR_OFF);
36572+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
36573 free_irq(h->intr[h->intr_mode], h);
36574 #ifdef CONFIG_PCI_MSI
36575 if (h->msix_vector)
c6e2a6c8 36576@@ -4657,7 +4657,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
15a11c5b
MT
36577 return;
36578 }
36579 /* Change the access methods to the performant access methods */
36580- h->access = SA5_performant_access;
36581+ h->access = &SA5_performant_access;
36582 h->transMethod = CFGTBL_Trans_Performant;
36583 }
36584
fe2de317 36585diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
c6e2a6c8 36586index 7b28d54..952f23a 100644
fe2de317
MT
36587--- a/drivers/scsi/hpsa.h
36588+++ b/drivers/scsi/hpsa.h
c6e2a6c8 36589@@ -72,7 +72,7 @@ struct ctlr_info {
15a11c5b
MT
36590 unsigned int msix_vector;
36591 unsigned int msi_vector;
36592 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
36593- struct access_method access;
36594+ struct access_method *access;
36595
36596 /* queue and queue Info */
36597 struct list_head reqQ;
fe2de317
MT
36598diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
36599index f2df059..a3a9930 100644
36600--- a/drivers/scsi/ips.h
36601+++ b/drivers/scsi/ips.h
15a11c5b
MT
36602@@ -1027,7 +1027,7 @@ typedef struct {
36603 int (*intr)(struct ips_ha *);
36604 void (*enableint)(struct ips_ha *);
36605 uint32_t (*statupd)(struct ips_ha *);
36606-} ips_hw_func_t;
36607+} __no_const ips_hw_func_t;
36608
36609 typedef struct ips_ha {
36610 uint8_t ha_id[IPS_MAX_CHANNELS+1];
fe2de317 36611diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
c6e2a6c8 36612index aceffad..c35c08d 100644
fe2de317
MT
36613--- a/drivers/scsi/libfc/fc_exch.c
36614+++ b/drivers/scsi/libfc/fc_exch.c
16454cff 36615@@ -105,12 +105,12 @@ struct fc_exch_mgr {
58c5fc13
MT
36616 * all together if not used XXX
36617 */
36618 struct {
36619- atomic_t no_free_exch;
36620- atomic_t no_free_exch_xid;
36621- atomic_t xid_not_found;
36622- atomic_t xid_busy;
36623- atomic_t seq_not_found;
36624- atomic_t non_bls_resp;
36625+ atomic_unchecked_t no_free_exch;
36626+ atomic_unchecked_t no_free_exch_xid;
36627+ atomic_unchecked_t xid_not_found;
36628+ atomic_unchecked_t xid_busy;
36629+ atomic_unchecked_t seq_not_found;
36630+ atomic_unchecked_t non_bls_resp;
36631 } stats;
58c5fc13 36632 };
16454cff 36633
4c928ab7 36634@@ -719,7 +719,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
58c5fc13
MT
36635 /* allocate memory for exchange */
36636 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
36637 if (!ep) {
36638- atomic_inc(&mp->stats.no_free_exch);
36639+ atomic_inc_unchecked(&mp->stats.no_free_exch);
36640 goto out;
36641 }
36642 memset(ep, 0, sizeof(*ep));
4c928ab7 36643@@ -780,7 +780,7 @@ out:
58c5fc13
MT
36644 return ep;
36645 err:
ae4e228f 36646 spin_unlock_bh(&pool->lock);
58c5fc13
MT
36647- atomic_inc(&mp->stats.no_free_exch_xid);
36648+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
36649 mempool_free(ep, mp->ep_pool);
36650 return NULL;
36651 }
4c928ab7 36652@@ -923,7 +923,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
58c5fc13
MT
36653 xid = ntohs(fh->fh_ox_id); /* we originated exch */
36654 ep = fc_exch_find(mp, xid);
36655 if (!ep) {
36656- atomic_inc(&mp->stats.xid_not_found);
36657+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36658 reject = FC_RJT_OX_ID;
36659 goto out;
36660 }
4c928ab7 36661@@ -953,7 +953,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
58c5fc13
MT
36662 ep = fc_exch_find(mp, xid);
36663 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
36664 if (ep) {
36665- atomic_inc(&mp->stats.xid_busy);
36666+ atomic_inc_unchecked(&mp->stats.xid_busy);
36667 reject = FC_RJT_RX_ID;
36668 goto rel;
36669 }
4c928ab7 36670@@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
58c5fc13
MT
36671 }
36672 xid = ep->xid; /* get our XID */
36673 } else if (!ep) {
36674- atomic_inc(&mp->stats.xid_not_found);
36675+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36676 reject = FC_RJT_RX_ID; /* XID not found */
36677 goto out;
36678 }
4c928ab7 36679@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
58c5fc13
MT
36680 } else {
36681 sp = &ep->seq;
36682 if (sp->id != fh->fh_seq_id) {
36683- atomic_inc(&mp->stats.seq_not_found);
36684+ atomic_inc_unchecked(&mp->stats.seq_not_found);
6e9df6a3
MT
36685 if (f_ctl & FC_FC_END_SEQ) {
36686 /*
36687 * Update sequence_id based on incoming last
4c928ab7 36688@@ -1431,22 +1431,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
58c5fc13
MT
36689
36690 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
36691 if (!ep) {
36692- atomic_inc(&mp->stats.xid_not_found);
36693+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36694 goto out;
36695 }
36696 if (ep->esb_stat & ESB_ST_COMPLETE) {
36697- atomic_inc(&mp->stats.xid_not_found);
36698+ atomic_inc_unchecked(&mp->stats.xid_not_found);
16454cff 36699 goto rel;
58c5fc13
MT
36700 }
36701 if (ep->rxid == FC_XID_UNKNOWN)
36702 ep->rxid = ntohs(fh->fh_rx_id);
36703 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
36704- atomic_inc(&mp->stats.xid_not_found);
36705+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36706 goto rel;
36707 }
36708 if (ep->did != ntoh24(fh->fh_s_id) &&
36709 ep->did != FC_FID_FLOGI) {
36710- atomic_inc(&mp->stats.xid_not_found);
36711+ atomic_inc_unchecked(&mp->stats.xid_not_found);
36712 goto rel;
36713 }
36714 sof = fr_sof(fp);
4c928ab7 36715@@ -1455,7 +1455,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
57199397
MT
36716 sp->ssb_stat |= SSB_ST_RESP;
36717 sp->id = fh->fh_seq_id;
36718 } else if (sp->id != fh->fh_seq_id) {
36719- atomic_inc(&mp->stats.seq_not_found);
36720+ atomic_inc_unchecked(&mp->stats.seq_not_found);
36721 goto rel;
58c5fc13 36722 }
57199397 36723
4c928ab7 36724@@ -1519,9 +1519,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
58c5fc13 36725 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
ae4e228f
MT
36726
36727 if (!sp)
58c5fc13
MT
36728- atomic_inc(&mp->stats.xid_not_found);
36729+ atomic_inc_unchecked(&mp->stats.xid_not_found);
ae4e228f 36730 else
58c5fc13
MT
36731- atomic_inc(&mp->stats.non_bls_resp);
36732+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
ae4e228f 36733
58c5fc13 36734 fc_frame_free(fp);
ae4e228f 36735 }
fe2de317 36736diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
572b4308 36737index d109cc3..09f4e7d 100644
fe2de317
MT
36738--- a/drivers/scsi/libsas/sas_ata.c
36739+++ b/drivers/scsi/libsas/sas_ata.c
c6e2a6c8 36740@@ -529,7 +529,7 @@ static struct ata_port_operations sas_sata_ops = {
66a7e928
MT
36741 .postreset = ata_std_postreset,
36742 .error_handler = ata_std_error_handler,
ae4e228f 36743 .post_internal_cmd = sas_ata_post_internal,
66a7e928
MT
36744- .qc_defer = ata_std_qc_defer,
36745+ .qc_defer = ata_std_qc_defer,
36746 .qc_prep = ata_noop_qc_prep,
36747 .qc_issue = sas_ata_qc_issue,
36748 .qc_fill_rtf = sas_ata_qc_fill_rtf,
fe2de317 36749diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
c6e2a6c8 36750index 3a1ffdd..8eb7c71 100644
fe2de317
MT
36751--- a/drivers/scsi/lpfc/lpfc.h
36752+++ b/drivers/scsi/lpfc/lpfc.h
5e856224 36753@@ -413,7 +413,7 @@ struct lpfc_vport {
fe2de317
MT
36754 struct dentry *debug_nodelist;
36755 struct dentry *vport_debugfs_root;
36756 struct lpfc_debugfs_trc *disc_trc;
36757- atomic_t disc_trc_cnt;
36758+ atomic_unchecked_t disc_trc_cnt;
36759 #endif
36760 uint8_t stat_data_enabled;
36761 uint8_t stat_data_blocked;
c6e2a6c8 36762@@ -826,8 +826,8 @@ struct lpfc_hba {
fe2de317
MT
36763 struct timer_list fabric_block_timer;
36764 unsigned long bit_flags;
36765 #define FABRIC_COMANDS_BLOCKED 0
36766- atomic_t num_rsrc_err;
36767- atomic_t num_cmd_success;
36768+ atomic_unchecked_t num_rsrc_err;
36769+ atomic_unchecked_t num_cmd_success;
36770 unsigned long last_rsrc_error_time;
36771 unsigned long last_ramp_down_time;
36772 unsigned long last_ramp_up_time;
c6e2a6c8 36773@@ -863,7 +863,7 @@ struct lpfc_hba {
4c928ab7 36774
fe2de317
MT
36775 struct dentry *debug_slow_ring_trc;
36776 struct lpfc_debugfs_trc *slow_ring_trc;
36777- atomic_t slow_ring_trc_cnt;
36778+ atomic_unchecked_t slow_ring_trc_cnt;
36779 /* iDiag debugfs sub-directory */
36780 struct dentry *idiag_root;
36781 struct dentry *idiag_pci_cfg;
36782diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
c6e2a6c8 36783index af04b0d..8f1a97e 100644
fe2de317
MT
36784--- a/drivers/scsi/lpfc/lpfc_debugfs.c
36785+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
4c928ab7 36786@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
66a7e928
MT
36787
36788 #include <linux/debugfs.h>
8308f9c9
MT
36789
36790-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36791+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
36792 static unsigned long lpfc_debugfs_start_time = 0L;
36793
66a7e928 36794 /* iDiag */
4c928ab7 36795@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
8308f9c9
MT
36796 lpfc_debugfs_enable = 0;
36797
36798 len = 0;
36799- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
36800+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
36801 (lpfc_debugfs_max_disc_trc - 1);
36802 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
36803 dtp = vport->disc_trc + i;
4c928ab7 36804@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
8308f9c9
MT
36805 lpfc_debugfs_enable = 0;
36806
36807 len = 0;
36808- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
36809+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
36810 (lpfc_debugfs_max_slow_ring_trc - 1);
36811 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
36812 dtp = phba->slow_ring_trc + i;
4c928ab7 36813@@ -636,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
8308f9c9
MT
36814 !vport || !vport->disc_trc)
36815 return;
36816
36817- index = atomic_inc_return(&vport->disc_trc_cnt) &
36818+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
36819 (lpfc_debugfs_max_disc_trc - 1);
36820 dtp = vport->disc_trc + index;
36821 dtp->fmt = fmt;
36822 dtp->data1 = data1;
36823 dtp->data2 = data2;
36824 dtp->data3 = data3;
36825- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36826+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36827 dtp->jif = jiffies;
36828 #endif
36829 return;
4c928ab7 36830@@ -674,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
8308f9c9
MT
36831 !phba || !phba->slow_ring_trc)
36832 return;
36833
36834- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
36835+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
36836 (lpfc_debugfs_max_slow_ring_trc - 1);
36837 dtp = phba->slow_ring_trc + index;
36838 dtp->fmt = fmt;
36839 dtp->data1 = data1;
36840 dtp->data2 = data2;
36841 dtp->data3 = data3;
36842- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
36843+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
36844 dtp->jif = jiffies;
36845 #endif
36846 return;
c6e2a6c8 36847@@ -4090,7 +4090,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
8308f9c9
MT
36848 "slow_ring buffer\n");
36849 goto debug_failed;
36850 }
36851- atomic_set(&phba->slow_ring_trc_cnt, 0);
36852+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
36853 memset(phba->slow_ring_trc, 0,
36854 (sizeof(struct lpfc_debugfs_trc) *
36855 lpfc_debugfs_max_slow_ring_trc));
c6e2a6c8 36856@@ -4136,7 +4136,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
8308f9c9
MT
36857 "buffer\n");
36858 goto debug_failed;
36859 }
36860- atomic_set(&vport->disc_trc_cnt, 0);
36861+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
36862
36863 snprintf(name, sizeof(name), "discovery_trace");
36864 vport->debug_disc_trc =
fe2de317 36865diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
c6e2a6c8 36866index 9598fdc..7e9f3d9 100644
fe2de317
MT
36867--- a/drivers/scsi/lpfc/lpfc_init.c
36868+++ b/drivers/scsi/lpfc/lpfc_init.c
c6e2a6c8
MT
36869@@ -10266,8 +10266,10 @@ lpfc_init(void)
36870 "misc_register returned with status %d", error);
15a11c5b
MT
36871
36872 if (lpfc_enable_npiv) {
36873- lpfc_transport_functions.vport_create = lpfc_vport_create;
36874- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36875+ pax_open_kernel();
36876+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
36877+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
36878+ pax_close_kernel();
36879 }
36880 lpfc_transport_template =
36881 fc_attach_transport(&lpfc_transport_functions);
fe2de317 36882diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
c6e2a6c8 36883index 88f3a83..686d3fa 100644
fe2de317
MT
36884--- a/drivers/scsi/lpfc/lpfc_scsi.c
36885+++ b/drivers/scsi/lpfc/lpfc_scsi.c
c6e2a6c8 36886@@ -311,7 +311,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
8308f9c9
MT
36887 uint32_t evt_posted;
36888
36889 spin_lock_irqsave(&phba->hbalock, flags);
36890- atomic_inc(&phba->num_rsrc_err);
36891+ atomic_inc_unchecked(&phba->num_rsrc_err);
36892 phba->last_rsrc_error_time = jiffies;
36893
36894 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
c6e2a6c8 36895@@ -352,7 +352,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
8308f9c9
MT
36896 unsigned long flags;
36897 struct lpfc_hba *phba = vport->phba;
36898 uint32_t evt_posted;
36899- atomic_inc(&phba->num_cmd_success);
36900+ atomic_inc_unchecked(&phba->num_cmd_success);
36901
36902 if (vport->cfg_lun_queue_depth <= queue_depth)
36903 return;
c6e2a6c8 36904@@ -396,8 +396,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
8308f9c9
MT
36905 unsigned long num_rsrc_err, num_cmd_success;
36906 int i;
36907
36908- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
36909- num_cmd_success = atomic_read(&phba->num_cmd_success);
36910+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
36911+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
36912
36913 vports = lpfc_create_vport_work_array(phba);
36914 if (vports != NULL)
c6e2a6c8 36915@@ -417,8 +417,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
8308f9c9
MT
36916 }
36917 }
36918 lpfc_destroy_vport_work_array(phba, vports);
36919- atomic_set(&phba->num_rsrc_err, 0);
36920- atomic_set(&phba->num_cmd_success, 0);
36921+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36922+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36923 }
36924
36925 /**
c6e2a6c8 36926@@ -452,8 +452,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
8308f9c9
MT
36927 }
36928 }
36929 lpfc_destroy_vport_work_array(phba, vports);
36930- atomic_set(&phba->num_rsrc_err, 0);
36931- atomic_set(&phba->num_cmd_success, 0);
36932+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
36933+ atomic_set_unchecked(&phba->num_cmd_success, 0);
36934 }
36935
36936 /**
fe2de317 36937diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
5e856224 36938index ea8a0b4..812a124 100644
fe2de317
MT
36939--- a/drivers/scsi/pmcraid.c
36940+++ b/drivers/scsi/pmcraid.c
4c928ab7 36941@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
8308f9c9
MT
36942 res->scsi_dev = scsi_dev;
36943 scsi_dev->hostdata = res;
36944 res->change_detected = 0;
36945- atomic_set(&res->read_failures, 0);
36946- atomic_set(&res->write_failures, 0);
36947+ atomic_set_unchecked(&res->read_failures, 0);
36948+ atomic_set_unchecked(&res->write_failures, 0);
36949 rc = 0;
36950 }
36951 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
4c928ab7 36952@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
8308f9c9
MT
36953
36954 /* If this was a SCSI read/write command keep count of errors */
36955 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
36956- atomic_inc(&res->read_failures);
36957+ atomic_inc_unchecked(&res->read_failures);
36958 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
36959- atomic_inc(&res->write_failures);
36960+ atomic_inc_unchecked(&res->write_failures);
36961
36962 if (!RES_IS_GSCSI(res->cfg_entry) &&
36963 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
4c928ab7 36964@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
8308f9c9
MT
36965 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36966 * hrrq_id assigned here in queuecommand
36967 */
36968- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36969+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36970 pinstance->num_hrrq;
36971 cmd->cmd_done = pmcraid_io_done;
36972
4c928ab7 36973@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
8308f9c9
MT
36974 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
36975 * hrrq_id assigned here in queuecommand
36976 */
36977- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
36978+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
36979 pinstance->num_hrrq;
36980
36981 if (request_size) {
4c928ab7 36982@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
8308f9c9
MT
36983
36984 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
36985 /* add resources only after host is added into system */
36986- if (!atomic_read(&pinstance->expose_resources))
36987+ if (!atomic_read_unchecked(&pinstance->expose_resources))
36988 return;
36989
36990 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
4c928ab7 36991@@ -5331,8 +5331,8 @@ static int __devinit pmcraid_init_instance(
8308f9c9
MT
36992 init_waitqueue_head(&pinstance->reset_wait_q);
36993
36994 atomic_set(&pinstance->outstanding_cmds, 0);
36995- atomic_set(&pinstance->last_message_id, 0);
36996- atomic_set(&pinstance->expose_resources, 0);
36997+ atomic_set_unchecked(&pinstance->last_message_id, 0);
36998+ atomic_set_unchecked(&pinstance->expose_resources, 0);
36999
37000 INIT_LIST_HEAD(&pinstance->free_res_q);
37001 INIT_LIST_HEAD(&pinstance->used_res_q);
4c928ab7 37002@@ -6047,7 +6047,7 @@ static int __devinit pmcraid_probe(
8308f9c9
MT
37003 /* Schedule worker thread to handle CCN and take care of adding and
37004 * removing devices to OS
37005 */
37006- atomic_set(&pinstance->expose_resources, 1);
37007+ atomic_set_unchecked(&pinstance->expose_resources, 1);
37008 schedule_work(&pinstance->worker_q);
37009 return rc;
37010
fe2de317 37011diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
c6e2a6c8 37012index e1d150f..6c6df44 100644
fe2de317
MT
37013--- a/drivers/scsi/pmcraid.h
37014+++ b/drivers/scsi/pmcraid.h
4c928ab7 37015@@ -748,7 +748,7 @@ struct pmcraid_instance {
8308f9c9
MT
37016 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
37017
37018 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
37019- atomic_t last_message_id;
37020+ atomic_unchecked_t last_message_id;
37021
37022 /* configuration table */
37023 struct pmcraid_config_table *cfg_table;
4c928ab7 37024@@ -777,7 +777,7 @@ struct pmcraid_instance {
8308f9c9
MT
37025 atomic_t outstanding_cmds;
37026
37027 /* should add/delete resources to mid-layer now ?*/
37028- atomic_t expose_resources;
37029+ atomic_unchecked_t expose_resources;
37030
37031
37032
4c928ab7 37033@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
8308f9c9
MT
37034 struct pmcraid_config_table_entry_ext cfg_entry_ext;
37035 };
37036 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37037- atomic_t read_failures; /* count of failed READ commands */
37038- atomic_t write_failures; /* count of failed WRITE commands */
37039+ atomic_unchecked_t read_failures; /* count of failed READ commands */
37040+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37041
37042 /* To indicate add/delete/modify during CCN */
37043 u8 change_detected;
fe2de317 37044diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
c6e2a6c8 37045index a244303..6015eb7 100644
fe2de317
MT
37046--- a/drivers/scsi/qla2xxx/qla_def.h
37047+++ b/drivers/scsi/qla2xxx/qla_def.h
c6e2a6c8 37048@@ -2264,7 +2264,7 @@ struct isp_operations {
15a11c5b
MT
37049 int (*start_scsi) (srb_t *);
37050 int (*abort_isp) (struct scsi_qla_host *);
5e856224 37051 int (*iospace_config)(struct qla_hw_data*);
15a11c5b
MT
37052-};
37053+} __no_const;
37054
37055 /* MSI-X Support *************************************************************/
37056
fe2de317 37057diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
c6e2a6c8 37058index 7f2492e..5113877 100644
fe2de317
MT
37059--- a/drivers/scsi/qla4xxx/ql4_def.h
37060+++ b/drivers/scsi/qla4xxx/ql4_def.h
c6e2a6c8 37061@@ -268,7 +268,7 @@ struct ddb_entry {
4c928ab7
MT
37062 * (4000 only) */
37063 atomic_t relogin_timer; /* Max Time to wait for
37064 * relogin to complete */
37065- atomic_t relogin_retry_count; /* Num of times relogin has been
37066+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37067 * retried */
37068 uint32_t default_time2wait; /* Default Min time between
37069 * relogins (+aens) */
37070diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
c6e2a6c8 37071index ee47820..a83b1f4 100644
4c928ab7
MT
37072--- a/drivers/scsi/qla4xxx/ql4_os.c
37073+++ b/drivers/scsi/qla4xxx/ql4_os.c
c6e2a6c8 37074@@ -2551,12 +2551,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
4c928ab7
MT
37075 */
37076 if (!iscsi_is_session_online(cls_sess)) {
37077 /* Reset retry relogin timer */
37078- atomic_inc(&ddb_entry->relogin_retry_count);
37079+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37080 DEBUG2(ql4_printk(KERN_INFO, ha,
37081 "%s: index[%d] relogin timed out-retrying"
37082 " relogin (%d), retry (%d)\n", __func__,
37083 ddb_entry->fw_ddb_index,
37084- atomic_read(&ddb_entry->relogin_retry_count),
37085+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
37086 ddb_entry->default_time2wait + 4));
37087 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
37088 atomic_set(&ddb_entry->retry_relogin_timer,
c6e2a6c8 37089@@ -4453,7 +4453,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
4c928ab7 37090
8308f9c9
MT
37091 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37092 atomic_set(&ddb_entry->relogin_timer, 0);
37093- atomic_set(&ddb_entry->relogin_retry_count, 0);
37094+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
5e856224 37095 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
4c928ab7 37096 ddb_entry->default_relogin_timeout =
5e856224 37097 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
fe2de317 37098diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
c6e2a6c8 37099index 07322ec..91ccc23 100644
fe2de317
MT
37100--- a/drivers/scsi/scsi.c
37101+++ b/drivers/scsi/scsi.c
37102@@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
8308f9c9
MT
37103 unsigned long timeout;
37104 int rtn = 0;
37105
37106- atomic_inc(&cmd->device->iorequest_cnt);
37107+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37108
37109 /* check if the device is still usable */
37110 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
fe2de317 37111diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
c6e2a6c8 37112index 4037fd5..a19fcc7 100644
fe2de317
MT
37113--- a/drivers/scsi/scsi_lib.c
37114+++ b/drivers/scsi/scsi_lib.c
c6e2a6c8 37115@@ -1415,7 +1415,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
8308f9c9
MT
37116 shost = sdev->host;
37117 scsi_init_cmd_errh(cmd);
37118 cmd->result = DID_NO_CONNECT << 16;
37119- atomic_inc(&cmd->device->iorequest_cnt);
37120+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37121
37122 /*
37123 * SCSI request completion path will do scsi_device_unbusy(),
c6e2a6c8 37124@@ -1441,9 +1441,9 @@ static void scsi_softirq_done(struct request *rq)
8308f9c9
MT
37125
37126 INIT_LIST_HEAD(&cmd->eh_entry);
37127
37128- atomic_inc(&cmd->device->iodone_cnt);
37129+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
37130 if (cmd->result)
37131- atomic_inc(&cmd->device->ioerr_cnt);
37132+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37133
37134 disposition = scsi_decide_disposition(cmd);
37135 if (disposition != SUCCESS &&
fe2de317 37136diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
4c928ab7 37137index 04c2a27..9d8bd66 100644
fe2de317
MT
37138--- a/drivers/scsi/scsi_sysfs.c
37139+++ b/drivers/scsi/scsi_sysfs.c
4c928ab7 37140@@ -660,7 +660,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
8308f9c9
MT
37141 char *buf) \
37142 { \
37143 struct scsi_device *sdev = to_scsi_device(dev); \
37144- unsigned long long count = atomic_read(&sdev->field); \
37145+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
37146 return snprintf(buf, 20, "0x%llx\n", count); \
37147 } \
37148 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
fe2de317
MT
37149diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
37150index 84a1fdf..693b0d6 100644
37151--- a/drivers/scsi/scsi_tgt_lib.c
37152+++ b/drivers/scsi/scsi_tgt_lib.c
37153@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
6e9df6a3
MT
37154 int err;
37155
37156 dprintk("%lx %u\n", uaddr, len);
37157- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
37158+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
37159 if (err) {
37160 /*
37161 * TODO: need to fixup sg_tablesize, max_segment_size,
fe2de317 37162diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
c6e2a6c8 37163index 80fbe2a..efa223b 100644
fe2de317
MT
37164--- a/drivers/scsi/scsi_transport_fc.c
37165+++ b/drivers/scsi/scsi_transport_fc.c
c6e2a6c8 37166@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
8308f9c9
MT
37167 * Netlink Infrastructure
37168 */
37169
37170-static atomic_t fc_event_seq;
37171+static atomic_unchecked_t fc_event_seq;
37172
37173 /**
37174 * fc_get_event_number - Obtain the next sequential FC event number
c6e2a6c8 37175@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
8308f9c9
MT
37176 u32
37177 fc_get_event_number(void)
37178 {
37179- return atomic_add_return(1, &fc_event_seq);
37180+ return atomic_add_return_unchecked(1, &fc_event_seq);
37181 }
37182 EXPORT_SYMBOL(fc_get_event_number);
37183
c6e2a6c8 37184@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
8308f9c9
MT
37185 {
37186 int error;
37187
37188- atomic_set(&fc_event_seq, 0);
37189+ atomic_set_unchecked(&fc_event_seq, 0);
37190
37191 error = transport_class_register(&fc_host_class);
37192 if (error)
c6e2a6c8 37193@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
71d190be 37194 char *cp;
58c5fc13 37195
71d190be
MT
37196 *val = simple_strtoul(buf, &cp, 0);
37197- if ((*cp && (*cp != '\n')) || (*val < 0))
37198+ if (*cp && (*cp != '\n'))
37199 return -EINVAL;
37200 /*
37201 * Check for overflow; dev_loss_tmo is u32
fe2de317 37202diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
c6e2a6c8 37203index 1cf640e..78e9014 100644
fe2de317
MT
37204--- a/drivers/scsi/scsi_transport_iscsi.c
37205+++ b/drivers/scsi/scsi_transport_iscsi.c
4c928ab7
MT
37206@@ -79,7 +79,7 @@ struct iscsi_internal {
37207 struct transport_container session_cont;
8308f9c9
MT
37208 };
37209
37210-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37211+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37212 static struct workqueue_struct *iscsi_eh_timer_workq;
37213
4c928ab7 37214 static DEFINE_IDA(iscsi_sess_ida);
c6e2a6c8 37215@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
8308f9c9
MT
37216 int err;
37217
37218 ihost = shost->shost_data;
37219- session->sid = atomic_add_return(1, &iscsi_session_nr);
37220+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37221
4c928ab7
MT
37222 if (target_id == ISCSI_MAX_TARGET) {
37223 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
c6e2a6c8 37224@@ -2940,7 +2940,7 @@ static __init int iscsi_transport_init(void)
8308f9c9
MT
37225 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37226 ISCSI_TRANSPORT_VERSION);
37227
37228- atomic_set(&iscsi_session_nr, 0);
37229+ atomic_set_unchecked(&iscsi_session_nr, 0);
37230
37231 err = class_register(&iscsi_transport_class);
37232 if (err)
fe2de317
MT
37233diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
37234index 21a045e..ec89e03 100644
37235--- a/drivers/scsi/scsi_transport_srp.c
37236+++ b/drivers/scsi/scsi_transport_srp.c
8308f9c9
MT
37237@@ -33,7 +33,7 @@
37238 #include "scsi_transport_srp_internal.h"
37239
37240 struct srp_host_attrs {
37241- atomic_t next_port_id;
37242+ atomic_unchecked_t next_port_id;
37243 };
37244 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37245
fe2de317 37246@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
8308f9c9
MT
37247 struct Scsi_Host *shost = dev_to_shost(dev);
37248 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37249
37250- atomic_set(&srp_host->next_port_id, 0);
37251+ atomic_set_unchecked(&srp_host->next_port_id, 0);
37252 return 0;
37253 }
37254
fe2de317 37255@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
8308f9c9
MT
37256 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37257 rport->roles = ids->roles;
37258
37259- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37260+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37261 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37262
37263 transport_setup_device(&rport->dev);
fe2de317 37264diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
5e856224 37265index eacd46b..e3f4d62 100644
fe2de317
MT
37266--- a/drivers/scsi/sg.c
37267+++ b/drivers/scsi/sg.c
4c928ab7 37268@@ -1077,7 +1077,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
6e9df6a3
MT
37269 sdp->disk->disk_name,
37270 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
37271 NULL,
37272- (char *)arg);
37273+ (char __user *)arg);
37274 case BLKTRACESTART:
37275 return blk_trace_startstop(sdp->device->request_queue, 1);
37276 case BLKTRACESTOP:
4c928ab7 37277@@ -2312,7 +2312,7 @@ struct sg_proc_leaf {
ae4e228f 37278 const struct file_operations * fops;
58c5fc13
MT
37279 };
37280
ae4e228f
MT
37281-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37282+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37283 {"allow_dio", &adio_fops},
37284 {"debug", &debug_fops},
37285 {"def_reserved_size", &dressz_fops},
5e856224 37286@@ -2332,7 +2332,7 @@ sg_proc_init(void)
ae4e228f 37287 if (!sg_proc_sgp)
5e856224
MT
37288 return 1;
37289 for (k = 0; k < num_leaves; ++k) {
37290- struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37291+ const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
37292 umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
37293 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
37294 }
fe2de317 37295diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
c6e2a6c8 37296index 3d8f662..070f1a5 100644
fe2de317
MT
37297--- a/drivers/spi/spi.c
37298+++ b/drivers/spi/spi.c
c6e2a6c8 37299@@ -1361,7 +1361,7 @@ int spi_bus_unlock(struct spi_master *master)
fe2de317
MT
37300 EXPORT_SYMBOL_GPL(spi_bus_unlock);
37301
37302 /* portable code must never pass more than 32 bytes */
37303-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37304+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
37305
37306 static u8 *buf;
37307
fe2de317 37308diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
c6e2a6c8 37309index d91751f..a3a9e36 100644
fe2de317
MT
37310--- a/drivers/staging/octeon/ethernet-rx.c
37311+++ b/drivers/staging/octeon/ethernet-rx.c
c6e2a6c8 37312@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
8308f9c9
MT
37313 /* Increment RX stats for virtual ports */
37314 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
37315 #ifdef CONFIG_64BIT
37316- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
37317- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
37318+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
37319+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
37320 #else
37321- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
37322- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
37323+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
37324+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
37325 #endif
37326 }
37327 netif_receive_skb(skb);
c6e2a6c8 37328@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
8308f9c9
MT
37329 dev->name);
37330 */
37331 #ifdef CONFIG_64BIT
37332- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
37333+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37334 #else
37335- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
37336+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
37337 #endif
37338 dev_kfree_skb_irq(skb);
37339 }
fe2de317 37340diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
c6e2a6c8 37341index 60cba81..71eb239 100644
fe2de317
MT
37342--- a/drivers/staging/octeon/ethernet.c
37343+++ b/drivers/staging/octeon/ethernet.c
c6e2a6c8 37344@@ -259,11 +259,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
fe2de317
MT
37345 * since the RX tasklet also increments it.
37346 */
37347 #ifdef CONFIG_64BIT
37348- atomic64_add(rx_status.dropped_packets,
37349- (atomic64_t *)&priv->stats.rx_dropped);
37350+ atomic64_add_unchecked(rx_status.dropped_packets,
37351+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
37352 #else
37353- atomic_add(rx_status.dropped_packets,
37354- (atomic_t *)&priv->stats.rx_dropped);
37355+ atomic_add_unchecked(rx_status.dropped_packets,
37356+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
37357 #endif
37358 }
37359
fe2de317 37360diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
c6e2a6c8 37361index d3d8727..f9327bb8 100644
fe2de317
MT
37362--- a/drivers/staging/rtl8712/rtl871x_io.h
37363+++ b/drivers/staging/rtl8712/rtl871x_io.h
4c928ab7 37364@@ -108,7 +108,7 @@ struct _io_ops {
15a11c5b
MT
37365 u8 *pmem);
37366 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
37367 u8 *pmem);
37368-};
37369+} __no_const;
37370
37371 struct io_req {
37372 struct list_head list;
fe2de317
MT
37373diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
37374index c7b5e8b..783d6cb 100644
37375--- a/drivers/staging/sbe-2t3e3/netdev.c
37376+++ b/drivers/staging/sbe-2t3e3/netdev.c
37377@@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
15a11c5b
MT
37378 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
37379
37380 if (rlen)
37381- if (copy_to_user(data, &resp, rlen))
37382+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
37383 return -EFAULT;
66a7e928 37384
66a7e928 37385 return 0;
5e856224
MT
37386diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
37387index 42cdafe..2769103 100644
37388--- a/drivers/staging/speakup/speakup_soft.c
37389+++ b/drivers/staging/speakup/speakup_soft.c
37390@@ -241,11 +241,11 @@ static ssize_t softsynth_read(struct file *fp, char *buf, size_t count,
37391 break;
37392 } else if (!initialized) {
37393 if (*init) {
37394- ch = *init;
37395 init++;
37396 } else {
37397 initialized = 1;
37398 }
37399+ ch = *init;
37400 } else {
37401 ch = synth_buffer_getc();
37402 }
fe2de317 37403diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
c6e2a6c8 37404index c7b888c..c94be93 100644
fe2de317
MT
37405--- a/drivers/staging/usbip/usbip_common.h
37406+++ b/drivers/staging/usbip/usbip_common.h
6e9df6a3 37407@@ -289,7 +289,7 @@ struct usbip_device {
15a11c5b
MT
37408 void (*shutdown)(struct usbip_device *);
37409 void (*reset)(struct usbip_device *);
37410 void (*unusable)(struct usbip_device *);
37411- } eh_ops;
37412+ } __no_const eh_ops;
37413 };
37414
5e856224 37415 /* usbip_common.c */
fe2de317 37416diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
4c928ab7 37417index 88b3298..3783eee 100644
fe2de317
MT
37418--- a/drivers/staging/usbip/vhci.h
37419+++ b/drivers/staging/usbip/vhci.h
4c928ab7 37420@@ -88,7 +88,7 @@ struct vhci_hcd {
15a11c5b
MT
37421 unsigned resuming:1;
37422 unsigned long re_timeout;
8308f9c9
MT
37423
37424- atomic_t seqnum;
37425+ atomic_unchecked_t seqnum;
37426
37427 /*
37428 * NOTE:
fe2de317 37429diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
c6e2a6c8 37430index dca9bf1..80735c9 100644
fe2de317
MT
37431--- a/drivers/staging/usbip/vhci_hcd.c
37432+++ b/drivers/staging/usbip/vhci_hcd.c
c6e2a6c8 37433@@ -488,7 +488,7 @@ static void vhci_tx_urb(struct urb *urb)
8308f9c9
MT
37434 return;
37435 }
37436
37437- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
37438+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37439 if (priv->seqnum == 0xffff)
15a11c5b 37440 dev_info(&urb->dev->dev, "seqnum max\n");
8308f9c9 37441
c6e2a6c8 37442@@ -740,7 +740,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
8308f9c9
MT
37443 return -ENOMEM;
37444 }
37445
37446- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
37447+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
37448 if (unlink->seqnum == 0xffff)
15a11c5b 37449 pr_info("seqnum max\n");
8308f9c9 37450
c6e2a6c8 37451@@ -928,7 +928,7 @@ static int vhci_start(struct usb_hcd *hcd)
8308f9c9
MT
37452 vdev->rhport = rhport;
37453 }
37454
37455- atomic_set(&vhci->seqnum, 0);
37456+ atomic_set_unchecked(&vhci->seqnum, 0);
37457 spin_lock_init(&vhci->lock);
37458
15a11c5b 37459 hcd->power_budget = 0; /* no limit */
fe2de317 37460diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
c6e2a6c8 37461index f5fba732..210a16c 100644
fe2de317
MT
37462--- a/drivers/staging/usbip/vhci_rx.c
37463+++ b/drivers/staging/usbip/vhci_rx.c
37464@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
15a11c5b
MT
37465 if (!urb) {
37466 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
37467 pr_info("max seqnum %d\n",
37468- atomic_read(&the_controller->seqnum));
37469+ atomic_read_unchecked(&the_controller->seqnum));
8308f9c9
MT
37470 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
37471 return;
37472 }
fe2de317
MT
37473diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
37474index 7735027..30eed13 100644
37475--- a/drivers/staging/vt6655/hostap.c
37476+++ b/drivers/staging/vt6655/hostap.c
37477@@ -79,14 +79,13 @@ static int msglevel =MSG_LEVEL_INFO;
15a11c5b
MT
37478 *
37479 */
66a7e928 37480
15a11c5b
MT
37481+static net_device_ops_no_const apdev_netdev_ops;
37482+
37483 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
66a7e928 37484 {
15a11c5b
MT
37485 PSDevice apdev_priv;
37486 struct net_device *dev = pDevice->dev;
37487 int ret;
37488- const struct net_device_ops apdev_netdev_ops = {
37489- .ndo_start_xmit = pDevice->tx_80211,
37490- };
66a7e928 37491
15a11c5b 37492 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
66a7e928 37493
fe2de317 37494@@ -98,6 +97,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
15a11c5b
MT
37495 *apdev_priv = *pDevice;
37496 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37497
37498+ /* only half broken now */
37499+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37500 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37501
37502 pDevice->apdev->type = ARPHRD_IEEE80211;
fe2de317
MT
37503diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
37504index 51b5adf..098e320 100644
37505--- a/drivers/staging/vt6656/hostap.c
37506+++ b/drivers/staging/vt6656/hostap.c
37507@@ -80,14 +80,13 @@ static int msglevel =MSG_LEVEL_INFO;
15a11c5b
MT
37508 *
37509 */
66a7e928 37510
15a11c5b
MT
37511+static net_device_ops_no_const apdev_netdev_ops;
37512+
37513 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
66a7e928 37514 {
15a11c5b
MT
37515 PSDevice apdev_priv;
37516 struct net_device *dev = pDevice->dev;
37517 int ret;
37518- const struct net_device_ops apdev_netdev_ops = {
37519- .ndo_start_xmit = pDevice->tx_80211,
37520- };
37521
37522 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
66a7e928 37523
fe2de317 37524@@ -99,6 +98,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
15a11c5b
MT
37525 *apdev_priv = *pDevice;
37526 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
37527
37528+ /* only half broken now */
37529+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
37530 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
37531
37532 pDevice->apdev->type = ARPHRD_IEEE80211;
fe2de317
MT
37533diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
37534index 7843dfd..3db105f 100644
37535--- a/drivers/staging/wlan-ng/hfa384x_usb.c
37536+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
37537@@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
15a11c5b
MT
37538
37539 struct usbctlx_completor {
37540 int (*complete) (struct usbctlx_completor *);
37541-};
37542+} __no_const;
37543
37544 static int
37545 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
fe2de317
MT
37546diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
37547index 1ca66ea..76f1343 100644
37548--- a/drivers/staging/zcache/tmem.c
37549+++ b/drivers/staging/zcache/tmem.c
66a7e928
MT
37550@@ -39,7 +39,7 @@
37551 * A tmem host implementation must use this function to register callbacks
37552 * for memory allocation.
37553 */
37554-static struct tmem_hostops tmem_hostops;
15a11c5b 37555+static tmem_hostops_no_const tmem_hostops;
66a7e928
MT
37556
37557 static void tmem_objnode_tree_init(void);
37558
fe2de317 37559@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
66a7e928
MT
37560 * A tmem host implementation must use this function to register
37561 * callbacks for a page-accessible memory (PAM) implementation
37562 */
37563-static struct tmem_pamops tmem_pamops;
15a11c5b 37564+static tmem_pamops_no_const tmem_pamops;
66a7e928
MT
37565
37566 void tmem_register_pamops(struct tmem_pamops *m)
37567 {
fe2de317 37568diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
c6e2a6c8 37569index 0d4aa82..f7832d4 100644
fe2de317
MT
37570--- a/drivers/staging/zcache/tmem.h
37571+++ b/drivers/staging/zcache/tmem.h
6e9df6a3
MT
37572@@ -180,6 +180,7 @@ struct tmem_pamops {
37573 void (*new_obj)(struct tmem_obj *);
37574 int (*replace_in_obj)(void *, struct tmem_obj *);
15a11c5b
MT
37575 };
37576+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
37577 extern void tmem_register_pamops(struct tmem_pamops *m);
66a7e928 37578
15a11c5b 37579 /* memory allocation methods provided by the host implementation */
6e9df6a3 37580@@ -189,6 +190,7 @@ struct tmem_hostops {
15a11c5b
MT
37581 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
37582 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
37583 };
37584+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
37585 extern void tmem_register_hostops(struct tmem_hostops *m);
66a7e928 37586
15a11c5b 37587 /* core tmem accessor functions */
572b4308
MT
37588diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
37589index 30a6770..fa323f8 100644
37590--- a/drivers/target/target_core_cdb.c
37591+++ b/drivers/target/target_core_cdb.c
37592@@ -1107,7 +1107,7 @@ int target_emulate_write_same(struct se_task *task)
37593 if (num_blocks != 0)
37594 range = num_blocks;
37595 else
37596- range = (dev->transport->get_blocks(dev) - lba);
37597+ range = (dev->transport->get_blocks(dev) - lba) + 1;
37598
37599 pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
37600 (unsigned long long)lba, (unsigned long long)range);
37601diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
37602index c3148b1..89d10e6 100644
37603--- a/drivers/target/target_core_pr.c
37604+++ b/drivers/target/target_core_pr.c
37605@@ -2038,7 +2038,7 @@ static int __core_scsi3_write_aptpl_to_file(
37606 if (IS_ERR(file) || !file || !file->f_dentry) {
37607 pr_err("filp_open(%s) for APTPL metadata"
37608 " failed\n", path);
37609- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
37610+ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
37611 }
37612
37613 iov[0].iov_base = &buf[0];
37614@@ -3826,7 +3826,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
37615 " SPC-2 reservation is held, returning"
37616 " RESERVATION_CONFLICT\n");
37617 cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
37618- ret = EINVAL;
37619+ ret = -EINVAL;
37620 goto out;
37621 }
37622
37623@@ -3836,7 +3836,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
37624 */
37625 if (!cmd->se_sess) {
37626 cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
37627- return -EINVAL;
37628+ ret = -EINVAL;
37629+ goto out;
37630 }
37631
37632 if (cmd->data_length < 24) {
fe2de317 37633diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
c6e2a6c8 37634index f015839..b15dfc4 100644
fe2de317
MT
37635--- a/drivers/target/target_core_tmr.c
37636+++ b/drivers/target/target_core_tmr.c
c6e2a6c8 37637@@ -327,7 +327,7 @@ static void core_tmr_drain_task_list(
6e9df6a3
MT
37638 cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
37639 cmd->t_task_list_num,
37640 atomic_read(&cmd->t_task_cdbs_left),
37641- atomic_read(&cmd->t_task_cdbs_sent),
37642+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
c6e2a6c8
MT
37643 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37644 (cmd->transport_state & CMD_T_STOP) != 0,
37645 (cmd->transport_state & CMD_T_SENT) != 0);
fe2de317 37646diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
c6e2a6c8 37647index 443704f..92d3517 100644
fe2de317
MT
37648--- a/drivers/target/target_core_transport.c
37649+++ b/drivers/target/target_core_transport.c
c6e2a6c8 37650@@ -1355,7 +1355,7 @@ struct se_device *transport_add_device_to_core_hba(
5e856224
MT
37651 spin_lock_init(&dev->se_port_lock);
37652 spin_lock_init(&dev->se_tmr_lock);
37653 spin_lock_init(&dev->qf_cmd_lock);
8308f9c9
MT
37654- atomic_set(&dev->dev_ordered_id, 0);
37655+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
37656
37657 se_dev_set_default_attribs(dev, dev_limits);
37658
c6e2a6c8 37659@@ -1542,7 +1542,7 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
8308f9c9
MT
37660 * Used to determine when ORDERED commands should go from
37661 * Dormant to Active status.
37662 */
6e9df6a3
MT
37663- cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
37664+ cmd->se_ordered_id = atomic_inc_return_unchecked(&cmd->se_dev->dev_ordered_id);
8308f9c9 37665 smp_mb__after_atomic_inc();
6e9df6a3 37666 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
8308f9c9 37667 cmd->se_ordered_id, cmd->sam_task_attr,
c6e2a6c8
MT
37668@@ -1956,7 +1956,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
37669 " CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n",
37670 cmd->t_task_list_num,
6e9df6a3
MT
37671 atomic_read(&cmd->t_task_cdbs_left),
37672- atomic_read(&cmd->t_task_cdbs_sent),
37673+ atomic_read_unchecked(&cmd->t_task_cdbs_sent),
37674 atomic_read(&cmd->t_task_cdbs_ex_left),
c6e2a6c8
MT
37675 (cmd->transport_state & CMD_T_ACTIVE) != 0,
37676 (cmd->transport_state & CMD_T_STOP) != 0,
37677@@ -2216,9 +2216,9 @@ check_depth:
5e856224 37678 cmd = task->task_se_cmd;
6e9df6a3 37679 spin_lock_irqsave(&cmd->t_state_lock, flags);
4c928ab7 37680 task->task_flags |= (TF_ACTIVE | TF_SENT);
6e9df6a3
MT
37681- atomic_inc(&cmd->t_task_cdbs_sent);
37682+ atomic_inc_unchecked(&cmd->t_task_cdbs_sent);
8308f9c9 37683
6e9df6a3
MT
37684- if (atomic_read(&cmd->t_task_cdbs_sent) ==
37685+ if (atomic_read_unchecked(&cmd->t_task_cdbs_sent) ==
37686 cmd->t_task_list_num)
c6e2a6c8 37687 cmd->transport_state |= CMD_T_SENT;
8308f9c9 37688
572b4308
MT
37689diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
37690index a375f25..da90f64 100644
37691--- a/drivers/target/tcm_fc/tfc_cmd.c
37692+++ b/drivers/target/tcm_fc/tfc_cmd.c
37693@@ -240,6 +240,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
37694 {
37695 struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
37696
37697+ if (cmd->aborted)
37698+ return ~0;
37699 return fc_seq_exch(cmd->seq)->rxid;
37700 }
37701
fe2de317 37702diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
c6e2a6c8 37703index 3436436..772237b 100644
fe2de317
MT
37704--- a/drivers/tty/hvc/hvcs.c
37705+++ b/drivers/tty/hvc/hvcs.c
16454cff
MT
37706@@ -83,6 +83,7 @@
37707 #include <asm/hvcserver.h>
37708 #include <asm/uaccess.h>
37709 #include <asm/vio.h>
37710+#include <asm/local.h>
37711
37712 /*
37713 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
37714@@ -270,7 +271,7 @@ struct hvcs_struct {
37715 unsigned int index;
37716
37717 struct tty_struct *tty;
37718- int open_count;
37719+ local_t open_count;
37720
37721 /*
37722 * Used to tell the driver kernel_thread what operations need to take
fe2de317 37723@@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
16454cff
MT
37724
37725 spin_lock_irqsave(&hvcsd->lock, flags);
37726
37727- if (hvcsd->open_count > 0) {
37728+ if (local_read(&hvcsd->open_count) > 0) {
37729 spin_unlock_irqrestore(&hvcsd->lock, flags);
37730 printk(KERN_INFO "HVCS: vterm state unchanged. "
37731 "The hvcs device node is still in use.\n");
c6e2a6c8 37732@@ -1138,7 +1139,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
16454cff
MT
37733 if ((retval = hvcs_partner_connect(hvcsd)))
37734 goto error_release;
37735
37736- hvcsd->open_count = 1;
37737+ local_set(&hvcsd->open_count, 1);
37738 hvcsd->tty = tty;
37739 tty->driver_data = hvcsd;
37740
c6e2a6c8 37741@@ -1172,7 +1173,7 @@ fast_open:
16454cff
MT
37742
37743 spin_lock_irqsave(&hvcsd->lock, flags);
37744 kref_get(&hvcsd->kref);
37745- hvcsd->open_count++;
37746+ local_inc(&hvcsd->open_count);
37747 hvcsd->todo_mask |= HVCS_SCHED_READ;
37748 spin_unlock_irqrestore(&hvcsd->lock, flags);
37749
c6e2a6c8 37750@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
16454cff
MT
37751 hvcsd = tty->driver_data;
37752
37753 spin_lock_irqsave(&hvcsd->lock, flags);
37754- if (--hvcsd->open_count == 0) {
37755+ if (local_dec_and_test(&hvcsd->open_count)) {
37756
37757 vio_disable_interrupts(hvcsd->vdev);
37758
c6e2a6c8 37759@@ -1242,10 +1243,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
16454cff
MT
37760 free_irq(irq, hvcsd);
37761 kref_put(&hvcsd->kref, destroy_hvcs_struct);
37762 return;
37763- } else if (hvcsd->open_count < 0) {
37764+ } else if (local_read(&hvcsd->open_count) < 0) {
37765 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
37766 " is missmanaged.\n",
37767- hvcsd->vdev->unit_address, hvcsd->open_count);
37768+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
37769 }
37770
37771 spin_unlock_irqrestore(&hvcsd->lock, flags);
c6e2a6c8 37772@@ -1261,7 +1262,7 @@ static void hvcs_hangup(struct tty_struct * tty)
16454cff
MT
37773
37774 spin_lock_irqsave(&hvcsd->lock, flags);
37775 /* Preserve this so that we know how many kref refs to put */
37776- temp_open_count = hvcsd->open_count;
37777+ temp_open_count = local_read(&hvcsd->open_count);
37778
37779 /*
37780 * Don't kref put inside the spinlock because the destruction
c6e2a6c8 37781@@ -1276,7 +1277,7 @@ static void hvcs_hangup(struct tty_struct * tty)
16454cff
MT
37782 hvcsd->tty->driver_data = NULL;
37783 hvcsd->tty = NULL;
37784
37785- hvcsd->open_count = 0;
37786+ local_set(&hvcsd->open_count, 0);
37787
37788 /* This will drop any buffered data on the floor which is OK in a hangup
37789 * scenario. */
c6e2a6c8 37790@@ -1347,7 +1348,7 @@ static int hvcs_write(struct tty_struct *tty,
16454cff
MT
37791 * the middle of a write operation? This is a crummy place to do this
37792 * but we want to keep it all in the spinlock.
37793 */
37794- if (hvcsd->open_count <= 0) {
37795+ if (local_read(&hvcsd->open_count) <= 0) {
37796 spin_unlock_irqrestore(&hvcsd->lock, flags);
37797 return -ENODEV;
37798 }
c6e2a6c8 37799@@ -1421,7 +1422,7 @@ static int hvcs_write_room(struct tty_struct *tty)
16454cff
MT
37800 {
37801 struct hvcs_struct *hvcsd = tty->driver_data;
37802
37803- if (!hvcsd || hvcsd->open_count <= 0)
37804+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
37805 return 0;
37806
37807 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
fe2de317 37808diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
c6e2a6c8 37809index 4daf962..b4a2281 100644
fe2de317
MT
37810--- a/drivers/tty/ipwireless/tty.c
37811+++ b/drivers/tty/ipwireless/tty.c
66a7e928
MT
37812@@ -29,6 +29,7 @@
37813 #include <linux/tty_driver.h>
37814 #include <linux/tty_flip.h>
37815 #include <linux/uaccess.h>
37816+#include <asm/local.h>
37817
37818 #include "tty.h"
37819 #include "network.h"
37820@@ -51,7 +52,7 @@ struct ipw_tty {
37821 int tty_type;
37822 struct ipw_network *network;
37823 struct tty_struct *linux_tty;
37824- int open_count;
37825+ local_t open_count;
37826 unsigned int control_lines;
37827 struct mutex ipw_tty_mutex;
37828 int tx_bytes_queued;
c6e2a6c8 37829@@ -117,10 +118,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
66a7e928
MT
37830 mutex_unlock(&tty->ipw_tty_mutex);
37831 return -ENODEV;
37832 }
37833- if (tty->open_count == 0)
37834+ if (local_read(&tty->open_count) == 0)
37835 tty->tx_bytes_queued = 0;
37836
37837- tty->open_count++;
37838+ local_inc(&tty->open_count);
37839
37840 tty->linux_tty = linux_tty;
37841 linux_tty->driver_data = tty;
c6e2a6c8 37842@@ -136,9 +137,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
66a7e928
MT
37843
37844 static void do_ipw_close(struct ipw_tty *tty)
37845 {
37846- tty->open_count--;
37847-
37848- if (tty->open_count == 0) {
37849+ if (local_dec_return(&tty->open_count) == 0) {
37850 struct tty_struct *linux_tty = tty->linux_tty;
37851
37852 if (linux_tty != NULL) {
c6e2a6c8 37853@@ -159,7 +158,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
66a7e928
MT
37854 return;
37855
37856 mutex_lock(&tty->ipw_tty_mutex);
37857- if (tty->open_count == 0) {
37858+ if (local_read(&tty->open_count) == 0) {
37859 mutex_unlock(&tty->ipw_tty_mutex);
37860 return;
37861 }
c6e2a6c8 37862@@ -188,7 +187,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
66a7e928
MT
37863 return;
37864 }
37865
37866- if (!tty->open_count) {
37867+ if (!local_read(&tty->open_count)) {
37868 mutex_unlock(&tty->ipw_tty_mutex);
37869 return;
37870 }
c6e2a6c8 37871@@ -230,7 +229,7 @@ static int ipw_write(struct tty_struct *linux_tty,
66a7e928
MT
37872 return -ENODEV;
37873
37874 mutex_lock(&tty->ipw_tty_mutex);
37875- if (!tty->open_count) {
37876+ if (!local_read(&tty->open_count)) {
37877 mutex_unlock(&tty->ipw_tty_mutex);
37878 return -EINVAL;
37879 }
c6e2a6c8 37880@@ -270,7 +269,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
66a7e928
MT
37881 if (!tty)
37882 return -ENODEV;
37883
37884- if (!tty->open_count)
37885+ if (!local_read(&tty->open_count))
37886 return -EINVAL;
37887
37888 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
c6e2a6c8 37889@@ -312,7 +311,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
66a7e928
MT
37890 if (!tty)
37891 return 0;
37892
37893- if (!tty->open_count)
37894+ if (!local_read(&tty->open_count))
37895 return 0;
37896
37897 return tty->tx_bytes_queued;
c6e2a6c8 37898@@ -393,7 +392,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
66a7e928
MT
37899 if (!tty)
37900 return -ENODEV;
37901
37902- if (!tty->open_count)
37903+ if (!local_read(&tty->open_count))
37904 return -EINVAL;
37905
37906 return get_control_lines(tty);
c6e2a6c8 37907@@ -409,7 +408,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
66a7e928
MT
37908 if (!tty)
37909 return -ENODEV;
37910
37911- if (!tty->open_count)
37912+ if (!local_read(&tty->open_count))
37913 return -EINVAL;
37914
37915 return set_control_lines(tty, set, clear);
c6e2a6c8 37916@@ -423,7 +422,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
66a7e928
MT
37917 if (!tty)
37918 return -ENODEV;
37919
37920- if (!tty->open_count)
37921+ if (!local_read(&tty->open_count))
37922 return -EINVAL;
37923
37924 /* FIXME: Exactly how is the tty object locked here .. */
c6e2a6c8 37925@@ -572,7 +571,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
66a7e928
MT
37926 against a parallel ioctl etc */
37927 mutex_lock(&ttyj->ipw_tty_mutex);
37928 }
37929- while (ttyj->open_count)
37930+ while (local_read(&ttyj->open_count))
37931 do_ipw_close(ttyj);
37932 ipwireless_disassociate_network_ttys(network,
37933 ttyj->channel_idx);
fe2de317 37934diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
c6e2a6c8 37935index c43b683..0a88f1c 100644
fe2de317
MT
37936--- a/drivers/tty/n_gsm.c
37937+++ b/drivers/tty/n_gsm.c
4c928ab7 37938@@ -1629,7 +1629,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
6e9df6a3
MT
37939 kref_init(&dlci->ref);
37940 mutex_init(&dlci->mutex);
bc901d79
MT
37941 dlci->fifo = &dlci->_fifo;
37942- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
37943+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
37944 kfree(dlci);
37945 return NULL;
37946 }
fe2de317 37947diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
c6e2a6c8 37948index 94b6eda..15f7cec 100644
fe2de317
MT
37949--- a/drivers/tty/n_tty.c
37950+++ b/drivers/tty/n_tty.c
c6e2a6c8 37951@@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
bc901d79
MT
37952 {
37953 *ops = tty_ldisc_N_TTY;
37954 ops->owner = NULL;
37955- ops->refcount = ops->flags = 0;
37956+ atomic_set(&ops->refcount, 0);
37957+ ops->flags = 0;
37958 }
37959 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
fe2de317 37960diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
c6e2a6c8 37961index eeae7fa..177a743 100644
fe2de317
MT
37962--- a/drivers/tty/pty.c
37963+++ b/drivers/tty/pty.c
c6e2a6c8
MT
37964@@ -707,8 +707,10 @@ static void __init unix98_pty_init(void)
37965 panic("Couldn't register Unix98 pts driver");
bc901d79 37966
15a11c5b
MT
37967 /* Now create the /dev/ptmx special device */
37968+ pax_open_kernel();
37969 tty_default_fops(&ptmx_fops);
bc901d79 37970- ptmx_fops.open = ptmx_open;
15a11c5b
MT
37971+ *(void **)&ptmx_fops.open = ptmx_open;
37972+ pax_close_kernel();
37973
bc901d79
MT
37974 cdev_init(&ptmx_cdev, &ptmx_fops);
37975 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
fe2de317 37976diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
4c928ab7 37977index 2b42a01..32a2ed3 100644
fe2de317
MT
37978--- a/drivers/tty/serial/kgdboc.c
37979+++ b/drivers/tty/serial/kgdboc.c
4c928ab7 37980@@ -24,8 +24,9 @@
15a11c5b 37981 #define MAX_CONFIG_LEN 40
66a7e928 37982
15a11c5b
MT
37983 static struct kgdb_io kgdboc_io_ops;
37984+static struct kgdb_io kgdboc_io_ops_console;
66a7e928 37985
15a11c5b
MT
37986-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37987+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
37988 static int configured = -1;
66a7e928 37989
15a11c5b 37990 static char config[MAX_CONFIG_LEN];
4c928ab7 37991@@ -148,6 +149,8 @@ static void cleanup_kgdboc(void)
15a11c5b
MT
37992 kgdboc_unregister_kbd();
37993 if (configured == 1)
37994 kgdb_unregister_io_module(&kgdboc_io_ops);
37995+ else if (configured == 2)
37996+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
66a7e928
MT
37997 }
37998
15a11c5b 37999 static int configure_kgdboc(void)
4c928ab7 38000@@ -157,13 +160,13 @@ static int configure_kgdboc(void)
15a11c5b
MT
38001 int err;
38002 char *cptr = config;
38003 struct console *cons;
38004+ int is_console = 0;
38005
38006 err = kgdboc_option_setup(config);
38007 if (err || !strlen(config) || isspace(config[0]))
38008 goto noconfig;
38009
38010 err = -ENODEV;
38011- kgdboc_io_ops.is_console = 0;
38012 kgdb_tty_driver = NULL;
38013
38014 kgdboc_use_kms = 0;
4c928ab7 38015@@ -184,7 +187,7 @@ static int configure_kgdboc(void)
15a11c5b
MT
38016 int idx;
38017 if (cons->device && cons->device(cons, &idx) == p &&
38018 idx == tty_line) {
38019- kgdboc_io_ops.is_console = 1;
38020+ is_console = 1;
38021 break;
38022 }
38023 cons = cons->next;
4c928ab7 38024@@ -194,12 +197,16 @@ static int configure_kgdboc(void)
15a11c5b
MT
38025 kgdb_tty_line = tty_line;
38026
38027 do_register:
38028- err = kgdb_register_io_module(&kgdboc_io_ops);
38029+ if (is_console) {
38030+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
38031+ configured = 2;
38032+ } else {
38033+ err = kgdb_register_io_module(&kgdboc_io_ops);
38034+ configured = 1;
38035+ }
38036 if (err)
38037 goto noconfig;
66a7e928 38038
15a11c5b
MT
38039- configured = 1;
38040-
66a7e928 38041 return 0;
66a7e928 38042
15a11c5b 38043 noconfig:
4c928ab7 38044@@ -213,7 +220,7 @@ noconfig:
15a11c5b
MT
38045 static int __init init_kgdboc(void)
38046 {
38047 /* Already configured? */
38048- if (configured == 1)
38049+ if (configured >= 1)
38050 return 0;
16454cff 38051
15a11c5b 38052 return configure_kgdboc();
4c928ab7 38053@@ -262,7 +269,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
15a11c5b
MT
38054 if (config[len - 1] == '\n')
38055 config[len - 1] = '\0';
66a7e928 38056
15a11c5b
MT
38057- if (configured == 1)
38058+ if (configured >= 1)
38059 cleanup_kgdboc();
66a7e928 38060
15a11c5b 38061 /* Go and configure with the new params. */
4c928ab7 38062@@ -302,6 +309,15 @@ static struct kgdb_io kgdboc_io_ops = {
15a11c5b 38063 .post_exception = kgdboc_post_exp_handler,
66a7e928 38064 };
66a7e928 38065
15a11c5b
MT
38066+static struct kgdb_io kgdboc_io_ops_console = {
38067+ .name = "kgdboc",
38068+ .read_char = kgdboc_get_char,
38069+ .write_char = kgdboc_put_char,
38070+ .pre_exception = kgdboc_pre_exp_handler,
38071+ .post_exception = kgdboc_post_exp_handler,
38072+ .is_console = 1
38073+};
38074+
38075 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
38076 /* This is only available if kgdboc is a built in for early debugging */
38077 static int __init kgdboc_early_init(char *opt)
5e856224 38078diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
c6e2a6c8 38079index 05728894..b9d44c6 100644
5e856224
MT
38080--- a/drivers/tty/sysrq.c
38081+++ b/drivers/tty/sysrq.c
c6e2a6c8 38082@@ -865,7 +865,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
5e856224
MT
38083 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
38084 size_t count, loff_t *ppos)
38085 {
38086- if (count) {
38087+ if (count && capable(CAP_SYS_ADMIN)) {
38088 char c;
38089
38090 if (get_user(c, buf))
fe2de317 38091diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
c6e2a6c8 38092index d939bd7..33d92cd 100644
fe2de317
MT
38093--- a/drivers/tty/tty_io.c
38094+++ b/drivers/tty/tty_io.c
c6e2a6c8 38095@@ -3278,7 +3278,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
66a7e928 38096
15a11c5b 38097 void tty_default_fops(struct file_operations *fops)
bc901d79 38098 {
15a11c5b
MT
38099- *fops = tty_fops;
38100+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
bc901d79 38101 }
bc901d79 38102
bc901d79 38103 /*
fe2de317 38104diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
5e856224 38105index 24b95db..9c078d0 100644
fe2de317
MT
38106--- a/drivers/tty/tty_ldisc.c
38107+++ b/drivers/tty/tty_ldisc.c
5e856224 38108@@ -57,7 +57,7 @@ static void put_ldisc(struct tty_ldisc *ld)
bc901d79
MT
38109 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
38110 struct tty_ldisc_ops *ldo = ld->ops;
38111
38112- ldo->refcount--;
38113+ atomic_dec(&ldo->refcount);
38114 module_put(ldo->owner);
38115 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38116
5e856224 38117@@ -92,7 +92,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
bc901d79
MT
38118 spin_lock_irqsave(&tty_ldisc_lock, flags);
38119 tty_ldiscs[disc] = new_ldisc;
38120 new_ldisc->num = disc;
38121- new_ldisc->refcount = 0;
38122+ atomic_set(&new_ldisc->refcount, 0);
38123 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38124
38125 return ret;
5e856224 38126@@ -120,7 +120,7 @@ int tty_unregister_ldisc(int disc)
bc901d79
MT
38127 return -EINVAL;
38128
38129 spin_lock_irqsave(&tty_ldisc_lock, flags);
38130- if (tty_ldiscs[disc]->refcount)
38131+ if (atomic_read(&tty_ldiscs[disc]->refcount))
38132 ret = -EBUSY;
38133 else
38134 tty_ldiscs[disc] = NULL;
5e856224 38135@@ -141,7 +141,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
bc901d79
MT
38136 if (ldops) {
38137 ret = ERR_PTR(-EAGAIN);
38138 if (try_module_get(ldops->owner)) {
38139- ldops->refcount++;
38140+ atomic_inc(&ldops->refcount);
38141 ret = ldops;
38142 }
38143 }
5e856224 38144@@ -154,7 +154,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
bc901d79
MT
38145 unsigned long flags;
38146
38147 spin_lock_irqsave(&tty_ldisc_lock, flags);
38148- ldops->refcount--;
38149+ atomic_dec(&ldops->refcount);
38150 module_put(ldops->owner);
38151 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
38152 }
fe2de317 38153diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
c6e2a6c8 38154index 3b0c4e3..f98a992 100644
fe2de317
MT
38155--- a/drivers/tty/vt/keyboard.c
38156+++ b/drivers/tty/vt/keyboard.c
c6e2a6c8 38157@@ -663,6 +663,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
66a7e928 38158 kbd->kbdmode == VC_OFF) &&
bc901d79
MT
38159 value != KVAL(K_SAK))
38160 return; /* SAK is allowed even in raw mode */
38161+
38162+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
38163+ {
38164+ void *func = fn_handler[value];
38165+ if (func == fn_show_state || func == fn_show_ptregs ||
38166+ func == fn_show_mem)
38167+ return;
38168+ }
38169+#endif
38170+
38171 fn_handler[value](vc);
38172 }
38173
c6e2a6c8 38174@@ -1812,9 +1822,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
bc901d79
MT
38175 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
38176 return -EFAULT;
38177
38178- if (!capable(CAP_SYS_TTY_CONFIG))
38179- perm = 0;
38180-
38181 switch (cmd) {
38182 case KDGKBENT:
c6e2a6c8
MT
38183 /* Ensure another thread doesn't free it under us */
38184@@ -1829,6 +1836,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
38185 spin_unlock_irqrestore(&kbd_event_lock, flags);
bc901d79
MT
38186 return put_user(val, &user_kbe->kb_value);
38187 case KDSKBENT:
38188+ if (!capable(CAP_SYS_TTY_CONFIG))
38189+ perm = 0;
38190+
38191 if (!perm)
38192 return -EPERM;
38193 if (!i && v == K_NOSUCHMAP) {
c6e2a6c8 38194@@ -1919,9 +1929,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
bc901d79
MT
38195 int i, j, k;
38196 int ret;
38197
38198- if (!capable(CAP_SYS_TTY_CONFIG))
38199- perm = 0;
38200-
38201 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
38202 if (!kbs) {
38203 ret = -ENOMEM;
c6e2a6c8 38204@@ -1955,6 +1962,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
bc901d79
MT
38205 kfree(kbs);
38206 return ((p && *p) ? -EOVERFLOW : 0);
38207 case KDSKBSENT:
38208+ if (!capable(CAP_SYS_TTY_CONFIG))
38209+ perm = 0;
38210+
38211 if (!perm) {
38212 ret = -EPERM;
38213 goto reterr;
fe2de317 38214diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
4c928ab7 38215index a783d53..cb30d94 100644
fe2de317
MT
38216--- a/drivers/uio/uio.c
38217+++ b/drivers/uio/uio.c
c52201e0
MT
38218@@ -25,6 +25,7 @@
38219 #include <linux/kobject.h>
38220 #include <linux/cdev.h>
38221 #include <linux/uio_driver.h>
38222+#include <asm/local.h>
38223
38224 #define UIO_MAX_DEVICES (1U << MINORBITS)
38225
8308f9c9
MT
38226@@ -32,10 +33,10 @@ struct uio_device {
38227 struct module *owner;
38228 struct device *dev;
38229 int minor;
38230- atomic_t event;
38231+ atomic_unchecked_t event;
c52201e0
MT
38232 struct fasync_struct *async_queue;
38233 wait_queue_head_t wait;
38234- int vma_count;
38235+ local_t vma_count;
38236 struct uio_info *info;
38237 struct kobject *map_dir;
38238 struct kobject *portio_dir;
fe2de317 38239@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
8308f9c9
MT
38240 struct device_attribute *attr, char *buf)
38241 {
38242 struct uio_device *idev = dev_get_drvdata(dev);
38243- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
38244+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
38245 }
38246
38247 static struct device_attribute uio_class_attributes[] = {
fe2de317 38248@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
8308f9c9
MT
38249 {
38250 struct uio_device *idev = info->uio_dev;
38251
38252- atomic_inc(&idev->event);
38253+ atomic_inc_unchecked(&idev->event);
38254 wake_up_interruptible(&idev->wait);
38255 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38256 }
fe2de317 38257@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
8308f9c9
MT
38258 }
38259
38260 listener->dev = idev;
38261- listener->event_count = atomic_read(&idev->event);
38262+ listener->event_count = atomic_read_unchecked(&idev->event);
38263 filep->private_data = listener;
38264
38265 if (idev->info->open) {
fe2de317 38266@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
8308f9c9
MT
38267 return -EIO;
38268
38269 poll_wait(filep, &idev->wait, wait);
38270- if (listener->event_count != atomic_read(&idev->event))
38271+ if (listener->event_count != atomic_read_unchecked(&idev->event))
38272 return POLLIN | POLLRDNORM;
38273 return 0;
38274 }
fe2de317 38275@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
8308f9c9
MT
38276 do {
38277 set_current_state(TASK_INTERRUPTIBLE);
38278
38279- event_count = atomic_read(&idev->event);
38280+ event_count = atomic_read_unchecked(&idev->event);
38281 if (event_count != listener->event_count) {
38282 if (copy_to_user(buf, &event_count, count))
38283 retval = -EFAULT;
fe2de317 38284@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
c52201e0
MT
38285 static void uio_vma_open(struct vm_area_struct *vma)
38286 {
38287 struct uio_device *idev = vma->vm_private_data;
38288- idev->vma_count++;
38289+ local_inc(&idev->vma_count);
38290 }
38291
38292 static void uio_vma_close(struct vm_area_struct *vma)
38293 {
38294 struct uio_device *idev = vma->vm_private_data;
38295- idev->vma_count--;
38296+ local_dec(&idev->vma_count);
38297 }
38298
38299 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4c928ab7 38300@@ -821,7 +822,7 @@ int __uio_register_device(struct module *owner,
8308f9c9
MT
38301 idev->owner = owner;
38302 idev->info = info;
38303 init_waitqueue_head(&idev->wait);
38304- atomic_set(&idev->event, 0);
38305+ atomic_set_unchecked(&idev->event, 0);
38306
38307 ret = uio_get_minor(idev);
38308 if (ret)
fe2de317 38309diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
5e856224 38310index 98b89fe..aff824e 100644
fe2de317
MT
38311--- a/drivers/usb/atm/cxacru.c
38312+++ b/drivers/usb/atm/cxacru.c
38313@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
6892158b
MT
38314 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
38315 if (ret < 2)
38316 return -EINVAL;
38317- if (index < 0 || index > 0x7f)
38318+ if (index > 0x7f)
38319 return -EINVAL;
38320 pos += tmp;
38321
fe2de317
MT
38322diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
38323index d3448ca..d2864ca 100644
38324--- a/drivers/usb/atm/usbatm.c
38325+++ b/drivers/usb/atm/usbatm.c
38326@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
38327 if (printk_ratelimit())
38328 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38329 __func__, vpi, vci);
38330- atomic_inc(&vcc->stats->rx_err);
38331+ atomic_inc_unchecked(&vcc->stats->rx_err);
38332 return;
38333 }
38334
fe2de317 38335@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
38336 if (length > ATM_MAX_AAL5_PDU) {
38337 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38338 __func__, length, vcc);
38339- atomic_inc(&vcc->stats->rx_err);
38340+ atomic_inc_unchecked(&vcc->stats->rx_err);
38341 goto out;
38342 }
38343
fe2de317 38344@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
38345 if (sarb->len < pdu_length) {
38346 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38347 __func__, pdu_length, sarb->len, vcc);
38348- atomic_inc(&vcc->stats->rx_err);
38349+ atomic_inc_unchecked(&vcc->stats->rx_err);
38350 goto out;
38351 }
38352
38353 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38354 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38355 __func__, vcc);
38356- atomic_inc(&vcc->stats->rx_err);
38357+ atomic_inc_unchecked(&vcc->stats->rx_err);
38358 goto out;
38359 }
38360
fe2de317 38361@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
38362 if (printk_ratelimit())
38363 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38364 __func__, length);
38365- atomic_inc(&vcc->stats->rx_drop);
38366+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38367 goto out;
38368 }
38369
fe2de317 38370@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
58c5fc13
MT
38371
38372 vcc->push(vcc, skb);
38373
38374- atomic_inc(&vcc->stats->rx);
38375+ atomic_inc_unchecked(&vcc->stats->rx);
38376 out:
38377 skb_trim(sarb, 0);
38378 }
fe2de317 38379@@ -615,7 +615,7 @@ static void usbatm_tx_process(unsigned long data)
58c5fc13
MT
38380 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38381
38382 usbatm_pop(vcc, skb);
38383- atomic_inc(&vcc->stats->tx);
38384+ atomic_inc_unchecked(&vcc->stats->tx);
38385
38386 skb = skb_dequeue(&instance->sndqueue);
38387 }
fe2de317 38388@@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
58c5fc13
MT
38389 if (!left--)
38390 return sprintf(page,
38391 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38392- atomic_read(&atm_dev->stats.aal5.tx),
38393- atomic_read(&atm_dev->stats.aal5.tx_err),
38394- atomic_read(&atm_dev->stats.aal5.rx),
38395- atomic_read(&atm_dev->stats.aal5.rx_err),
38396- atomic_read(&atm_dev->stats.aal5.rx_drop));
38397+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38398+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38399+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38400+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38401+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38402
38403 if (!left--) {
38404 if (instance->disconnected)
fe2de317 38405diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
4c928ab7 38406index d956965..4179a77 100644
fe2de317
MT
38407--- a/drivers/usb/core/devices.c
38408+++ b/drivers/usb/core/devices.c
15a11c5b 38409@@ -126,7 +126,7 @@ static const char format_endpt[] =
8308f9c9
MT
38410 * time it gets called.
38411 */
38412 static struct device_connect_event {
38413- atomic_t count;
38414+ atomic_unchecked_t count;
38415 wait_queue_head_t wait;
38416 } device_event = {
38417 .count = ATOMIC_INIT(1),
fe2de317 38418@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
8308f9c9
MT
38419
38420 void usbfs_conn_disc_event(void)
38421 {
38422- atomic_add(2, &device_event.count);
38423+ atomic_add_unchecked(2, &device_event.count);
38424 wake_up(&device_event.wait);
38425 }
38426
fe2de317 38427@@ -648,7 +648,7 @@ static unsigned int usb_device_poll(struct file *file,
8308f9c9
MT
38428
38429 poll_wait(file, &device_event.wait, wait);
38430
38431- event_count = atomic_read(&device_event.count);
38432+ event_count = atomic_read_unchecked(&device_event.count);
38433 if (file->f_version != event_count) {
38434 file->f_version = event_count;
38435 return POLLIN | POLLRDNORM;
fe2de317
MT
38436diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
38437index 1fc8f12..20647c1 100644
38438--- a/drivers/usb/early/ehci-dbgp.c
38439+++ b/drivers/usb/early/ehci-dbgp.c
38440@@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
6892158b
MT
38441
38442 #ifdef CONFIG_KGDB
15a11c5b
MT
38443 static struct kgdb_io kgdbdbgp_io_ops;
38444-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
38445+static struct kgdb_io kgdbdbgp_io_ops_console;
38446+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
6892158b 38447 #else
16454cff 38448 #define dbgp_kgdb_mode (0)
15a11c5b 38449 #endif
fe2de317 38450@@ -1035,6 +1036,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
16454cff 38451 .write_char = kgdbdbgp_write_char,
66a7e928
MT
38452 };
38453
15a11c5b
MT
38454+static struct kgdb_io kgdbdbgp_io_ops_console = {
38455+ .name = "kgdbdbgp",
38456+ .read_char = kgdbdbgp_read_char,
38457+ .write_char = kgdbdbgp_write_char,
38458+ .is_console = 1
38459+};
38460+
38461 static int kgdbdbgp_wait_time;
66a7e928 38462
15a11c5b 38463 static int __init kgdbdbgp_parse_config(char *str)
fe2de317 38464@@ -1050,8 +1058,10 @@ static int __init kgdbdbgp_parse_config(char *str)
15a11c5b
MT
38465 ptr++;
38466 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
38467 }
38468- kgdb_register_io_module(&kgdbdbgp_io_ops);
38469- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
38470+ if (early_dbgp_console.index != -1)
38471+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
38472+ else
38473+ kgdb_register_io_module(&kgdbdbgp_io_ops);
66a7e928 38474
66a7e928
MT
38475 return 0;
38476 }
fe2de317
MT
38477diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
38478index d6bea3e..60b250e 100644
38479--- a/drivers/usb/wusbcore/wa-hc.h
38480+++ b/drivers/usb/wusbcore/wa-hc.h
8308f9c9
MT
38481@@ -192,7 +192,7 @@ struct wahc {
38482 struct list_head xfer_delayed_list;
38483 spinlock_t xfer_list_lock;
38484 struct work_struct xfer_work;
38485- atomic_t xfer_id_count;
38486+ atomic_unchecked_t xfer_id_count;
38487 };
38488
38489
fe2de317 38490@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
8308f9c9
MT
38491 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38492 spin_lock_init(&wa->xfer_list_lock);
38493 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38494- atomic_set(&wa->xfer_id_count, 1);
38495+ atomic_set_unchecked(&wa->xfer_id_count, 1);
38496 }
38497
38498 /**
fe2de317 38499diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
4c928ab7 38500index 57c01ab..8a05959 100644
fe2de317
MT
38501--- a/drivers/usb/wusbcore/wa-xfer.c
38502+++ b/drivers/usb/wusbcore/wa-xfer.c
4c928ab7 38503@@ -296,7 +296,7 @@ out:
8308f9c9
MT
38504 */
38505 static void wa_xfer_id_init(struct wa_xfer *xfer)
38506 {
38507- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38508+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38509 }
38510
38511 /*
fe2de317 38512diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
c6e2a6c8 38513index 51e4c1e..9d87e2a 100644
fe2de317
MT
38514--- a/drivers/vhost/vhost.c
38515+++ b/drivers/vhost/vhost.c
c6e2a6c8 38516@@ -632,7 +632,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
6e9df6a3 38517 return 0;
57199397
MT
38518 }
38519
38520-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
38521+static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
38522 {
38523 struct file *eventfp, *filep = NULL,
38524 *pollstart = NULL, *pollstop = NULL;
fe2de317
MT
38525diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
38526index b0b2ac3..89a4399 100644
38527--- a/drivers/video/aty/aty128fb.c
38528+++ b/drivers/video/aty/aty128fb.c
6e9df6a3
MT
38529@@ -148,7 +148,7 @@ enum {
38530 };
38531
38532 /* Must match above enum */
38533-static const char *r128_family[] __devinitdata = {
38534+static const char *r128_family[] __devinitconst = {
38535 "AGP",
38536 "PCI",
38537 "PRO AGP",
fe2de317
MT
38538diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
38539index 5c3960d..15cf8fc 100644
38540--- a/drivers/video/fbcmap.c
38541+++ b/drivers/video/fbcmap.c
38542@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
df50ba0c
MT
38543 rc = -ENODEV;
38544 goto out;
38545 }
38546- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
38547- !info->fbops->fb_setcmap)) {
38548+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
38549 rc = -EINVAL;
38550 goto out1;
38551 }
fe2de317 38552diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
5e856224 38553index c6ce416..3b9b642 100644
fe2de317
MT
38554--- a/drivers/video/fbmem.c
38555+++ b/drivers/video/fbmem.c
38556@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
58c5fc13
MT
38557 image->dx += image->width + 8;
38558 }
38559 } else if (rotate == FB_ROTATE_UD) {
38560- for (x = 0; x < num && image->dx >= 0; x++) {
38561+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
38562 info->fbops->fb_imageblit(info, image);
38563 image->dx -= image->width + 8;
38564 }
fe2de317 38565@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
58c5fc13
MT
38566 image->dy += image->height + 8;
38567 }
38568 } else if (rotate == FB_ROTATE_CCW) {
38569- for (x = 0; x < num && image->dy >= 0; x++) {
38570+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
38571 info->fbops->fb_imageblit(info, image);
38572 image->dy -= image->height + 8;
38573 }
5e856224 38574@@ -1157,7 +1157,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
58c5fc13
MT
38575 return -EFAULT;
38576 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
38577 return -EINVAL;
38578- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
38579+ if (con2fb.framebuffer >= FB_MAX)
38580 return -EINVAL;
38581 if (!registered_fb[con2fb.framebuffer])
38582 request_module("fb%d", con2fb.framebuffer);
fe2de317
MT
38583diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
38584index 5a5d092..265c5ed 100644
38585--- a/drivers/video/geode/gx1fb_core.c
38586+++ b/drivers/video/geode/gx1fb_core.c
6e9df6a3
MT
38587@@ -29,7 +29,7 @@ static int crt_option = 1;
38588 static char panel_option[32] = "";
38589
38590 /* Modes relevant to the GX1 (taken from modedb.c) */
38591-static const struct fb_videomode __devinitdata gx1_modedb[] = {
38592+static const struct fb_videomode __devinitconst gx1_modedb[] = {
38593 /* 640x480-60 VESA */
38594 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
38595 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
fe2de317 38596diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
4c928ab7 38597index 0fad23f..0e9afa4 100644
fe2de317
MT
38598--- a/drivers/video/gxt4500.c
38599+++ b/drivers/video/gxt4500.c
6e9df6a3
MT
38600@@ -156,7 +156,7 @@ struct gxt4500_par {
38601 static char *mode_option;
38602
38603 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
38604-static const struct fb_videomode defaultmode __devinitdata = {
38605+static const struct fb_videomode defaultmode __devinitconst = {
38606 .refresh = 60,
38607 .xres = 1280,
38608 .yres = 1024,
fe2de317 38609@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
6e9df6a3
MT
38610 return 0;
38611 }
38612
38613-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
38614+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
38615 .id = "IBM GXT4500P",
38616 .type = FB_TYPE_PACKED_PIXELS,
38617 .visual = FB_VISUAL_PSEUDOCOLOR,
fe2de317
MT
38618diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
38619index 7672d2e..b56437f 100644
38620--- a/drivers/video/i810/i810_accel.c
38621+++ b/drivers/video/i810/i810_accel.c
38622@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
58c5fc13
MT
38623 }
38624 }
38625 printk("ringbuffer lockup!!!\n");
38626+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
38627 i810_report_error(mmio);
38628 par->dev_flags |= LOCKUP;
38629 info->pixmap.scan_align = 1;
fe2de317 38630diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
5e856224 38631index b83f361..2b05a91 100644
fe2de317
MT
38632--- a/drivers/video/i810/i810_main.c
38633+++ b/drivers/video/i810/i810_main.c
38634@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
6e9df6a3
MT
38635 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
38636
38637 /* PCI */
38638-static const char *i810_pci_list[] __devinitdata = {
38639+static const char *i810_pci_list[] __devinitconst = {
38640 "Intel(R) 810 Framebuffer Device" ,
38641 "Intel(R) 810-DC100 Framebuffer Device" ,
38642 "Intel(R) 810E Framebuffer Device" ,
fe2de317
MT
38643diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c
38644index de36693..3c63fc2 100644
38645--- a/drivers/video/jz4740_fb.c
38646+++ b/drivers/video/jz4740_fb.c
6e9df6a3
MT
38647@@ -136,7 +136,7 @@ struct jzfb {
38648 uint32_t pseudo_palette[16];
38649 };
38650
38651-static const struct fb_fix_screeninfo jzfb_fix __devinitdata = {
38652+static const struct fb_fix_screeninfo jzfb_fix __devinitconst = {
38653 .id = "JZ4740 FB",
38654 .type = FB_TYPE_PACKED_PIXELS,
38655 .visual = FB_VISUAL_TRUECOLOR,
fe2de317
MT
38656diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
38657index 3c14e43..eafa544 100644
38658--- a/drivers/video/logo/logo_linux_clut224.ppm
38659+++ b/drivers/video/logo/logo_linux_clut224.ppm
15a11c5b
MT
38660@@ -1,1604 +1,1123 @@
38661 P3
38662-# Standard 224-color Linux logo
38663 80 80
38664 255
[remaining removed pixel rows of the original 80x80, 255-maxval clut224 Linux logo PPM truncated; the hunk continues deleting "- R G B" triplet lines through the end of the 1604-line image]
39453-253 253 253 253 253 253 253 253 253 250 250 250
39454-242 242 242 253 253 253 253 253 253 253 253 253
39455-253 253 253 253 253 253 253 253 253 253 253 253
39456-253 253 253 253 253 253 253 253 253 246 246 246
39457-231 231 231 206 206 206 198 198 198 226 226 226
39458- 94 94 94 2 2 6 6 6 6 38 38 38
39459- 30 30 30 2 2 6 2 2 6 2 2 6
39460- 2 2 6 2 2 6 62 62 62 66 66 66
39461- 26 26 26 10 10 10 0 0 0 0 0 0
39462- 0 0 0 0 0 0 0 0 0 0 0 0
39463- 0 0 0 0 0 0 0 0 0 0 0 0
39464- 0 0 0 0 0 0 0 0 0 0 0 0
39465- 0 0 0 0 0 0 0 0 0 0 0 0
39466- 0 0 0 0 0 0 0 0 0 0 0 0
39467- 0 0 0 0 0 0 0 0 0 0 0 0
39468- 0 0 0 0 0 0 0 0 0 0 0 0
39469- 0 0 0 0 0 0 0 0 0 10 10 10
39470- 30 30 30 74 74 74 50 50 50 2 2 6
39471- 26 26 26 26 26 26 2 2 6 106 106 106
39472-238 238 238 253 253 253 253 253 253 253 253 253
39473-253 253 253 253 253 253 253 253 253 253 253 253
39474-253 253 253 253 253 253 253 253 253 253 253 253
39475-253 253 253 253 253 253 253 253 253 253 253 253
39476-253 253 253 253 253 253 253 253 253 253 253 253
39477-253 253 253 246 246 246 218 218 218 202 202 202
39478-210 210 210 14 14 14 2 2 6 2 2 6
39479- 30 30 30 22 22 22 2 2 6 2 2 6
39480- 2 2 6 2 2 6 18 18 18 86 86 86
39481- 42 42 42 14 14 14 0 0 0 0 0 0
39482- 0 0 0 0 0 0 0 0 0 0 0 0
39483- 0 0 0 0 0 0 0 0 0 0 0 0
39484- 0 0 0 0 0 0 0 0 0 0 0 0
39485- 0 0 0 0 0 0 0 0 0 0 0 0
39486- 0 0 0 0 0 0 0 0 0 0 0 0
39487- 0 0 0 0 0 0 0 0 0 0 0 0
39488- 0 0 0 0 0 0 0 0 0 0 0 0
39489- 0 0 0 0 0 0 0 0 0 14 14 14
39490- 42 42 42 90 90 90 22 22 22 2 2 6
39491- 42 42 42 2 2 6 18 18 18 218 218 218
39492-253 253 253 253 253 253 253 253 253 253 253 253
39493-253 253 253 253 253 253 253 253 253 253 253 253
39494-253 253 253 253 253 253 253 253 253 253 253 253
39495-253 253 253 253 253 253 253 253 253 253 253 253
39496-253 253 253 253 253 253 253 253 253 253 253 253
39497-253 253 253 253 253 253 250 250 250 221 221 221
39498-218 218 218 101 101 101 2 2 6 14 14 14
39499- 18 18 18 38 38 38 10 10 10 2 2 6
39500- 2 2 6 2 2 6 2 2 6 78 78 78
39501- 58 58 58 22 22 22 6 6 6 0 0 0
39502- 0 0 0 0 0 0 0 0 0 0 0 0
39503- 0 0 0 0 0 0 0 0 0 0 0 0
39504- 0 0 0 0 0 0 0 0 0 0 0 0
39505- 0 0 0 0 0 0 0 0 0 0 0 0
39506- 0 0 0 0 0 0 0 0 0 0 0 0
39507- 0 0 0 0 0 0 0 0 0 0 0 0
39508- 0 0 0 0 0 0 0 0 0 0 0 0
39509- 0 0 0 0 0 0 6 6 6 18 18 18
39510- 54 54 54 82 82 82 2 2 6 26 26 26
39511- 22 22 22 2 2 6 123 123 123 253 253 253
39512-253 253 253 253 253 253 253 253 253 253 253 253
39513-253 253 253 253 253 253 253 253 253 253 253 253
39514-253 253 253 253 253 253 253 253 253 253 253 253
39515-253 253 253 253 253 253 253 253 253 253 253 253
39516-253 253 253 253 253 253 253 253 253 253 253 253
39517-253 253 253 253 253 253 253 253 253 250 250 250
39518-238 238 238 198 198 198 6 6 6 38 38 38
39519- 58 58 58 26 26 26 38 38 38 2 2 6
39520- 2 2 6 2 2 6 2 2 6 46 46 46
39521- 78 78 78 30 30 30 10 10 10 0 0 0
39522- 0 0 0 0 0 0 0 0 0 0 0 0
39523- 0 0 0 0 0 0 0 0 0 0 0 0
39524- 0 0 0 0 0 0 0 0 0 0 0 0
39525- 0 0 0 0 0 0 0 0 0 0 0 0
39526- 0 0 0 0 0 0 0 0 0 0 0 0
39527- 0 0 0 0 0 0 0 0 0 0 0 0
39528- 0 0 0 0 0 0 0 0 0 0 0 0
39529- 0 0 0 0 0 0 10 10 10 30 30 30
39530- 74 74 74 58 58 58 2 2 6 42 42 42
39531- 2 2 6 22 22 22 231 231 231 253 253 253
39532-253 253 253 253 253 253 253 253 253 253 253 253
39533-253 253 253 253 253 253 253 253 253 250 250 250
39534-253 253 253 253 253 253 253 253 253 253 253 253
39535-253 253 253 253 253 253 253 253 253 253 253 253
39536-253 253 253 253 253 253 253 253 253 253 253 253
39537-253 253 253 253 253 253 253 253 253 253 253 253
39538-253 253 253 246 246 246 46 46 46 38 38 38
39539- 42 42 42 14 14 14 38 38 38 14 14 14
39540- 2 2 6 2 2 6 2 2 6 6 6 6
39541- 86 86 86 46 46 46 14 14 14 0 0 0
39542- 0 0 0 0 0 0 0 0 0 0 0 0
39543- 0 0 0 0 0 0 0 0 0 0 0 0
39544- 0 0 0 0 0 0 0 0 0 0 0 0
39545- 0 0 0 0 0 0 0 0 0 0 0 0
39546- 0 0 0 0 0 0 0 0 0 0 0 0
39547- 0 0 0 0 0 0 0 0 0 0 0 0
39548- 0 0 0 0 0 0 0 0 0 0 0 0
39549- 0 0 0 6 6 6 14 14 14 42 42 42
39550- 90 90 90 18 18 18 18 18 18 26 26 26
39551- 2 2 6 116 116 116 253 253 253 253 253 253
39552-253 253 253 253 253 253 253 253 253 253 253 253
39553-253 253 253 253 253 253 250 250 250 238 238 238
39554-253 253 253 253 253 253 253 253 253 253 253 253
39555-253 253 253 253 253 253 253 253 253 253 253 253
39556-253 253 253 253 253 253 253 253 253 253 253 253
39557-253 253 253 253 253 253 253 253 253 253 253 253
39558-253 253 253 253 253 253 94 94 94 6 6 6
39559- 2 2 6 2 2 6 10 10 10 34 34 34
39560- 2 2 6 2 2 6 2 2 6 2 2 6
39561- 74 74 74 58 58 58 22 22 22 6 6 6
39562- 0 0 0 0 0 0 0 0 0 0 0 0
39563- 0 0 0 0 0 0 0 0 0 0 0 0
39564- 0 0 0 0 0 0 0 0 0 0 0 0
39565- 0 0 0 0 0 0 0 0 0 0 0 0
39566- 0 0 0 0 0 0 0 0 0 0 0 0
39567- 0 0 0 0 0 0 0 0 0 0 0 0
39568- 0 0 0 0 0 0 0 0 0 0 0 0
39569- 0 0 0 10 10 10 26 26 26 66 66 66
39570- 82 82 82 2 2 6 38 38 38 6 6 6
39571- 14 14 14 210 210 210 253 253 253 253 253 253
39572-253 253 253 253 253 253 253 253 253 253 253 253
39573-253 253 253 253 253 253 246 246 246 242 242 242
39574-253 253 253 253 253 253 253 253 253 253 253 253
39575-253 253 253 253 253 253 253 253 253 253 253 253
39576-253 253 253 253 253 253 253 253 253 253 253 253
39577-253 253 253 253 253 253 253 253 253 253 253 253
39578-253 253 253 253 253 253 144 144 144 2 2 6
39579- 2 2 6 2 2 6 2 2 6 46 46 46
39580- 2 2 6 2 2 6 2 2 6 2 2 6
39581- 42 42 42 74 74 74 30 30 30 10 10 10
39582- 0 0 0 0 0 0 0 0 0 0 0 0
39583- 0 0 0 0 0 0 0 0 0 0 0 0
39584- 0 0 0 0 0 0 0 0 0 0 0 0
39585- 0 0 0 0 0 0 0 0 0 0 0 0
39586- 0 0 0 0 0 0 0 0 0 0 0 0
39587- 0 0 0 0 0 0 0 0 0 0 0 0
39588- 0 0 0 0 0 0 0 0 0 0 0 0
39589- 6 6 6 14 14 14 42 42 42 90 90 90
39590- 26 26 26 6 6 6 42 42 42 2 2 6
39591- 74 74 74 250 250 250 253 253 253 253 253 253
39592-253 253 253 253 253 253 253 253 253 253 253 253
39593-253 253 253 253 253 253 242 242 242 242 242 242
39594-253 253 253 253 253 253 253 253 253 253 253 253
39595-253 253 253 253 253 253 253 253 253 253 253 253
39596-253 253 253 253 253 253 253 253 253 253 253 253
39597-253 253 253 253 253 253 253 253 253 253 253 253
39598-253 253 253 253 253 253 182 182 182 2 2 6
39599- 2 2 6 2 2 6 2 2 6 46 46 46
39600- 2 2 6 2 2 6 2 2 6 2 2 6
39601- 10 10 10 86 86 86 38 38 38 10 10 10
39602- 0 0 0 0 0 0 0 0 0 0 0 0
39603- 0 0 0 0 0 0 0 0 0 0 0 0
39604- 0 0 0 0 0 0 0 0 0 0 0 0
39605- 0 0 0 0 0 0 0 0 0 0 0 0
39606- 0 0 0 0 0 0 0 0 0 0 0 0
39607- 0 0 0 0 0 0 0 0 0 0 0 0
39608- 0 0 0 0 0 0 0 0 0 0 0 0
39609- 10 10 10 26 26 26 66 66 66 82 82 82
39610- 2 2 6 22 22 22 18 18 18 2 2 6
39611-149 149 149 253 253 253 253 253 253 253 253 253
39612-253 253 253 253 253 253 253 253 253 253 253 253
39613-253 253 253 253 253 253 234 234 234 242 242 242
39614-253 253 253 253 253 253 253 253 253 253 253 253
39615-253 253 253 253 253 253 253 253 253 253 253 253
39616-253 253 253 253 253 253 253 253 253 253 253 253
39617-253 253 253 253 253 253 253 253 253 253 253 253
39618-253 253 253 253 253 253 206 206 206 2 2 6
39619- 2 2 6 2 2 6 2 2 6 38 38 38
39620- 2 2 6 2 2 6 2 2 6 2 2 6
39621- 6 6 6 86 86 86 46 46 46 14 14 14
39622- 0 0 0 0 0 0 0 0 0 0 0 0
39623- 0 0 0 0 0 0 0 0 0 0 0 0
39624- 0 0 0 0 0 0 0 0 0 0 0 0
39625- 0 0 0 0 0 0 0 0 0 0 0 0
39626- 0 0 0 0 0 0 0 0 0 0 0 0
39627- 0 0 0 0 0 0 0 0 0 0 0 0
39628- 0 0 0 0 0 0 0 0 0 6 6 6
39629- 18 18 18 46 46 46 86 86 86 18 18 18
39630- 2 2 6 34 34 34 10 10 10 6 6 6
39631-210 210 210 253 253 253 253 253 253 253 253 253
39632-253 253 253 253 253 253 253 253 253 253 253 253
39633-253 253 253 253 253 253 234 234 234 242 242 242
39634-253 253 253 253 253 253 253 253 253 253 253 253
39635-253 253 253 253 253 253 253 253 253 253 253 253
39636-253 253 253 253 253 253 253 253 253 253 253 253
39637-253 253 253 253 253 253 253 253 253 253 253 253
39638-253 253 253 253 253 253 221 221 221 6 6 6
39639- 2 2 6 2 2 6 6 6 6 30 30 30
39640- 2 2 6 2 2 6 2 2 6 2 2 6
39641- 2 2 6 82 82 82 54 54 54 18 18 18
39642- 6 6 6 0 0 0 0 0 0 0 0 0
39643- 0 0 0 0 0 0 0 0 0 0 0 0
39644- 0 0 0 0 0 0 0 0 0 0 0 0
39645- 0 0 0 0 0 0 0 0 0 0 0 0
39646- 0 0 0 0 0 0 0 0 0 0 0 0
39647- 0 0 0 0 0 0 0 0 0 0 0 0
39648- 0 0 0 0 0 0 0 0 0 10 10 10
39649- 26 26 26 66 66 66 62 62 62 2 2 6
39650- 2 2 6 38 38 38 10 10 10 26 26 26
39651-238 238 238 253 253 253 253 253 253 253 253 253
39652-253 253 253 253 253 253 253 253 253 253 253 253
39653-253 253 253 253 253 253 231 231 231 238 238 238
39654-253 253 253 253 253 253 253 253 253 253 253 253
39655-253 253 253 253 253 253 253 253 253 253 253 253
39656-253 253 253 253 253 253 253 253 253 253 253 253
39657-253 253 253 253 253 253 253 253 253 253 253 253
39658-253 253 253 253 253 253 231 231 231 6 6 6
39659- 2 2 6 2 2 6 10 10 10 30 30 30
39660- 2 2 6 2 2 6 2 2 6 2 2 6
39661- 2 2 6 66 66 66 58 58 58 22 22 22
39662- 6 6 6 0 0 0 0 0 0 0 0 0
39663- 0 0 0 0 0 0 0 0 0 0 0 0
39664- 0 0 0 0 0 0 0 0 0 0 0 0
39665- 0 0 0 0 0 0 0 0 0 0 0 0
39666- 0 0 0 0 0 0 0 0 0 0 0 0
39667- 0 0 0 0 0 0 0 0 0 0 0 0
39668- 0 0 0 0 0 0 0 0 0 10 10 10
39669- 38 38 38 78 78 78 6 6 6 2 2 6
39670- 2 2 6 46 46 46 14 14 14 42 42 42
39671-246 246 246 253 253 253 253 253 253 253 253 253
39672-253 253 253 253 253 253 253 253 253 253 253 253
39673-253 253 253 253 253 253 231 231 231 242 242 242
39674-253 253 253 253 253 253 253 253 253 253 253 253
39675-253 253 253 253 253 253 253 253 253 253 253 253
39676-253 253 253 253 253 253 253 253 253 253 253 253
39677-253 253 253 253 253 253 253 253 253 253 253 253
39678-253 253 253 253 253 253 234 234 234 10 10 10
39679- 2 2 6 2 2 6 22 22 22 14 14 14
39680- 2 2 6 2 2 6 2 2 6 2 2 6
39681- 2 2 6 66 66 66 62 62 62 22 22 22
39682- 6 6 6 0 0 0 0 0 0 0 0 0
39683- 0 0 0 0 0 0 0 0 0 0 0 0
39684- 0 0 0 0 0 0 0 0 0 0 0 0
39685- 0 0 0 0 0 0 0 0 0 0 0 0
39686- 0 0 0 0 0 0 0 0 0 0 0 0
39687- 0 0 0 0 0 0 0 0 0 0 0 0
39688- 0 0 0 0 0 0 6 6 6 18 18 18
39689- 50 50 50 74 74 74 2 2 6 2 2 6
39690- 14 14 14 70 70 70 34 34 34 62 62 62
39691-250 250 250 253 253 253 253 253 253 253 253 253
39692-253 253 253 253 253 253 253 253 253 253 253 253
39693-253 253 253 253 253 253 231 231 231 246 246 246
39694-253 253 253 253 253 253 253 253 253 253 253 253
39695-253 253 253 253 253 253 253 253 253 253 253 253
39696-253 253 253 253 253 253 253 253 253 253 253 253
39697-253 253 253 253 253 253 253 253 253 253 253 253
39698-253 253 253 253 253 253 234 234 234 14 14 14
39699- 2 2 6 2 2 6 30 30 30 2 2 6
39700- 2 2 6 2 2 6 2 2 6 2 2 6
39701- 2 2 6 66 66 66 62 62 62 22 22 22
39702- 6 6 6 0 0 0 0 0 0 0 0 0
39703- 0 0 0 0 0 0 0 0 0 0 0 0
39704- 0 0 0 0 0 0 0 0 0 0 0 0
39705- 0 0 0 0 0 0 0 0 0 0 0 0
39706- 0 0 0 0 0 0 0 0 0 0 0 0
39707- 0 0 0 0 0 0 0 0 0 0 0 0
39708- 0 0 0 0 0 0 6 6 6 18 18 18
39709- 54 54 54 62 62 62 2 2 6 2 2 6
39710- 2 2 6 30 30 30 46 46 46 70 70 70
39711-250 250 250 253 253 253 253 253 253 253 253 253
39712-253 253 253 253 253 253 253 253 253 253 253 253
39713-253 253 253 253 253 253 231 231 231 246 246 246
39714-253 253 253 253 253 253 253 253 253 253 253 253
39715-253 253 253 253 253 253 253 253 253 253 253 253
39716-253 253 253 253 253 253 253 253 253 253 253 253
39717-253 253 253 253 253 253 253 253 253 253 253 253
39718-253 253 253 253 253 253 226 226 226 10 10 10
39719- 2 2 6 6 6 6 30 30 30 2 2 6
39720- 2 2 6 2 2 6 2 2 6 2 2 6
39721- 2 2 6 66 66 66 58 58 58 22 22 22
39722- 6 6 6 0 0 0 0 0 0 0 0 0
39723- 0 0 0 0 0 0 0 0 0 0 0 0
39724- 0 0 0 0 0 0 0 0 0 0 0 0
39725- 0 0 0 0 0 0 0 0 0 0 0 0
39726- 0 0 0 0 0 0 0 0 0 0 0 0
39727- 0 0 0 0 0 0 0 0 0 0 0 0
39728- 0 0 0 0 0 0 6 6 6 22 22 22
39729- 58 58 58 62 62 62 2 2 6 2 2 6
39730- 2 2 6 2 2 6 30 30 30 78 78 78
39731-250 250 250 253 253 253 253 253 253 253 253 253
39732-253 253 253 253 253 253 253 253 253 253 253 253
39733-253 253 253 253 253 253 231 231 231 246 246 246
39734-253 253 253 253 253 253 253 253 253 253 253 253
39735-253 253 253 253 253 253 253 253 253 253 253 253
39736-253 253 253 253 253 253 253 253 253 253 253 253
39737-253 253 253 253 253 253 253 253 253 253 253 253
39738-253 253 253 253 253 253 206 206 206 2 2 6
39739- 22 22 22 34 34 34 18 14 6 22 22 22
39740- 26 26 26 18 18 18 6 6 6 2 2 6
39741- 2 2 6 82 82 82 54 54 54 18 18 18
39742- 6 6 6 0 0 0 0 0 0 0 0 0
39743- 0 0 0 0 0 0 0 0 0 0 0 0
39744- 0 0 0 0 0 0 0 0 0 0 0 0
39745- 0 0 0 0 0 0 0 0 0 0 0 0
39746- 0 0 0 0 0 0 0 0 0 0 0 0
39747- 0 0 0 0 0 0 0 0 0 0 0 0
39748- 0 0 0 0 0 0 6 6 6 26 26 26
39749- 62 62 62 106 106 106 74 54 14 185 133 11
39750-210 162 10 121 92 8 6 6 6 62 62 62
39751-238 238 238 253 253 253 253 253 253 253 253 253
39752-253 253 253 253 253 253 253 253 253 253 253 253
39753-253 253 253 253 253 253 231 231 231 246 246 246
39754-253 253 253 253 253 253 253 253 253 253 253 253
39755-253 253 253 253 253 253 253 253 253 253 253 253
39756-253 253 253 253 253 253 253 253 253 253 253 253
39757-253 253 253 253 253 253 253 253 253 253 253 253
39758-253 253 253 253 253 253 158 158 158 18 18 18
39759- 14 14 14 2 2 6 2 2 6 2 2 6
39760- 6 6 6 18 18 18 66 66 66 38 38 38
39761- 6 6 6 94 94 94 50 50 50 18 18 18
39762- 6 6 6 0 0 0 0 0 0 0 0 0
39763- 0 0 0 0 0 0 0 0 0 0 0 0
39764- 0 0 0 0 0 0 0 0 0 0 0 0
39765- 0 0 0 0 0 0 0 0 0 0 0 0
39766- 0 0 0 0 0 0 0 0 0 0 0 0
39767- 0 0 0 0 0 0 0 0 0 6 6 6
39768- 10 10 10 10 10 10 18 18 18 38 38 38
39769- 78 78 78 142 134 106 216 158 10 242 186 14
39770-246 190 14 246 190 14 156 118 10 10 10 10
39771- 90 90 90 238 238 238 253 253 253 253 253 253
39772-253 253 253 253 253 253 253 253 253 253 253 253
39773-253 253 253 253 253 253 231 231 231 250 250 250
39774-253 253 253 253 253 253 253 253 253 253 253 253
39775-253 253 253 253 253 253 253 253 253 253 253 253
39776-253 253 253 253 253 253 253 253 253 253 253 253
39777-253 253 253 253 253 253 253 253 253 246 230 190
39778-238 204 91 238 204 91 181 142 44 37 26 9
39779- 2 2 6 2 2 6 2 2 6 2 2 6
39780- 2 2 6 2 2 6 38 38 38 46 46 46
39781- 26 26 26 106 106 106 54 54 54 18 18 18
39782- 6 6 6 0 0 0 0 0 0 0 0 0
39783- 0 0 0 0 0 0 0 0 0 0 0 0
39784- 0 0 0 0 0 0 0 0 0 0 0 0
39785- 0 0 0 0 0 0 0 0 0 0 0 0
39786- 0 0 0 0 0 0 0 0 0 0 0 0
39787- 0 0 0 6 6 6 14 14 14 22 22 22
39788- 30 30 30 38 38 38 50 50 50 70 70 70
39789-106 106 106 190 142 34 226 170 11 242 186 14
39790-246 190 14 246 190 14 246 190 14 154 114 10
39791- 6 6 6 74 74 74 226 226 226 253 253 253
39792-253 253 253 253 253 253 253 253 253 253 253 253
39793-253 253 253 253 253 253 231 231 231 250 250 250
39794-253 253 253 253 253 253 253 253 253 253 253 253
39795-253 253 253 253 253 253 253 253 253 253 253 253
39796-253 253 253 253 253 253 253 253 253 253 253 253
39797-253 253 253 253 253 253 253 253 253 228 184 62
39798-241 196 14 241 208 19 232 195 16 38 30 10
39799- 2 2 6 2 2 6 2 2 6 2 2 6
39800- 2 2 6 6 6 6 30 30 30 26 26 26
39801-203 166 17 154 142 90 66 66 66 26 26 26
39802- 6 6 6 0 0 0 0 0 0 0 0 0
39803- 0 0 0 0 0 0 0 0 0 0 0 0
39804- 0 0 0 0 0 0 0 0 0 0 0 0
39805- 0 0 0 0 0 0 0 0 0 0 0 0
39806- 0 0 0 0 0 0 0 0 0 0 0 0
39807- 6 6 6 18 18 18 38 38 38 58 58 58
39808- 78 78 78 86 86 86 101 101 101 123 123 123
39809-175 146 61 210 150 10 234 174 13 246 186 14
39810-246 190 14 246 190 14 246 190 14 238 190 10
39811-102 78 10 2 2 6 46 46 46 198 198 198
39812-253 253 253 253 253 253 253 253 253 253 253 253
39813-253 253 253 253 253 253 234 234 234 242 242 242
39814-253 253 253 253 253 253 253 253 253 253 253 253
39815-253 253 253 253 253 253 253 253 253 253 253 253
39816-253 253 253 253 253 253 253 253 253 253 253 253
39817-253 253 253 253 253 253 253 253 253 224 178 62
39818-242 186 14 241 196 14 210 166 10 22 18 6
39819- 2 2 6 2 2 6 2 2 6 2 2 6
39820- 2 2 6 2 2 6 6 6 6 121 92 8
39821-238 202 15 232 195 16 82 82 82 34 34 34
39822- 10 10 10 0 0 0 0 0 0 0 0 0
39823- 0 0 0 0 0 0 0 0 0 0 0 0
39824- 0 0 0 0 0 0 0 0 0 0 0 0
39825- 0 0 0 0 0 0 0 0 0 0 0 0
39826- 0 0 0 0 0 0 0 0 0 0 0 0
39827- 14 14 14 38 38 38 70 70 70 154 122 46
39828-190 142 34 200 144 11 197 138 11 197 138 11
39829-213 154 11 226 170 11 242 186 14 246 190 14
39830-246 190 14 246 190 14 246 190 14 246 190 14
39831-225 175 15 46 32 6 2 2 6 22 22 22
39832-158 158 158 250 250 250 253 253 253 253 253 253
39833-253 253 253 253 253 253 253 253 253 253 253 253
39834-253 253 253 253 253 253 253 253 253 253 253 253
39835-253 253 253 253 253 253 253 253 253 253 253 253
39836-253 253 253 253 253 253 253 253 253 253 253 253
39837-253 253 253 250 250 250 242 242 242 224 178 62
39838-239 182 13 236 186 11 213 154 11 46 32 6
39839- 2 2 6 2 2 6 2 2 6 2 2 6
39840- 2 2 6 2 2 6 61 42 6 225 175 15
39841-238 190 10 236 186 11 112 100 78 42 42 42
39842- 14 14 14 0 0 0 0 0 0 0 0 0
39843- 0 0 0 0 0 0 0 0 0 0 0 0
39844- 0 0 0 0 0 0 0 0 0 0 0 0
39845- 0 0 0 0 0 0 0 0 0 0 0 0
39846- 0 0 0 0 0 0 0 0 0 6 6 6
39847- 22 22 22 54 54 54 154 122 46 213 154 11
39848-226 170 11 230 174 11 226 170 11 226 170 11
39849-236 178 12 242 186 14 246 190 14 246 190 14
39850-246 190 14 246 190 14 246 190 14 246 190 14
39851-241 196 14 184 144 12 10 10 10 2 2 6
39852- 6 6 6 116 116 116 242 242 242 253 253 253
39853-253 253 253 253 253 253 253 253 253 253 253 253
39854-253 253 253 253 253 253 253 253 253 253 253 253
39855-253 253 253 253 253 253 253 253 253 253 253 253
39856-253 253 253 253 253 253 253 253 253 253 253 253
39857-253 253 253 231 231 231 198 198 198 214 170 54
39858-236 178 12 236 178 12 210 150 10 137 92 6
39859- 18 14 6 2 2 6 2 2 6 2 2 6
39860- 6 6 6 70 47 6 200 144 11 236 178 12
39861-239 182 13 239 182 13 124 112 88 58 58 58
39862- 22 22 22 6 6 6 0 0 0 0 0 0
39863- 0 0 0 0 0 0 0 0 0 0 0 0
39864- 0 0 0 0 0 0 0 0 0 0 0 0
39865- 0 0 0 0 0 0 0 0 0 0 0 0
39866- 0 0 0 0 0 0 0 0 0 10 10 10
39867- 30 30 30 70 70 70 180 133 36 226 170 11
39868-239 182 13 242 186 14 242 186 14 246 186 14
39869-246 190 14 246 190 14 246 190 14 246 190 14
39870-246 190 14 246 190 14 246 190 14 246 190 14
39871-246 190 14 232 195 16 98 70 6 2 2 6
39872- 2 2 6 2 2 6 66 66 66 221 221 221
39873-253 253 253 253 253 253 253 253 253 253 253 253
39874-253 253 253 253 253 253 253 253 253 253 253 253
39875-253 253 253 253 253 253 253 253 253 253 253 253
39876-253 253 253 253 253 253 253 253 253 253 253 253
39877-253 253 253 206 206 206 198 198 198 214 166 58
39878-230 174 11 230 174 11 216 158 10 192 133 9
39879-163 110 8 116 81 8 102 78 10 116 81 8
39880-167 114 7 197 138 11 226 170 11 239 182 13
39881-242 186 14 242 186 14 162 146 94 78 78 78
39882- 34 34 34 14 14 14 6 6 6 0 0 0
39883- 0 0 0 0 0 0 0 0 0 0 0 0
39884- 0 0 0 0 0 0 0 0 0 0 0 0
39885- 0 0 0 0 0 0 0 0 0 0 0 0
39886- 0 0 0 0 0 0 0 0 0 6 6 6
39887- 30 30 30 78 78 78 190 142 34 226 170 11
39888-239 182 13 246 190 14 246 190 14 246 190 14
39889-246 190 14 246 190 14 246 190 14 246 190 14
39890-246 190 14 246 190 14 246 190 14 246 190 14
39891-246 190 14 241 196 14 203 166 17 22 18 6
39892- 2 2 6 2 2 6 2 2 6 38 38 38
39893-218 218 218 253 253 253 253 253 253 253 253 253
39894-253 253 253 253 253 253 253 253 253 253 253 253
39895-253 253 253 253 253 253 253 253 253 253 253 253
39896-253 253 253 253 253 253 253 253 253 253 253 253
39897-250 250 250 206 206 206 198 198 198 202 162 69
39898-226 170 11 236 178 12 224 166 10 210 150 10
39899-200 144 11 197 138 11 192 133 9 197 138 11
39900-210 150 10 226 170 11 242 186 14 246 190 14
39901-246 190 14 246 186 14 225 175 15 124 112 88
39902- 62 62 62 30 30 30 14 14 14 6 6 6
39903- 0 0 0 0 0 0 0 0 0 0 0 0
39904- 0 0 0 0 0 0 0 0 0 0 0 0
39905- 0 0 0 0 0 0 0 0 0 0 0 0
39906- 0 0 0 0 0 0 0 0 0 10 10 10
39907- 30 30 30 78 78 78 174 135 50 224 166 10
39908-239 182 13 246 190 14 246 190 14 246 190 14
39909-246 190 14 246 190 14 246 190 14 246 190 14
39910-246 190 14 246 190 14 246 190 14 246 190 14
39911-246 190 14 246 190 14 241 196 14 139 102 15
39912- 2 2 6 2 2 6 2 2 6 2 2 6
39913- 78 78 78 250 250 250 253 253 253 253 253 253
39914-253 253 253 253 253 253 253 253 253 253 253 253
39915-253 253 253 253 253 253 253 253 253 253 253 253
39916-253 253 253 253 253 253 253 253 253 253 253 253
39917-250 250 250 214 214 214 198 198 198 190 150 46
39918-219 162 10 236 178 12 234 174 13 224 166 10
39919-216 158 10 213 154 11 213 154 11 216 158 10
39920-226 170 11 239 182 13 246 190 14 246 190 14
39921-246 190 14 246 190 14 242 186 14 206 162 42
39922-101 101 101 58 58 58 30 30 30 14 14 14
39923- 6 6 6 0 0 0 0 0 0 0 0 0
39924- 0 0 0 0 0 0 0 0 0 0 0 0
39925- 0 0 0 0 0 0 0 0 0 0 0 0
39926- 0 0 0 0 0 0 0 0 0 10 10 10
39927- 30 30 30 74 74 74 174 135 50 216 158 10
39928-236 178 12 246 190 14 246 190 14 246 190 14
39929-246 190 14 246 190 14 246 190 14 246 190 14
39930-246 190 14 246 190 14 246 190 14 246 190 14
39931-246 190 14 246 190 14 241 196 14 226 184 13
39932- 61 42 6 2 2 6 2 2 6 2 2 6
39933- 22 22 22 238 238 238 253 253 253 253 253 253
39934-253 253 253 253 253 253 253 253 253 253 253 253
39935-253 253 253 253 253 253 253 253 253 253 253 253
39936-253 253 253 253 253 253 253 253 253 253 253 253
39937-253 253 253 226 226 226 187 187 187 180 133 36
39938-216 158 10 236 178 12 239 182 13 236 178 12
39939-230 174 11 226 170 11 226 170 11 230 174 11
39940-236 178 12 242 186 14 246 190 14 246 190 14
39941-246 190 14 246 190 14 246 186 14 239 182 13
39942-206 162 42 106 106 106 66 66 66 34 34 34
39943- 14 14 14 6 6 6 0 0 0 0 0 0
39944- 0 0 0 0 0 0 0 0 0 0 0 0
39945- 0 0 0 0 0 0 0 0 0 0 0 0
39946- 0 0 0 0 0 0 0 0 0 6 6 6
39947- 26 26 26 70 70 70 163 133 67 213 154 11
39948-236 178 12 246 190 14 246 190 14 246 190 14
39949-246 190 14 246 190 14 246 190 14 246 190 14
39950-246 190 14 246 190 14 246 190 14 246 190 14
39951-246 190 14 246 190 14 246 190 14 241 196 14
39952-190 146 13 18 14 6 2 2 6 2 2 6
39953- 46 46 46 246 246 246 253 253 253 253 253 253
39954-253 253 253 253 253 253 253 253 253 253 253 253
39955-253 253 253 253 253 253 253 253 253 253 253 253
39956-253 253 253 253 253 253 253 253 253 253 253 253
39957-253 253 253 221 221 221 86 86 86 156 107 11
39958-216 158 10 236 178 12 242 186 14 246 186 14
39959-242 186 14 239 182 13 239 182 13 242 186 14
39960-242 186 14 246 186 14 246 190 14 246 190 14
39961-246 190 14 246 190 14 246 190 14 246 190 14
39962-242 186 14 225 175 15 142 122 72 66 66 66
39963- 30 30 30 10 10 10 0 0 0 0 0 0
39964- 0 0 0 0 0 0 0 0 0 0 0 0
39965- 0 0 0 0 0 0 0 0 0 0 0 0
39966- 0 0 0 0 0 0 0 0 0 6 6 6
39967- 26 26 26 70 70 70 163 133 67 210 150 10
39968-236 178 12 246 190 14 246 190 14 246 190 14
39969-246 190 14 246 190 14 246 190 14 246 190 14
39970-246 190 14 246 190 14 246 190 14 246 190 14
39971-246 190 14 246 190 14 246 190 14 246 190 14
39972-232 195 16 121 92 8 34 34 34 106 106 106
39973-221 221 221 253 253 253 253 253 253 253 253 253
39974-253 253 253 253 253 253 253 253 253 253 253 253
39975-253 253 253 253 253 253 253 253 253 253 253 253
39976-253 253 253 253 253 253 253 253 253 253 253 253
39977-242 242 242 82 82 82 18 14 6 163 110 8
39978-216 158 10 236 178 12 242 186 14 246 190 14
39979-246 190 14 246 190 14 246 190 14 246 190 14
39980-246 190 14 246 190 14 246 190 14 246 190 14
39981-246 190 14 246 190 14 246 190 14 246 190 14
39982-246 190 14 246 190 14 242 186 14 163 133 67
39983- 46 46 46 18 18 18 6 6 6 0 0 0
39984- 0 0 0 0 0 0 0 0 0 0 0 0
39985- 0 0 0 0 0 0 0 0 0 0 0 0
39986- 0 0 0 0 0 0 0 0 0 10 10 10
39987- 30 30 30 78 78 78 163 133 67 210 150 10
39988-236 178 12 246 186 14 246 190 14 246 190 14
39989-246 190 14 246 190 14 246 190 14 246 190 14
39990-246 190 14 246 190 14 246 190 14 246 190 14
39991-246 190 14 246 190 14 246 190 14 246 190 14
39992-241 196 14 215 174 15 190 178 144 253 253 253
39993-253 253 253 253 253 253 253 253 253 253 253 253
39994-253 253 253 253 253 253 253 253 253 253 253 253
39995-253 253 253 253 253 253 253 253 253 253 253 253
39996-253 253 253 253 253 253 253 253 253 218 218 218
39997- 58 58 58 2 2 6 22 18 6 167 114 7
39998-216 158 10 236 178 12 246 186 14 246 190 14
39999-246 190 14 246 190 14 246 190 14 246 190 14
40000-246 190 14 246 190 14 246 190 14 246 190 14
40001-246 190 14 246 190 14 246 190 14 246 190 14
40002-246 190 14 246 186 14 242 186 14 190 150 46
40003- 54 54 54 22 22 22 6 6 6 0 0 0
40004- 0 0 0 0 0 0 0 0 0 0 0 0
40005- 0 0 0 0 0 0 0 0 0 0 0 0
40006- 0 0 0 0 0 0 0 0 0 14 14 14
40007- 38 38 38 86 86 86 180 133 36 213 154 11
40008-236 178 12 246 186 14 246 190 14 246 190 14
40009-246 190 14 246 190 14 246 190 14 246 190 14
40010-246 190 14 246 190 14 246 190 14 246 190 14
40011-246 190 14 246 190 14 246 190 14 246 190 14
40012-246 190 14 232 195 16 190 146 13 214 214 214
40013-253 253 253 253 253 253 253 253 253 253 253 253
40014-253 253 253 253 253 253 253 253 253 253 253 253
40015-253 253 253 253 253 253 253 253 253 253 253 253
40016-253 253 253 250 250 250 170 170 170 26 26 26
40017- 2 2 6 2 2 6 37 26 9 163 110 8
40018-219 162 10 239 182 13 246 186 14 246 190 14
40019-246 190 14 246 190 14 246 190 14 246 190 14
40020-246 190 14 246 190 14 246 190 14 246 190 14
40021-246 190 14 246 190 14 246 190 14 246 190 14
40022-246 186 14 236 178 12 224 166 10 142 122 72
40023- 46 46 46 18 18 18 6 6 6 0 0 0
40024- 0 0 0 0 0 0 0 0 0 0 0 0
40025- 0 0 0 0 0 0 0 0 0 0 0 0
40026- 0 0 0 0 0 0 6 6 6 18 18 18
40027- 50 50 50 109 106 95 192 133 9 224 166 10
40028-242 186 14 246 190 14 246 190 14 246 190 14
40029-246 190 14 246 190 14 246 190 14 246 190 14
40030-246 190 14 246 190 14 246 190 14 246 190 14
40031-246 190 14 246 190 14 246 190 14 246 190 14
40032-242 186 14 226 184 13 210 162 10 142 110 46
40033-226 226 226 253 253 253 253 253 253 253 253 253
40034-253 253 253 253 253 253 253 253 253 253 253 253
40035-253 253 253 253 253 253 253 253 253 253 253 253
40036-198 198 198 66 66 66 2 2 6 2 2 6
40037- 2 2 6 2 2 6 50 34 6 156 107 11
40038-219 162 10 239 182 13 246 186 14 246 190 14
40039-246 190 14 246 190 14 246 190 14 246 190 14
40040-246 190 14 246 190 14 246 190 14 246 190 14
40041-246 190 14 246 190 14 246 190 14 242 186 14
40042-234 174 13 213 154 11 154 122 46 66 66 66
40043- 30 30 30 10 10 10 0 0 0 0 0 0
40044- 0 0 0 0 0 0 0 0 0 0 0 0
40045- 0 0 0 0 0 0 0 0 0 0 0 0
40046- 0 0 0 0 0 0 6 6 6 22 22 22
40047- 58 58 58 154 121 60 206 145 10 234 174 13
40048-242 186 14 246 186 14 246 190 14 246 190 14
40049-246 190 14 246 190 14 246 190 14 246 190 14
40050-246 190 14 246 190 14 246 190 14 246 190 14
40051-246 190 14 246 190 14 246 190 14 246 190 14
40052-246 186 14 236 178 12 210 162 10 163 110 8
40053- 61 42 6 138 138 138 218 218 218 250 250 250
40054-253 253 253 253 253 253 253 253 253 250 250 250
40055-242 242 242 210 210 210 144 144 144 66 66 66
40056- 6 6 6 2 2 6 2 2 6 2 2 6
40057- 2 2 6 2 2 6 61 42 6 163 110 8
40058-216 158 10 236 178 12 246 190 14 246 190 14
40059-246 190 14 246 190 14 246 190 14 246 190 14
40060-246 190 14 246 190 14 246 190 14 246 190 14
40061-246 190 14 239 182 13 230 174 11 216 158 10
40062-190 142 34 124 112 88 70 70 70 38 38 38
40063- 18 18 18 6 6 6 0 0 0 0 0 0
40064- 0 0 0 0 0 0 0 0 0 0 0 0
40065- 0 0 0 0 0 0 0 0 0 0 0 0
40066- 0 0 0 0 0 0 6 6 6 22 22 22
40067- 62 62 62 168 124 44 206 145 10 224 166 10
40068-236 178 12 239 182 13 242 186 14 242 186 14
40069-246 186 14 246 190 14 246 190 14 246 190 14
40070-246 190 14 246 190 14 246 190 14 246 190 14
40071-246 190 14 246 190 14 246 190 14 246 190 14
40072-246 190 14 236 178 12 216 158 10 175 118 6
40073- 80 54 7 2 2 6 6 6 6 30 30 30
40074- 54 54 54 62 62 62 50 50 50 38 38 38
40075- 14 14 14 2 2 6 2 2 6 2 2 6
40076- 2 2 6 2 2 6 2 2 6 2 2 6
40077- 2 2 6 6 6 6 80 54 7 167 114 7
40078-213 154 11 236 178 12 246 190 14 246 190 14
40079-246 190 14 246 190 14 246 190 14 246 190 14
40080-246 190 14 242 186 14 239 182 13 239 182 13
40081-230 174 11 210 150 10 174 135 50 124 112 88
40082- 82 82 82 54 54 54 34 34 34 18 18 18
40083- 6 6 6 0 0 0 0 0 0 0 0 0
40084- 0 0 0 0 0 0 0 0 0 0 0 0
40085- 0 0 0 0 0 0 0 0 0 0 0 0
40086- 0 0 0 0 0 0 6 6 6 18 18 18
40087- 50 50 50 158 118 36 192 133 9 200 144 11
40088-216 158 10 219 162 10 224 166 10 226 170 11
40089-230 174 11 236 178 12 239 182 13 239 182 13
40090-242 186 14 246 186 14 246 190 14 246 190 14
40091-246 190 14 246 190 14 246 190 14 246 190 14
40092-246 186 14 230 174 11 210 150 10 163 110 8
40093-104 69 6 10 10 10 2 2 6 2 2 6
40094- 2 2 6 2 2 6 2 2 6 2 2 6
40095- 2 2 6 2 2 6 2 2 6 2 2 6
40096- 2 2 6 2 2 6 2 2 6 2 2 6
40097- 2 2 6 6 6 6 91 60 6 167 114 7
40098-206 145 10 230 174 11 242 186 14 246 190 14
40099-246 190 14 246 190 14 246 186 14 242 186 14
40100-239 182 13 230 174 11 224 166 10 213 154 11
40101-180 133 36 124 112 88 86 86 86 58 58 58
40102- 38 38 38 22 22 22 10 10 10 6 6 6
40103- 0 0 0 0 0 0 0 0 0 0 0 0
40104- 0 0 0 0 0 0 0 0 0 0 0 0
40105- 0 0 0 0 0 0 0 0 0 0 0 0
40106- 0 0 0 0 0 0 0 0 0 14 14 14
40107- 34 34 34 70 70 70 138 110 50 158 118 36
40108-167 114 7 180 123 7 192 133 9 197 138 11
40109-200 144 11 206 145 10 213 154 11 219 162 10
40110-224 166 10 230 174 11 239 182 13 242 186 14
40111-246 186 14 246 186 14 246 186 14 246 186 14
40112-239 182 13 216 158 10 185 133 11 152 99 6
40113-104 69 6 18 14 6 2 2 6 2 2 6
40114- 2 2 6 2 2 6 2 2 6 2 2 6
40115- 2 2 6 2 2 6 2 2 6 2 2 6
40116- 2 2 6 2 2 6 2 2 6 2 2 6
40117- 2 2 6 6 6 6 80 54 7 152 99 6
40118-192 133 9 219 162 10 236 178 12 239 182 13
40119-246 186 14 242 186 14 239 182 13 236 178 12
40120-224 166 10 206 145 10 192 133 9 154 121 60
40121- 94 94 94 62 62 62 42 42 42 22 22 22
40122- 14 14 14 6 6 6 0 0 0 0 0 0
40123- 0 0 0 0 0 0 0 0 0 0 0 0
40124- 0 0 0 0 0 0 0 0 0 0 0 0
40125- 0 0 0 0 0 0 0 0 0 0 0 0
40126- 0 0 0 0 0 0 0 0 0 6 6 6
40127- 18 18 18 34 34 34 58 58 58 78 78 78
40128-101 98 89 124 112 88 142 110 46 156 107 11
40129-163 110 8 167 114 7 175 118 6 180 123 7
40130-185 133 11 197 138 11 210 150 10 219 162 10
40131-226 170 11 236 178 12 236 178 12 234 174 13
40132-219 162 10 197 138 11 163 110 8 130 83 6
40133- 91 60 6 10 10 10 2 2 6 2 2 6
40134- 18 18 18 38 38 38 38 38 38 38 38 38
40135- 38 38 38 38 38 38 38 38 38 38 38 38
40136- 38 38 38 38 38 38 26 26 26 2 2 6
40137- 2 2 6 6 6 6 70 47 6 137 92 6
40138-175 118 6 200 144 11 219 162 10 230 174 11
40139-234 174 13 230 174 11 219 162 10 210 150 10
40140-192 133 9 163 110 8 124 112 88 82 82 82
40141- 50 50 50 30 30 30 14 14 14 6 6 6
40142- 0 0 0 0 0 0 0 0 0 0 0 0
40143- 0 0 0 0 0 0 0 0 0 0 0 0
40144- 0 0 0 0 0 0 0 0 0 0 0 0
40145- 0 0 0 0 0 0 0 0 0 0 0 0
40146- 0 0 0 0 0 0 0 0 0 0 0 0
40147- 6 6 6 14 14 14 22 22 22 34 34 34
40148- 42 42 42 58 58 58 74 74 74 86 86 86
40149-101 98 89 122 102 70 130 98 46 121 87 25
40150-137 92 6 152 99 6 163 110 8 180 123 7
40151-185 133 11 197 138 11 206 145 10 200 144 11
40152-180 123 7 156 107 11 130 83 6 104 69 6
40153- 50 34 6 54 54 54 110 110 110 101 98 89
40154- 86 86 86 82 82 82 78 78 78 78 78 78
40155- 78 78 78 78 78 78 78 78 78 78 78 78
40156- 78 78 78 82 82 82 86 86 86 94 94 94
40157-106 106 106 101 101 101 86 66 34 124 80 6
40158-156 107 11 180 123 7 192 133 9 200 144 11
40159-206 145 10 200 144 11 192 133 9 175 118 6
40160-139 102 15 109 106 95 70 70 70 42 42 42
40161- 22 22 22 10 10 10 0 0 0 0 0 0
40162- 0 0 0 0 0 0 0 0 0 0 0 0
40163- 0 0 0 0 0 0 0 0 0 0 0 0
40164- 0 0 0 0 0 0 0 0 0 0 0 0
40165- 0 0 0 0 0 0 0 0 0 0 0 0
40166- 0 0 0 0 0 0 0 0 0 0 0 0
40167- 0 0 0 0 0 0 6 6 6 10 10 10
40168- 14 14 14 22 22 22 30 30 30 38 38 38
40169- 50 50 50 62 62 62 74 74 74 90 90 90
40170-101 98 89 112 100 78 121 87 25 124 80 6
40171-137 92 6 152 99 6 152 99 6 152 99 6
40172-138 86 6 124 80 6 98 70 6 86 66 30
40173-101 98 89 82 82 82 58 58 58 46 46 46
40174- 38 38 38 34 34 34 34 34 34 34 34 34
40175- 34 34 34 34 34 34 34 34 34 34 34 34
40176- 34 34 34 34 34 34 38 38 38 42 42 42
40177- 54 54 54 82 82 82 94 86 76 91 60 6
40178-134 86 6 156 107 11 167 114 7 175 118 6
40179-175 118 6 167 114 7 152 99 6 121 87 25
40180-101 98 89 62 62 62 34 34 34 18 18 18
40181- 6 6 6 0 0 0 0 0 0 0 0 0
40182- 0 0 0 0 0 0 0 0 0 0 0 0
40183- 0 0 0 0 0 0 0 0 0 0 0 0
40184- 0 0 0 0 0 0 0 0 0 0 0 0
40185- 0 0 0 0 0 0 0 0 0 0 0 0
40186- 0 0 0 0 0 0 0 0 0 0 0 0
40187- 0 0 0 0 0 0 0 0 0 0 0 0
40188- 0 0 0 6 6 6 6 6 6 10 10 10
40189- 18 18 18 22 22 22 30 30 30 42 42 42
40190- 50 50 50 66 66 66 86 86 86 101 98 89
40191-106 86 58 98 70 6 104 69 6 104 69 6
40192-104 69 6 91 60 6 82 62 34 90 90 90
40193- 62 62 62 38 38 38 22 22 22 14 14 14
40194- 10 10 10 10 10 10 10 10 10 10 10 10
40195- 10 10 10 10 10 10 6 6 6 10 10 10
40196- 10 10 10 10 10 10 10 10 10 14 14 14
40197- 22 22 22 42 42 42 70 70 70 89 81 66
40198- 80 54 7 104 69 6 124 80 6 137 92 6
40199-134 86 6 116 81 8 100 82 52 86 86 86
40200- 58 58 58 30 30 30 14 14 14 6 6 6
40201- 0 0 0 0 0 0 0 0 0 0 0 0
40202- 0 0 0 0 0 0 0 0 0 0 0 0
40203- 0 0 0 0 0 0 0 0 0 0 0 0
40204- 0 0 0 0 0 0 0 0 0 0 0 0
40205- 0 0 0 0 0 0 0 0 0 0 0 0
40206- 0 0 0 0 0 0 0 0 0 0 0 0
40207- 0 0 0 0 0 0 0 0 0 0 0 0
40208- 0 0 0 0 0 0 0 0 0 0 0 0
40209- 0 0 0 6 6 6 10 10 10 14 14 14
40210- 18 18 18 26 26 26 38 38 38 54 54 54
40211- 70 70 70 86 86 86 94 86 76 89 81 66
40212- 89 81 66 86 86 86 74 74 74 50 50 50
40213- 30 30 30 14 14 14 6 6 6 0 0 0
40214- 0 0 0 0 0 0 0 0 0 0 0 0
40215- 0 0 0 0 0 0 0 0 0 0 0 0
40216- 0 0 0 0 0 0 0 0 0 0 0 0
40217- 6 6 6 18 18 18 34 34 34 58 58 58
40218- 82 82 82 89 81 66 89 81 66 89 81 66
40219- 94 86 66 94 86 76 74 74 74 50 50 50
40220- 26 26 26 14 14 14 6 6 6 0 0 0
40221- 0 0 0 0 0 0 0 0 0 0 0 0
40222- 0 0 0 0 0 0 0 0 0 0 0 0
40223- 0 0 0 0 0 0 0 0 0 0 0 0
40224- 0 0 0 0 0 0 0 0 0 0 0 0
40225- 0 0 0 0 0 0 0 0 0 0 0 0
40226- 0 0 0 0 0 0 0 0 0 0 0 0
40227- 0 0 0 0 0 0 0 0 0 0 0 0
40228- 0 0 0 0 0 0 0 0 0 0 0 0
40229- 0 0 0 0 0 0 0 0 0 0 0 0
40230- 6 6 6 6 6 6 14 14 14 18 18 18
40231- 30 30 30 38 38 38 46 46 46 54 54 54
40232- 50 50 50 42 42 42 30 30 30 18 18 18
40233- 10 10 10 0 0 0 0 0 0 0 0 0
40234- 0 0 0 0 0 0 0 0 0 0 0 0
40235- 0 0 0 0 0 0 0 0 0 0 0 0
40236- 0 0 0 0 0 0 0 0 0 0 0 0
40237- 0 0 0 6 6 6 14 14 14 26 26 26
40238- 38 38 38 50 50 50 58 58 58 58 58 58
40239- 54 54 54 42 42 42 30 30 30 18 18 18
40240- 10 10 10 0 0 0 0 0 0 0 0 0
40241- 0 0 0 0 0 0 0 0 0 0 0 0
40242- 0 0 0 0 0 0 0 0 0 0 0 0
40243- 0 0 0 0 0 0 0 0 0 0 0 0
40244- 0 0 0 0 0 0 0 0 0 0 0 0
40245- 0 0 0 0 0 0 0 0 0 0 0 0
40246- 0 0 0 0 0 0 0 0 0 0 0 0
40247- 0 0 0 0 0 0 0 0 0 0 0 0
40248- 0 0 0 0 0 0 0 0 0 0 0 0
40249- 0 0 0 0 0 0 0 0 0 0 0 0
40250- 0 0 0 0 0 0 0 0 0 6 6 6
40251- 6 6 6 10 10 10 14 14 14 18 18 18
40252- 18 18 18 14 14 14 10 10 10 6 6 6
40253- 0 0 0 0 0 0 0 0 0 0 0 0
40254- 0 0 0 0 0 0 0 0 0 0 0 0
40255- 0 0 0 0 0 0 0 0 0 0 0 0
40256- 0 0 0 0 0 0 0 0 0 0 0 0
40257- 0 0 0 0 0 0 0 0 0 6 6 6
40258- 14 14 14 18 18 18 22 22 22 22 22 22
40259- 18 18 18 14 14 14 10 10 10 6 6 6
40260- 0 0 0 0 0 0 0 0 0 0 0 0
40261- 0 0 0 0 0 0 0 0 0 0 0 0
40262- 0 0 0 0 0 0 0 0 0 0 0 0
40263- 0 0 0 0 0 0 0 0 0 0 0 0
40264- 0 0 0 0 0 0 0 0 0 0 0 0
40265+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40266+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40269+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40270+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40271+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40272+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40273+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40277+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40278+4 4 4 4 4 4
40279+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40280+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40283+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40284+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40285+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40286+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40287+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40291+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40292+4 4 4 4 4 4
40293+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40294+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40297+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40298+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40299+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40300+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40301+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40305+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40306+4 4 4 4 4 4
40307+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40308+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40311+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40312+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40313+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40314+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40315+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40319+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40320+4 4 4 4 4 4
40321+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40322+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40325+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40326+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40327+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40328+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40329+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40333+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40334+4 4 4 4 4 4
40335+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40336+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40339+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40340+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40341+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40342+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40343+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40347+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40348+4 4 4 4 4 4
40349+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40350+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40353+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
40354+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
40355+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40356+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40357+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40358+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
40359+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40360+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
40361+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40362+4 4 4 4 4 4
40363+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40364+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40367+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
40368+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
40369+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40370+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40371+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40372+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
40373+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
40374+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
40375+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40376+4 4 4 4 4 4
40377+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40378+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40381+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
40382+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
40383+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40384+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40385+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40386+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
40387+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
40388+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
40389+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
40390+4 4 4 4 4 4
40391+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40392+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40394+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
40395+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
40396+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
40397+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
40398+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40399+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40400+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
40401+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
40402+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
40403+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
40404+4 4 4 4 4 4
40405+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40406+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40408+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
40409+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
40410+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
40411+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
40412+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
40413+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
40414+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
40415+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
40416+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
40417+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
40418+4 4 4 4 4 4
40419+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40420+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
40422+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
40423+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
40424+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
40425+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
40426+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
40427+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
40428+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
40429+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
40430+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
40431+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
40432+4 4 4 4 4 4
40433+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40434+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40435+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
40436+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
40437+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
40438+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
40439+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
40440+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
40441+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
40442+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
40443+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
40444+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
40445+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
40446+4 4 4 4 4 4
40447+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40448+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40449+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
40450+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
40451+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
40452+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
40453+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
40454+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
40455+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
40456+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
40457+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
40458+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
40459+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
40460+4 4 4 4 4 4
40461+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40462+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40463+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
40464+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
40465+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
40466+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
40467+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
40468+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
40469+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
40470+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
40471+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
40472+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
40473+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
40474+4 4 4 4 4 4
40475+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40476+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40477+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
40478+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
40479+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
40480+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
40481+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
40482+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
40483+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
40484+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
40485+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
40486+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
40487+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
40488+4 4 4 4 4 4
40489+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40490+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
40491+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
40492+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
40493+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
40494+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
40495+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
40496+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
40497+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
40498+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
40499+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
40500+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
40501+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
40502+4 4 4 4 4 4
40503+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
40504+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
40505+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
40506+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
40507+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
40508+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
40509+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
40510+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
40511+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
40512+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
40513+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
40514+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
40515+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
40516+0 0 0 4 4 4
40517+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
40518+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
40519+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
40520+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
40521+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
40522+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
40523+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
40524+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
40525+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
40526+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
40527+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
40528+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
40529+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
40530+2 0 0 0 0 0
40531+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
40532+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
40533+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
40534+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
40535+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
40536+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
40537+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
40538+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
40539+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
40540+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
40541+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
40542+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
40543+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
40544+37 38 37 0 0 0
40545+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
40546+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
40547+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
40548+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
40549+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
40550+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
40551+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
40552+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
40553+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
40554+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
40555+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
40556+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
40557+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
40558+85 115 134 4 0 0
40559+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
40560+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
40561+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
40562+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
40563+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
40564+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
40565+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
40566+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
40567+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
40568+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
40569+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
40570+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
40571+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
40572+60 73 81 4 0 0
40573+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
40574+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
40575+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
40576+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
[Image data: the hunk continues with several hundred added rows of RGB pixel-value triplets; no further human-readable content.]
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index a159b63..4ab532d 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -620,11 +620,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
 dlfb_urb_completion(urb);

 error:
- atomic_add(bytes_sent, &dev->bytes_sent);
- atomic_add(bytes_identical, &dev->bytes_identical);
- atomic_add(width*height*2, &dev->bytes_rendered);
+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
 end_cycles = get_cycles();
- atomic_add(((unsigned int) ((end_cycles - start_cycles)
+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
 >> 10)), /* Kcycles */
 &dev->cpu_kcycles_used);

@@ -745,11 +745,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
 dlfb_urb_completion(urb);

 error:
- atomic_add(bytes_sent, &dev->bytes_sent);
- atomic_add(bytes_identical, &dev->bytes_identical);
- atomic_add(bytes_rendered, &dev->bytes_rendered);
+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
 end_cycles = get_cycles();
- atomic_add(((unsigned int) ((end_cycles - start_cycles)
+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
 >> 10)), /* Kcycles */
 &dev->cpu_kcycles_used);
 }
@@ -1373,7 +1373,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
 struct fb_info *fb_info = dev_get_drvdata(fbdev);
 struct dlfb_data *dev = fb_info->par;
 return snprintf(buf, PAGE_SIZE, "%u\n",
- atomic_read(&dev->bytes_rendered));
+ atomic_read_unchecked(&dev->bytes_rendered));
 }

 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
@@ -1381,7 +1381,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
 struct fb_info *fb_info = dev_get_drvdata(fbdev);
 struct dlfb_data *dev = fb_info->par;
 return snprintf(buf, PAGE_SIZE, "%u\n",
- atomic_read(&dev->bytes_identical));
+ atomic_read_unchecked(&dev->bytes_identical));
 }

 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
@@ -1389,7 +1389,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
 struct fb_info *fb_info = dev_get_drvdata(fbdev);
 struct dlfb_data *dev = fb_info->par;
 return snprintf(buf, PAGE_SIZE, "%u\n",
- atomic_read(&dev->bytes_sent));
+ atomic_read_unchecked(&dev->bytes_sent));
 }

 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
@@ -1397,7 +1397,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
 struct fb_info *fb_info = dev_get_drvdata(fbdev);
 struct dlfb_data *dev = fb_info->par;
 return snprintf(buf, PAGE_SIZE, "%u\n",
- atomic_read(&dev->cpu_kcycles_used));
+ atomic_read_unchecked(&dev->cpu_kcycles_used));
 }

 static ssize_t edid_show(
@@ -1457,10 +1457,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
 struct fb_info *fb_info = dev_get_drvdata(fbdev);
 struct dlfb_data *dev = fb_info->par;

- atomic_set(&dev->bytes_rendered, 0);
- atomic_set(&dev->bytes_identical, 0);
- atomic_set(&dev->bytes_sent, 0);
- atomic_set(&dev->cpu_kcycles_used, 0);
+ atomic_set_unchecked(&dev->bytes_rendered, 0);
+ atomic_set_unchecked(&dev->bytes_identical, 0);
+ atomic_set_unchecked(&dev->bytes_sent, 0);
+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);

 return count;
 }
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index b0e2a42..e2df3ad 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/moduleloader.h>
 #include <video/edid.h>
 #include <video/uvesafb.h>
 #ifdef CONFIG_X86
@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
 par->pmi_setpal = par->ypan = 0;
 } else {
+
+#ifdef CONFIG_PAX_KERNEXEC
+#ifdef CONFIG_MODULES
+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
+#endif
+ if (!par->pmi_code) {
+ par->pmi_setpal = par->ypan = 0;
+ return 0;
+ }
+#endif
+
 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
 + task->t.regs.edi);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+ pax_open_kernel();
+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
+ pax_close_kernel();
+
+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
+#else
 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
+#endif
+
 printk(KERN_INFO "uvesafb: protected mode interface info at "
 "%04x:%04x\n",
 (u16)task->t.regs.es, (u16)task->t.regs.edi);
@@ -816,13 +839,14 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
 par->ypan = ypan;

 if (par->pmi_setpal || par->ypan) {
+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
 if (__supported_pte_mask & _PAGE_NX) {
 par->pmi_setpal = par->ypan = 0;
 printk(KERN_WARNING "uvesafb: NX protection is actively."
 "We have better not to use the PMI.\n");
- } else {
+ } else
+#endif
 uvesafb_vbe_getpmi(task, par);
- }
 }
 #else
 /* The protected mode interface is not available on non-x86. */
@@ -1836,6 +1860,11 @@ out:
 if (par->vbe_modes)
 kfree(par->vbe_modes);

+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+ if (par->pmi_code)
+ module_free_exec(NULL, par->pmi_code);
+#endif
+
 framebuffer_release(info);
 return err;
 }
@@ -1862,6 +1891,12 @@ static int uvesafb_remove(struct platform_device *dev)
 kfree(par->vbe_state_orig);
 if (par->vbe_state_saved)
 kfree(par->vbe_state_saved);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+ if (par->pmi_code)
+ module_free_exec(NULL, par->pmi_code);
+#endif
+
 }

 framebuffer_release(info);
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index 501b340..86bd4cf 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -9,6 +9,7 @@
 */

 #include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
 static int vram_total __initdata; /* Set total amount of memory */
 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
-static void (*pmi_start)(void) __read_mostly;
-static void (*pmi_pal) (void) __read_mostly;
+static void (*pmi_start)(void) __read_only;
+static void (*pmi_pal) (void) __read_only;
 static int depth __read_mostly;
 static int vga_compat __read_mostly;
 /* --------------------------------------------------------------------- */
@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
 unsigned int size_vmode;
 unsigned int size_remap;
 unsigned int size_total;
+ void *pmi_code = NULL;

 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
 return -ENODEV;
@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
 size_remap = size_total;
 vesafb_fix.smem_len = size_remap;

-#ifndef __i386__
- screen_info.vesapm_seg = 0;
-#endif
-
 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
 printk(KERN_WARNING
 "vesafb: cannot reserve video memory at 0x%lx\n",
@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);

+#ifdef __i386__
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
+ if (!pmi_code)
+#elif !defined(CONFIG_PAX_KERNEXEC)
+ if (0)
+#endif
+
+#endif
+ screen_info.vesapm_seg = 0;
+
 if (screen_info.vesapm_seg) {
- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
- screen_info.vesapm_seg,screen_info.vesapm_off);
+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
 }

 if (screen_info.vesapm_seg < 0xc000)
@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)

 if (ypan || pmi_setpal) {
 unsigned short *pmi_base;
+
 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+ pax_open_kernel();
+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
+#else
+ pmi_code = pmi_base;
+#endif
+
+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+ pmi_start = ktva_ktla(pmi_start);
+ pmi_pal = ktva_ktla(pmi_pal);
+ pax_close_kernel();
+#endif
+
 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
 if (pmi_base[3]) {
 printk(KERN_INFO "vesafb: pmi: ports = ");
@@ -488,6 +514,11 @@ static int __init vesafb_probe(struct platform_device *dev)
 info->node, info->fix.id);
 return 0;
 err:
+
+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
+ module_free_exec(NULL, pmi_code);
+#endif
+
 if (info->screen_base)
 iounmap(info->screen_base);
 framebuffer_release(info);
diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
index 88714ae..16c2e11 100644
--- a/drivers/video/via/via_clock.h
+++ b/drivers/video/via/via_clock.h
@@ -56,7 +56,7 @@ struct via_clock {

 void (*set_engine_pll_state)(u8 state);
 void (*set_engine_pll)(struct via_pll_config config);
-};
+} __no_const;


 static inline u32 get_pll_internal_frequency(u32 ref_freq,
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index e56c934..fc22f4b 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -44,15 +44,15 @@ struct config_field {
 struct {
 conf_dword_write write;
 conf_dword_read read;
- } dw;
+ } __no_const dw;
 struct {
 conf_word_write write;
 conf_word_read read;
- } w;
+ } __no_const w;
 struct {
 conf_byte_write write;
 conf_byte_read read;
- } b;
+ } __no_const b;
 } u;
 struct list_head list;
 };
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 014c8dd..6f3dfe6 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -1303,7 +1303,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 void
 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
- char *s = nd_get_link(nd);
+ const char *s = nd_get_link(nd);

 p9_debug(P9_DEBUG_VFS, " %s %s\n",
 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index e95d1b6..3454244 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -89,7 +89,7 @@ config HAVE_AOUT

 config BINFMT_AOUT
 tristate "Kernel support for a.out and ECOFF binaries"
- depends on HAVE_AOUT
+ depends on HAVE_AOUT && BROKEN
 ---help---
 A.out (Assembler.OUTput) is a set of formats for libraries and
 executables used in the earliest versions of UNIX. Linux used
diff --git a/fs/aio.c b/fs/aio.c
index e7f2fad..15ad8a4 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -118,7 +118,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 size += sizeof(struct io_event) * nr_events;
 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

- if (nr_pages < 0)
+ if (nr_pages <= 0)
 return -EINVAL;

 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
@@ -1440,18 +1440,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
 {
 ssize_t ret;
+ struct iovec iovstack;

 #ifdef CONFIG_COMPAT
 if (compat)
 ret = compat_rw_copy_check_uvector(type,
 (struct compat_iovec __user *)kiocb->ki_buf,
- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
+ kiocb->ki_nbytes, 1, &iovstack,
 &kiocb->ki_iovec, 1);
 else
 #endif
 ret = rw_copy_check_uvector(type,
 (struct iovec __user *)kiocb->ki_buf,
- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
+ kiocb->ki_nbytes, 1, &iovstack,
 &kiocb->ki_iovec, 1);
 if (ret < 0)
 goto out;
@@ -1460,6 +1461,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
 if (ret < 0)
 goto out;

+ if (kiocb->ki_iovec == &iovstack) {
+ kiocb->ki_inline_vec = iovstack;
+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
+ }
 kiocb->ki_nr_segs = kiocb->ki_nbytes;
 kiocb->ki_cur_seg = 0;
 /* ki_nbytes/left now reflect bytes instead of segs */
diff --git a/fs/attr.c b/fs/attr.c
index d94d1b6..f9bccd6 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -99,6 +99,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
 unsigned long limit;

 limit = rlimit(RLIMIT_FSIZE);
+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
 if (limit != RLIM_INFINITY && offset > limit)
 goto out_sig;
 if (offset > inode->i_sb->s_maxbytes)
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index da8876d..9f3e6d8 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
 {
 unsigned long sigpipe, flags;
 mm_segment_t fs;
- const char *data = (const char *)addr;
+ const char __user *data = (const char __force_user *)addr;
 ssize_t wr = 0;

 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index e18da23..affc30e 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -502,7 +502,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
- char *link = nd_get_link(nd);
+ const char *link = nd_get_link(nd);
 if (!IS_ERR(link))
 kfree(link);
 }
fe2de317 41811diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
c6e2a6c8 41812index d146e18..12d1bd1 100644
fe2de317
MT
41813--- a/fs/binfmt_aout.c
41814+++ b/fs/binfmt_aout.c
58c5fc13
MT
41815@@ -16,6 +16,7 @@
41816 #include <linux/string.h>
41817 #include <linux/fs.h>
41818 #include <linux/file.h>
41819+#include <linux/security.h>
41820 #include <linux/stat.h>
41821 #include <linux/fcntl.h>
41822 #include <linux/ptrace.h>
c6e2a6c8 41823@@ -83,6 +84,8 @@ static int aout_core_dump(struct coredump_params *cprm)
6892158b
MT
41824 #endif
41825 # define START_STACK(u) ((void __user *)u.start_stack)
41826
41827+ memset(&dump, 0, sizeof(dump));
41828+
41829 fs = get_fs();
41830 set_fs(KERNEL_DS);
41831 has_dumped = 1;
c6e2a6c8 41832@@ -94,10 +97,12 @@ static int aout_core_dump(struct coredump_params *cprm)
58c5fc13
MT
41833
41834 /* If the size of the dump file exceeds the rlimit, then see what would happen
41835 if we wrote the stack, but not the data area. */
41836+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
ae4e228f 41837 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
41838 dump.u_dsize = 0;
41839
41840 /* Make sure we have enough room to write the stack and data areas. */
41841+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
ae4e228f 41842 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
41843 dump.u_ssize = 0;
41844
c6e2a6c8 41845@@ -231,6 +236,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
df50ba0c 41846 rlim = rlimit(RLIMIT_DATA);
41847 if (rlim >= RLIM_INFINITY)
41848 rlim = ~0;
41849+
41850+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
41851 if (ex.a_data + ex.a_bss > rlim)
41852 return -ENOMEM;
41853
41854@@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41855
58c5fc13 41856 install_exec_creds(bprm);
41857
41858+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41859+ current->mm->pax_flags = 0UL;
41860+#endif
41861+
41862+#ifdef CONFIG_PAX_PAGEEXEC
41863+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
41864+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
41865+
41866+#ifdef CONFIG_PAX_EMUTRAMP
41867+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
41868+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
41869+#endif
41870+
41871+#ifdef CONFIG_PAX_MPROTECT
41872+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
41873+ current->mm->pax_flags |= MF_PAX_MPROTECT;
41874+#endif
41875+
41876+ }
41877+#endif
41878+
41879 if (N_MAGIC(ex) == OMAGIC) {
41880 unsigned long text_addr, map_size;
41881 loff_t pos;
41882@@ -330,7 +358,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
41883 }
58c5fc13 41884
c6e2a6c8 41885 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
41886- PROT_READ | PROT_WRITE | PROT_EXEC,
41887+ PROT_READ | PROT_WRITE,
41888 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
41889 fd_offset + ex.a_text);
c6e2a6c8 41890 if (error != N_DATADDR(ex)) {
fe2de317 41891diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
572b4308 41892index 16f7354..7cc1e24 100644
41893--- a/fs/binfmt_elf.c
41894+++ b/fs/binfmt_elf.c
41895@@ -32,6 +32,7 @@
41896 #include <linux/elf.h>
41897 #include <linux/utsname.h>
41898 #include <linux/coredump.h>
41899+#include <linux/xattr.h>
41900 #include <asm/uaccess.h>
41901 #include <asm/param.h>
41902 #include <asm/page.h>
c6e2a6c8 41903@@ -52,6 +53,10 @@ static int elf_core_dump(struct coredump_params *cprm);
41904 #define elf_core_dump NULL
41905 #endif
41906
41907+#ifdef CONFIG_PAX_MPROTECT
41908+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
41909+#endif
41910+
41911 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
41912 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
41913 #else
c6e2a6c8 41914@@ -71,6 +76,11 @@ static struct linux_binfmt elf_format = {
41915 .load_binary = load_elf_binary,
41916 .load_shlib = load_elf_library,
41917 .core_dump = elf_core_dump,
41918+
41919+#ifdef CONFIG_PAX_MPROTECT
4c928ab7 41920+ .handle_mprotect= elf_handle_mprotect,
41921+#endif
41922+
16454cff 41923 .min_coredump = ELF_EXEC_PAGESIZE,
58c5fc13 41924 };
16454cff 41925
c6e2a6c8 41926@@ -78,6 +88,8 @@ static struct linux_binfmt elf_format = {
41927
41928 static int set_brk(unsigned long start, unsigned long end)
41929 {
41930+ unsigned long e = end;
41931+
41932 start = ELF_PAGEALIGN(start);
41933 end = ELF_PAGEALIGN(end);
41934 if (end > start) {
c6e2a6c8 41935@@ -86,7 +98,7 @@ static int set_brk(unsigned long start, unsigned long end)
41936 if (BAD_ADDR(addr))
41937 return addr;
41938 }
41939- current->mm->start_brk = current->mm->brk = end;
41940+ current->mm->start_brk = current->mm->brk = e;
41941 return 0;
41942 }
41943
c6e2a6c8 41944@@ -147,12 +159,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41945 elf_addr_t __user *u_rand_bytes;
41946 const char *k_platform = ELF_PLATFORM;
41947 const char *k_base_platform = ELF_BASE_PLATFORM;
41948- unsigned char k_rand_bytes[16];
41949+ u32 k_rand_bytes[4];
41950 int items;
41951 elf_addr_t *elf_info;
41952 int ei_index = 0;
41953 const struct cred *cred = current_cred();
41954 struct vm_area_struct *vma;
41955+ unsigned long saved_auxv[AT_VECTOR_SIZE];
41956
41957 /*
41958 * In some cases (e.g. Hyper-Threading), we want to avoid L1
c6e2a6c8 41959@@ -194,8 +207,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41960 * Generate 16 random bytes for userspace PRNG seeding.
41961 */
41962 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
41963- u_rand_bytes = (elf_addr_t __user *)
41964- STACK_ALLOC(p, sizeof(k_rand_bytes));
41965+ srandom32(k_rand_bytes[0] ^ random32());
41966+ srandom32(k_rand_bytes[1] ^ random32());
41967+ srandom32(k_rand_bytes[2] ^ random32());
41968+ srandom32(k_rand_bytes[3] ^ random32());
41969+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
41970+ u_rand_bytes = (elf_addr_t __user *) p;
58c5fc13 41971 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
41972 return -EFAULT;
41973
c6e2a6c8 41974@@ -307,9 +324,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
41975 return -EFAULT;
41976 current->mm->env_end = p;
41977
41978+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
41979+
41980 /* Put the elf_info on the stack in the right place. */
41981 sp = (elf_addr_t __user *)envp + 1;
41982- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
41983+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
41984 return -EFAULT;
41985 return 0;
41986 }
c6e2a6c8 41987@@ -380,10 +399,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
41988 {
41989 struct elf_phdr *elf_phdata;
41990 struct elf_phdr *eppnt;
41991- unsigned long load_addr = 0;
41992+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
41993 int load_addr_set = 0;
41994 unsigned long last_bss = 0, elf_bss = 0;
41995- unsigned long error = ~0UL;
41996+ unsigned long error = -EINVAL;
41997 unsigned long total_size;
41998 int retval, i, size;
41999
c6e2a6c8 42000@@ -429,6 +448,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42001 goto out_close;
42002 }
42003
42004+#ifdef CONFIG_PAX_SEGMEXEC
42005+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
42006+ pax_task_size = SEGMEXEC_TASK_SIZE;
42007+#endif
42008+
42009 eppnt = elf_phdata;
42010 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
42011 if (eppnt->p_type == PT_LOAD) {
c6e2a6c8 42012@@ -472,8 +496,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
42013 k = load_addr + eppnt->p_vaddr;
42014 if (BAD_ADDR(k) ||
42015 eppnt->p_filesz > eppnt->p_memsz ||
42016- eppnt->p_memsz > TASK_SIZE ||
42017- TASK_SIZE - eppnt->p_memsz < k) {
42018+ eppnt->p_memsz > pax_task_size ||
42019+ pax_task_size - eppnt->p_memsz < k) {
42020 error = -ENOMEM;
42021 goto out_close;
42022 }
572b4308 42023@@ -525,6 +549,311 @@ out:
42024 return error;
42025 }
42026
42027+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42028+#ifdef CONFIG_PAX_SOFTMODE
4c928ab7 42029+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
42030+{
42031+ unsigned long pax_flags = 0UL;
42032+
42033+#ifdef CONFIG_PAX_PAGEEXEC
42034+ if (elf_phdata->p_flags & PF_PAGEEXEC)
42035+ pax_flags |= MF_PAX_PAGEEXEC;
42036+#endif
42037+
42038+#ifdef CONFIG_PAX_SEGMEXEC
42039+ if (elf_phdata->p_flags & PF_SEGMEXEC)
42040+ pax_flags |= MF_PAX_SEGMEXEC;
42041+#endif
42042+
42043+#ifdef CONFIG_PAX_EMUTRAMP
42044+ if (elf_phdata->p_flags & PF_EMUTRAMP)
42045+ pax_flags |= MF_PAX_EMUTRAMP;
42046+#endif
42047+
42048+#ifdef CONFIG_PAX_MPROTECT
42049+ if (elf_phdata->p_flags & PF_MPROTECT)
42050+ pax_flags |= MF_PAX_MPROTECT;
42051+#endif
42052+
42053+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42054+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
42055+ pax_flags |= MF_PAX_RANDMMAP;
42056+#endif
42057+
42058+ return pax_flags;
42059+}
572b4308 42060+#endif
58c5fc13 42061+
4c928ab7 42062+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
42063+{
42064+ unsigned long pax_flags = 0UL;
42065+
42066+#ifdef CONFIG_PAX_PAGEEXEC
42067+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
42068+ pax_flags |= MF_PAX_PAGEEXEC;
42069+#endif
42070+
42071+#ifdef CONFIG_PAX_SEGMEXEC
42072+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
42073+ pax_flags |= MF_PAX_SEGMEXEC;
42074+#endif
42075+
42076+#ifdef CONFIG_PAX_EMUTRAMP
42077+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
42078+ pax_flags |= MF_PAX_EMUTRAMP;
42079+#endif
42080+
42081+#ifdef CONFIG_PAX_MPROTECT
42082+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
42083+ pax_flags |= MF_PAX_MPROTECT;
42084+#endif
42085+
42086+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42087+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
42088+ pax_flags |= MF_PAX_RANDMMAP;
42089+#endif
42090+
42091+ return pax_flags;
42092+}
42093+#endif
42094+
42095+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42096+#ifdef CONFIG_PAX_SOFTMODE
42097+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
42098+{
42099+ unsigned long pax_flags = 0UL;
42100+
42101+#ifdef CONFIG_PAX_PAGEEXEC
42102+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
42103+ pax_flags |= MF_PAX_PAGEEXEC;
42104+#endif
42105+
42106+#ifdef CONFIG_PAX_SEGMEXEC
42107+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
42108+ pax_flags |= MF_PAX_SEGMEXEC;
42109+#endif
42110+
42111+#ifdef CONFIG_PAX_EMUTRAMP
42112+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
42113+ pax_flags |= MF_PAX_EMUTRAMP;
42114+#endif
42115+
42116+#ifdef CONFIG_PAX_MPROTECT
42117+ if (pax_flags_softmode & MF_PAX_MPROTECT)
42118+ pax_flags |= MF_PAX_MPROTECT;
42119+#endif
42120+
42121+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42122+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
42123+ pax_flags |= MF_PAX_RANDMMAP;
42124+#endif
42125+
42126+ return pax_flags;
42127+}
42128+#endif
42129+
42130+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
42131+{
42132+ unsigned long pax_flags = 0UL;
42133+
42134+#ifdef CONFIG_PAX_PAGEEXEC
42135+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
42136+ pax_flags |= MF_PAX_PAGEEXEC;
42137+#endif
42138+
42139+#ifdef CONFIG_PAX_SEGMEXEC
42140+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
42141+ pax_flags |= MF_PAX_SEGMEXEC;
42142+#endif
42143+
42144+#ifdef CONFIG_PAX_EMUTRAMP
42145+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
42146+ pax_flags |= MF_PAX_EMUTRAMP;
42147+#endif
42148+
42149+#ifdef CONFIG_PAX_MPROTECT
42150+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
42151+ pax_flags |= MF_PAX_MPROTECT;
42152+#endif
42153+
42154+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
42155+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
42156+ pax_flags |= MF_PAX_RANDMMAP;
42157+#endif
42158+
42159+ return pax_flags;
42160+}
572b4308 42161+#endif
58c5fc13 42162+
572b4308 42163+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42164+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
42165+{
42166+ unsigned long pax_flags = 0UL;
42167+
42168+#ifdef CONFIG_PAX_EI_PAX
42169+
42170+#ifdef CONFIG_PAX_PAGEEXEC
42171+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
42172+ pax_flags |= MF_PAX_PAGEEXEC;
42173+#endif
42174+
42175+#ifdef CONFIG_PAX_SEGMEXEC
42176+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
42177+ pax_flags |= MF_PAX_SEGMEXEC;
42178+#endif
42179+
42180+#ifdef CONFIG_PAX_EMUTRAMP
42181+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
42182+ pax_flags |= MF_PAX_EMUTRAMP;
42183+#endif
42184+
42185+#ifdef CONFIG_PAX_MPROTECT
42186+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
42187+ pax_flags |= MF_PAX_MPROTECT;
42188+#endif
42189+
42190+#ifdef CONFIG_PAX_ASLR
42191+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
42192+ pax_flags |= MF_PAX_RANDMMAP;
42193+#endif
42194+
42195+#else
42196+
42197+#ifdef CONFIG_PAX_PAGEEXEC
42198+ pax_flags |= MF_PAX_PAGEEXEC;
42199+#endif
42200+
42201+#ifdef CONFIG_PAX_SEGMEXEC
42202+ pax_flags |= MF_PAX_SEGMEXEC;
42203+#endif
42204+
42205+#ifdef CONFIG_PAX_MPROTECT
42206+ pax_flags |= MF_PAX_MPROTECT;
42207+#endif
58c5fc13 42208+
4c928ab7 42209+#ifdef CONFIG_PAX_RANDMMAP
42210+ if (randomize_va_space)
42211+ pax_flags |= MF_PAX_RANDMMAP;
42212+#endif
42213+
42214+#endif
42215+
42216+ return pax_flags;
42217+}
42218+
42219+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
42220+{
42221+
58c5fc13 42222+#ifdef CONFIG_PAX_PT_PAX_FLAGS
42223+ unsigned long i;
42224+
42225+ for (i = 0UL; i < elf_ex->e_phnum; i++)
42226+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
42227+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
42228+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
42229+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
42230+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
42231+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
4c928ab7 42232+ return ~0UL;
42233+
42234+#ifdef CONFIG_PAX_SOFTMODE
42235+ if (pax_softmode)
4c928ab7 42236+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
42237+ else
42238+#endif
42239+
4c928ab7 42240+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
42241+ break;
42242+ }
42243+#endif
42244+
42245+ return ~0UL;
42246+}
42247+
42248+static unsigned long pax_parse_xattr_pax(struct file * const file)
42249+{
42250+
42251+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
42252+ ssize_t xattr_size, i;
42253+ unsigned char xattr_value[5];
42254+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
42255+
42256+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
42257+ if (xattr_size <= 0)
42258+ return ~0UL;
42259+
42260+ for (i = 0; i < xattr_size; i++)
42261+ switch (xattr_value[i]) {
42262+ default:
42263+ return ~0UL;
42264+
42265+#define parse_flag(option1, option2, flag) \
42266+ case option1: \
42267+ pax_flags_hardmode |= MF_PAX_##flag; \
42268+ break; \
42269+ case option2: \
42270+ pax_flags_softmode |= MF_PAX_##flag; \
42271+ break;
42272+
42273+ parse_flag('p', 'P', PAGEEXEC);
42274+ parse_flag('e', 'E', EMUTRAMP);
42275+ parse_flag('m', 'M', MPROTECT);
42276+ parse_flag('r', 'R', RANDMMAP);
42277+ parse_flag('s', 'S', SEGMEXEC);
42278+
42279+#undef parse_flag
42280+ }
42281+
42282+ if (pax_flags_hardmode & pax_flags_softmode)
42283+ return ~0UL;
42284+
42285+#ifdef CONFIG_PAX_SOFTMODE
42286+ if (pax_softmode)
42287+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
42288+ else
42289+#endif
42290+
42291+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
42292+#else
42293+ return ~0UL;
42294+#endif
42295+
42296+}
42297+
42298+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
42299+{
42300+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
42301+
42302+ pax_flags = pax_parse_ei_pax(elf_ex);
42303+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
42304+ xattr_pax_flags = pax_parse_xattr_pax(file);
42305+
42306+ if (pt_pax_flags == ~0UL)
42307+ pt_pax_flags = xattr_pax_flags;
42308+ else if (xattr_pax_flags == ~0UL)
42309+ xattr_pax_flags = pt_pax_flags;
42310+ if (pt_pax_flags != xattr_pax_flags)
42311+ return -EINVAL;
42312+ if (pt_pax_flags != ~0UL)
42313+ pax_flags = pt_pax_flags;
42314+
42315+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
42316+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42317+ if ((__supported_pte_mask & _PAGE_NX))
42318+ pax_flags &= ~MF_PAX_SEGMEXEC;
42319+ else
42320+ pax_flags &= ~MF_PAX_PAGEEXEC;
42321+ }
42322+#endif
42323+
42324+ if (0 > pax_check_flags(&pax_flags))
42325+ return -EINVAL;
42326+
42327+ current->mm->pax_flags = pax_flags;
42328+ return 0;
42329+}
42330+#endif
42331+
42332 /*
42333 * These are the functions used to load ELF style executables and shared
42334 * libraries. There is no binary dependent code anywhere else.
572b4308 42335@@ -541,6 +870,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
42336 {
42337 unsigned int random_variable = 0;
42338
42339+#ifdef CONFIG_PAX_RANDUSTACK
42340+ if (randomize_va_space)
42341+ return stack_top - current->mm->delta_stack;
42342+#endif
42343+
42344 if ((current->flags & PF_RANDOMIZE) &&
42345 !(current->personality & ADDR_NO_RANDOMIZE)) {
42346 random_variable = get_random_int() & STACK_RND_MASK;
572b4308 42347@@ -559,7 +893,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42348 unsigned long load_addr = 0, load_bias = 0;
42349 int load_addr_set = 0;
42350 char * elf_interpreter = NULL;
42351- unsigned long error;
42352+ unsigned long error = 0;
42353 struct elf_phdr *elf_ppnt, *elf_phdata;
42354 unsigned long elf_bss, elf_brk;
42355 int retval, i;
572b4308 42356@@ -569,11 +903,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
58c5fc13 42357 unsigned long start_code, end_code, start_data, end_data;
66a7e928 42358 unsigned long reloc_func_desc __maybe_unused = 0;
42359 int executable_stack = EXSTACK_DEFAULT;
42360- unsigned long def_flags = 0;
42361 struct {
42362 struct elfhdr elf_ex;
42363 struct elfhdr interp_elf_ex;
42364 } *loc;
42365+ unsigned long pax_task_size = TASK_SIZE;
42366
42367 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
42368 if (!loc) {
572b4308 42369@@ -709,11 +1043,81 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
c6e2a6c8 42370 goto out_free_dentry;
42371
42372 /* OK, This is the point of no return */
42373- current->mm->def_flags = def_flags;
42374+
42375+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42376+ current->mm->pax_flags = 0UL;
42377+#endif
42378+
42379+#ifdef CONFIG_PAX_DLRESOLVE
42380+ current->mm->call_dl_resolve = 0UL;
42381+#endif
42382+
42383+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
42384+ current->mm->call_syscall = 0UL;
42385+#endif
42386+
42387+#ifdef CONFIG_PAX_ASLR
42388+ current->mm->delta_mmap = 0UL;
42389+ current->mm->delta_stack = 0UL;
42390+#endif
42391+
42392+ current->mm->def_flags = 0;
42393+
572b4308 42394+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
4c928ab7 42395+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
42396+ send_sig(SIGKILL, current, 0);
42397+ goto out_free_dentry;
42398+ }
42399+#endif
42400+
42401+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
42402+ pax_set_initial_flags(bprm);
42403+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
42404+ if (pax_set_initial_flags_func)
42405+ (pax_set_initial_flags_func)(bprm);
42406+#endif
42407+
42408+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
ae4e228f 42409+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
42410+ current->mm->context.user_cs_limit = PAGE_SIZE;
42411+ current->mm->def_flags |= VM_PAGEEXEC;
42412+ }
42413+#endif
42414+
42415+#ifdef CONFIG_PAX_SEGMEXEC
42416+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
42417+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
42418+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
42419+ pax_task_size = SEGMEXEC_TASK_SIZE;
66a7e928 42420+ current->mm->def_flags |= VM_NOHUGEPAGE;
42421+ }
42422+#endif
42423+
42424+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
42425+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42426+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
42427+ put_cpu();
42428+ }
42429+#endif
42430
42431 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
42432 may depend on the personality. */
42433 SET_PERSONALITY(loc->elf_ex);
42434+
42435+#ifdef CONFIG_PAX_ASLR
42436+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42437+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
42438+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
42439+ }
42440+#endif
42441+
42442+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
42443+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
42444+ executable_stack = EXSTACK_DISABLE_X;
42445+ current->personality &= ~READ_IMPLIES_EXEC;
42446+ } else
42447+#endif
42448+
42449 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
42450 current->personality |= READ_IMPLIES_EXEC;
42451
572b4308 42452@@ -804,6 +1208,20 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42453 #else
42454 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
42455 #endif
42456+
42457+#ifdef CONFIG_PAX_RANDMMAP
42458+ /* PaX: randomize base address at the default exe base if requested */
42459+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
42460+#ifdef CONFIG_SPARC64
42461+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
42462+#else
42463+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
42464+#endif
42465+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
42466+ elf_flags |= MAP_FIXED;
42467+ }
42468+#endif
42469+
42470 }
42471
42472 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
572b4308 42473@@ -836,9 +1254,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42474 * allowed task size. Note that p_filesz must always be
42475 * <= p_memsz so it is only necessary to check p_memsz.
42476 */
42477- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42478- elf_ppnt->p_memsz > TASK_SIZE ||
42479- TASK_SIZE - elf_ppnt->p_memsz < k) {
42480+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
42481+ elf_ppnt->p_memsz > pax_task_size ||
42482+ pax_task_size - elf_ppnt->p_memsz < k) {
42483 /* set_brk can never work. Avoid overflows. */
42484 send_sig(SIGKILL, current, 0);
42485 retval = -EINVAL;
572b4308 42486@@ -877,11 +1295,40 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
42487 goto out_free_dentry;
42488 }
42489 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
42490- send_sig(SIGSEGV, current, 0);
42491- retval = -EFAULT; /* Nobody gets to see this, but.. */
42492- goto out_free_dentry;
42493+ /*
42494+ * This bss-zeroing can fail if the ELF
42495+ * file specifies odd protections. So
42496+ * we don't check the return value
42497+ */
42498 }
42499
42500+#ifdef CONFIG_PAX_RANDMMAP
42501+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
42502+ unsigned long start, size;
42503+
42504+ start = ELF_PAGEALIGN(elf_brk);
42505+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
42506+ down_write(&current->mm->mmap_sem);
42507+ retval = -ENOMEM;
42508+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
42509+ unsigned long prot = PROT_NONE;
42510+
42511+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
42512+// if (current->personality & ADDR_NO_RANDOMIZE)
42513+// prot = PROT_READ;
42514+ start = do_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
42515+ retval = IS_ERR_VALUE(start) ? start : 0;
42516+ }
42517+ up_write(&current->mm->mmap_sem);
42518+ if (retval == 0)
42519+ retval = set_brk(start + size, start + size + PAGE_SIZE);
42520+ if (retval < 0) {
42521+ send_sig(SIGKILL, current, 0);
42522+ goto out_free_dentry;
42523+ }
42524+ }
42525+#endif
42526+
58c5fc13 42527 if (elf_interpreter) {
42528 unsigned long uninitialized_var(interp_map_addr);
42529
572b4308 42530@@ -1109,7 +1556,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
42531 * Decide what to dump of a segment, part, all or none.
42532 */
42533 static unsigned long vma_dump_size(struct vm_area_struct *vma,
42534- unsigned long mm_flags)
42535+ unsigned long mm_flags, long signr)
42536 {
42537 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
42538
572b4308 42539@@ -1146,7 +1593,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
42540 if (vma->vm_file == NULL)
42541 return 0;
42542
42543- if (FILTER(MAPPED_PRIVATE))
42544+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
42545 goto whole;
42546
42547 /*
572b4308 42548@@ -1368,9 +1815,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
42549 {
42550 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
42551 int i = 0;
42552- do
42553+ do {
42554 i += 2;
42555- while (auxv[i - 2] != AT_NULL);
42556+ } while (auxv[i - 2] != AT_NULL);
42557 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
42558 }
42559
572b4308 42560@@ -1892,14 +2339,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
42561 }
42562
42563 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
42564- unsigned long mm_flags)
42565+ struct coredump_params *cprm)
42566 {
42567 struct vm_area_struct *vma;
42568 size_t size = 0;
42569
42570 for (vma = first_vma(current, gate_vma); vma != NULL;
42571 vma = next_vma(vma, gate_vma))
42572- size += vma_dump_size(vma, mm_flags);
42573+ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42574 return size;
42575 }
42576
572b4308 42577@@ -1993,7 +2440,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42578
42579 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
42580
42581- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
42582+ offset += elf_core_vma_data_size(gate_vma, cprm);
42583 offset += elf_core_extra_data_size();
42584 e_shoff = offset;
42585
572b4308 42586@@ -2007,10 +2454,12 @@ static int elf_core_dump(struct coredump_params *cprm)
42587 offset = dataoff;
42588
42589 size += sizeof(*elf);
42590+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42591 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
42592 goto end_coredump;
42593
42594 size += sizeof(*phdr4note);
42595+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42596 if (size > cprm->limit
42597 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
42598 goto end_coredump;
572b4308 42599@@ -2024,7 +2473,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42600 phdr.p_offset = offset;
42601 phdr.p_vaddr = vma->vm_start;
42602 phdr.p_paddr = 0;
42603- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
42604+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42605 phdr.p_memsz = vma->vm_end - vma->vm_start;
42606 offset += phdr.p_filesz;
42607 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
572b4308 42608@@ -2035,6 +2484,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42609 phdr.p_align = ELF_EXEC_PAGESIZE;
42610
42611 size += sizeof(phdr);
42612+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42613 if (size > cprm->limit
42614 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
42615 goto end_coredump;
572b4308 42616@@ -2059,7 +2509,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42617 unsigned long addr;
42618 unsigned long end;
42619
42620- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
42621+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
42622
42623 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
42624 struct page *page;
572b4308 42625@@ -2068,6 +2518,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42626 page = get_dump_page(addr);
42627 if (page) {
42628 void *kaddr = kmap(page);
42629+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
42630 stop = ((size += PAGE_SIZE) > cprm->limit) ||
42631 !dump_write(cprm->file, kaddr,
42632 PAGE_SIZE);
572b4308 42633@@ -2085,6 +2536,7 @@ static int elf_core_dump(struct coredump_params *cprm)
42634
42635 if (e_phnum == PN_XNUM) {
42636 size += sizeof(*shdr4extnum);
42637+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
42638 if (size > cprm->limit
42639 || !dump_write(cprm->file, shdr4extnum,
42640 sizeof(*shdr4extnum)))
572b4308 42641@@ -2105,6 +2557,97 @@ out:
42642
42643 #endif /* CONFIG_ELF_CORE */
42644
42645+#ifdef CONFIG_PAX_MPROTECT
42646+/* PaX: non-PIC ELF libraries need relocations on their executable segments
42647+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
42648+ * we'll remove VM_MAYWRITE for good on RELRO segments.
42649+ *
42650+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
42651+ * basis because we want to allow the common case and not the special ones.
42652+ */
42653+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
42654+{
42655+ struct elfhdr elf_h;
42656+ struct elf_phdr elf_p;
42657+ unsigned long i;
42658+ unsigned long oldflags;
42659+ bool is_textrel_rw, is_textrel_rx, is_relro;
42660+
42661+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
42662+ return;
42663+
42664+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
42665+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
42666+
57199397 42667+#ifdef CONFIG_PAX_ELFRELOCS
42668+ /* possible TEXTREL */
42669+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
42670+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
42671+#else
42672+ is_textrel_rw = false;
42673+ is_textrel_rx = false;
42674+#endif
42675+
42676+ /* possible RELRO */
42677+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
42678+
42679+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
42680+ return;
42681+
42682+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
42683+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
42684+
42685+#ifdef CONFIG_PAX_ETEXECRELOCS
42686+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42687+#else
42688+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
42689+#endif
42690+
42691+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
42692+ !elf_check_arch(&elf_h) ||
42693+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
42694+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
42695+ return;
42696+
42697+ for (i = 0UL; i < elf_h.e_phnum; i++) {
42698+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
42699+ return;
42700+ switch (elf_p.p_type) {
42701+ case PT_DYNAMIC:
42702+ if (!is_textrel_rw && !is_textrel_rx)
42703+ continue;
42704+ i = 0UL;
42705+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
42706+ elf_dyn dyn;
42707+
42708+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
42709+ return;
42710+ if (dyn.d_tag == DT_NULL)
42711+ return;
42712+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
42713+ gr_log_textrel(vma);
42714+ if (is_textrel_rw)
42715+ vma->vm_flags |= VM_MAYWRITE;
42716+ else
42717+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
42718+ vma->vm_flags &= ~VM_MAYWRITE;
42719+ return;
42720+ }
42721+ i++;
42722+ }
42723+ return;
42724+
42725+ case PT_GNU_RELRO:
42726+ if (!is_relro)
42727+ continue;
42728+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
42729+ vma->vm_flags &= ~VM_MAYWRITE;
42730+ return;
42731+ }
42732+ }
42733+}
42734+#endif
42735+
42736 static int __init init_elf_binfmt(void)
42737 {
c6e2a6c8 42738 register_binfmt(&elf_format);
fe2de317 42739diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
c6e2a6c8 42740index 6b2daf9..a70dccb 100644
42741--- a/fs/binfmt_flat.c
42742+++ b/fs/binfmt_flat.c
c6e2a6c8 42743@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
42744 realdatastart = (unsigned long) -ENOMEM;
42745 printk("Unable to allocate RAM for process data, errno %d\n",
42746 (int)-realdatastart);
42747+ down_write(&current->mm->mmap_sem);
42748 do_munmap(current->mm, textpos, text_len);
42749+ up_write(&current->mm->mmap_sem);
42750 ret = realdatastart;
42751 goto err;
42752 }
c6e2a6c8 42753@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
58c5fc13 42754 }
ae4e228f 42755 if (IS_ERR_VALUE(result)) {
42756 printk("Unable to read data+bss, errno %d\n", (int)-result);
42757+ down_write(&current->mm->mmap_sem);
42758 do_munmap(current->mm, textpos, text_len);
57199397 42759 do_munmap(current->mm, realdatastart, len);
42760+ up_write(&current->mm->mmap_sem);
42761 ret = result;
42762 goto err;
42763 }
c6e2a6c8 42764@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
58c5fc13 42765 }
ae4e228f 42766 if (IS_ERR_VALUE(result)) {
42767 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
42768+ down_write(&current->mm->mmap_sem);
42769 do_munmap(current->mm, textpos, text_len + data_len + extra +
42770 MAX_SHARED_LIBS * sizeof(unsigned long));
42771+ up_write(&current->mm->mmap_sem);
42772 ret = result;
42773 goto err;
42774 }
fe2de317 42775diff --git a/fs/bio.c b/fs/bio.c
572b4308 42776index 84da885..bac1d48 100644
42777--- a/fs/bio.c
42778+++ b/fs/bio.c
c6e2a6c8 42779@@ -838,7 +838,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
42780 /*
42781 * Overflow, abort
42782 */
42783- if (end < start)
42784+ if (end < start || end - start > INT_MAX - nr_pages)
42785 return ERR_PTR(-EINVAL);
42786
42787 nr_pages += end - start;
42788@@ -972,7 +972,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
42789 /*
42790 * Overflow, abort
42791 */
42792- if (end < start)
42793+ if (end < start || end - start > INT_MAX - nr_pages)
42794 return ERR_PTR(-EINVAL);
42795
42796 nr_pages += end - start;
c6e2a6c8 42797@@ -1234,7 +1234,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
42798 const int read = bio_data_dir(bio) == READ;
42799 struct bio_map_data *bmd = bio->bi_private;
42800 int i;
42801- char *p = bmd->sgvecs[0].iov_base;
6e9df6a3 42802+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
42803
42804 __bio_for_each_segment(bvec, bio, i, 0) {
42805 char *addr = page_address(bvec->bv_page);
fe2de317 42806diff --git a/fs/block_dev.c b/fs/block_dev.c
c6e2a6c8 42807index ba11c30..623d736 100644
42808--- a/fs/block_dev.c
42809+++ b/fs/block_dev.c
c6e2a6c8 42810@@ -704,7 +704,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
df50ba0c 42811 else if (bdev->bd_contains == bdev)
57199397
MT
42812 return true; /* is a whole device which isn't held */
42813
42814- else if (whole->bd_holder == bd_may_claim)
42815+ else if (whole->bd_holder == (void *)bd_may_claim)
42816 return true; /* is a partition of a device that is being partitioned */
42817 else if (whole->bd_holder != NULL)
42818 return false; /* is a partition of a held device */
5e856224 42819diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
c6e2a6c8 42820index c053e90..e5f1afc 100644
42821--- a/fs/btrfs/check-integrity.c
42822+++ b/fs/btrfs/check-integrity.c
c6e2a6c8 42823@@ -156,7 +156,7 @@ struct btrfsic_block {
42824 union {
42825 bio_end_io_t *bio;
42826 bh_end_io_t *bh;
42827- } orig_bio_bh_end_io;
42828+ } __no_const orig_bio_bh_end_io;
42829 int submit_bio_bh_rw;
42830 u64 flush_gen; /* only valid if !never_written */
42831 };
fe2de317 42832diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
c6e2a6c8 42833index 4106264..8157ede 100644
42834--- a/fs/btrfs/ctree.c
42835+++ b/fs/btrfs/ctree.c
c6e2a6c8 42836@@ -513,9 +513,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
42837 free_extent_buffer(buf);
42838 add_root_to_dirty_list(root);
42839 } else {
42840- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
42841- parent_start = parent->start;
42842- else
42843+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
42844+ if (parent)
42845+ parent_start = parent->start;
42846+ else
42847+ parent_start = 0;
42848+ } else
42849 parent_start = 0;
42850
42851 WARN_ON(trans->transid != btrfs_header_generation(parent));
fe2de317 42852diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
c6e2a6c8 42853index 0df0d1f..4bdcbfe 100644
42854--- a/fs/btrfs/inode.c
42855+++ b/fs/btrfs/inode.c
c6e2a6c8 42856@@ -7074,7 +7074,7 @@ fail:
42857 return -ENOMEM;
42858 }
42859
42860-static int btrfs_getattr(struct vfsmount *mnt,
42861+int btrfs_getattr(struct vfsmount *mnt,
42862 struct dentry *dentry, struct kstat *stat)
42863 {
42864 struct inode *inode = dentry->d_inode;
c6e2a6c8 42865@@ -7088,6 +7088,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
42866 return 0;
42867 }
42868
42869+EXPORT_SYMBOL(btrfs_getattr);
42870+
42871+dev_t get_btrfs_dev_from_inode(struct inode *inode)
42872+{
6e9df6a3 42873+ return BTRFS_I(inode)->root->anon_dev;
42874+}
42875+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
42876+
42877 /*
42878 * If a file is moved, it will inherit the cow and compression flags of the new
42879 * directory.
fe2de317 42880diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
c6e2a6c8 42881index 14f8e1f..ab8d81f 100644
42882--- a/fs/btrfs/ioctl.c
42883+++ b/fs/btrfs/ioctl.c
c6e2a6c8 42884@@ -2882,9 +2882,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42885 for (i = 0; i < num_types; i++) {
42886 struct btrfs_space_info *tmp;
42887
42888+ /* Don't copy in more than we allocated */
42889 if (!slot_count)
42890 break;
42891
c52201e0
MT
42892+ slot_count--;
42893+
42894 info = NULL;
42895 rcu_read_lock();
42896 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
c6e2a6c8 42897@@ -2906,15 +2909,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
42898 memcpy(dest, &space, sizeof(space));
42899 dest++;
42900 space_args.total_spaces++;
42901- slot_count--;
42902 }
42903- if (!slot_count)
42904- break;
42905 }
42906 up_read(&info->groups_sem);
42907 }
42908
42909- user_dest = (struct btrfs_ioctl_space_info *)
42910+ user_dest = (struct btrfs_ioctl_space_info __user *)
42911 (arg + sizeof(struct btrfs_ioctl_space_args));
42912
42913 if (copy_to_user(user_dest, dest_orig, alloc_size))
fe2de317 42914diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
c6e2a6c8 42915index 646ee21..f020f87 100644
42916--- a/fs/btrfs/relocation.c
42917+++ b/fs/btrfs/relocation.c
c6e2a6c8 42918@@ -1268,7 +1268,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
42919 }
42920 spin_unlock(&rc->reloc_root_tree.lock);
42921
42922- BUG_ON((struct btrfs_root *)node->data != root);
42923+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
42924
42925 if (!del) {
42926 spin_lock(&rc->reloc_root_tree.lock);
42927diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
42928index 622f469..e8d2d55 100644
42929--- a/fs/cachefiles/bind.c
42930+++ b/fs/cachefiles/bind.c
42931@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
42932 args);
42933
42934 /* start by checking things over */
42935- ASSERT(cache->fstop_percent >= 0 &&
42936- cache->fstop_percent < cache->fcull_percent &&
42937+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
42938 cache->fcull_percent < cache->frun_percent &&
42939 cache->frun_percent < 100);
42940
42941- ASSERT(cache->bstop_percent >= 0 &&
42942- cache->bstop_percent < cache->bcull_percent &&
42943+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
42944 cache->bcull_percent < cache->brun_percent &&
42945 cache->brun_percent < 100);
42946
42947diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
42948index 0a1467b..6a53245 100644
42949--- a/fs/cachefiles/daemon.c
42950+++ b/fs/cachefiles/daemon.c
42951@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
42952 if (n > buflen)
42953 return -EMSGSIZE;
42954
42955- if (copy_to_user(_buffer, buffer, n) != 0)
42956+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
42957 return -EFAULT;
58c5fc13 42958
ae4e228f 42959 return n;
fe2de317 42960@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
42961 if (test_bit(CACHEFILES_DEAD, &cache->flags))
42962 return -EIO;
42963
42964- if (datalen < 0 || datalen > PAGE_SIZE - 1)
42965+ if (datalen > PAGE_SIZE - 1)
42966 return -EOPNOTSUPP;
42967
42968 /* drag the command string into the kernel so we can parse it */
fe2de317 42969@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
42970 if (args[0] != '%' || args[1] != '\0')
42971 return -EINVAL;
42972
42973- if (fstop < 0 || fstop >= cache->fcull_percent)
42974+ if (fstop >= cache->fcull_percent)
42975 return cachefiles_daemon_range_error(cache, args);
42976
42977 cache->fstop_percent = fstop;
fe2de317 42978@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
42979 if (args[0] != '%' || args[1] != '\0')
42980 return -EINVAL;
42981
42982- if (bstop < 0 || bstop >= cache->bcull_percent)
42983+ if (bstop >= cache->bcull_percent)
42984 return cachefiles_daemon_range_error(cache, args);
42985
42986 cache->bstop_percent = bstop;
42987diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
42988index bd6bc1b..b627b53 100644
42989--- a/fs/cachefiles/internal.h
42990+++ b/fs/cachefiles/internal.h
42991@@ -57,7 +57,7 @@ struct cachefiles_cache {
42992 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
42993 struct rb_root active_nodes; /* active nodes (can't be culled) */
42994 rwlock_t active_lock; /* lock for active_nodes */
42995- atomic_t gravecounter; /* graveyard uniquifier */
42996+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
42997 unsigned frun_percent; /* when to stop culling (% files) */
42998 unsigned fcull_percent; /* when to start culling (% files) */
42999 unsigned fstop_percent; /* when to stop allocating (% files) */
fe2de317 43000@@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
43001 * proc.c
43002 */
43003 #ifdef CONFIG_CACHEFILES_HISTOGRAM
43004-extern atomic_t cachefiles_lookup_histogram[HZ];
43005-extern atomic_t cachefiles_mkdir_histogram[HZ];
43006-extern atomic_t cachefiles_create_histogram[HZ];
43007+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43008+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43009+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
43010
43011 extern int __init cachefiles_proc_init(void);
43012 extern void cachefiles_proc_cleanup(void);
43013 static inline
43014-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
43015+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
43016 {
43017 unsigned long jif = jiffies - start_jif;
43018 if (jif >= HZ)
43019 jif = HZ - 1;
43020- atomic_inc(&histogram[jif]);
43021+ atomic_inc_unchecked(&histogram[jif]);
43022 }
43023
43024 #else
fe2de317 43025diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
c6e2a6c8 43026index 7f0771d..87d4f36 100644
43027--- a/fs/cachefiles/namei.c
43028+++ b/fs/cachefiles/namei.c
66a7e928 43029@@ -318,7 +318,7 @@ try_again:
43030 /* first step is to make up a grave dentry in the graveyard */
43031 sprintf(nbuffer, "%08x%08x",
43032 (uint32_t) get_seconds(),
43033- (uint32_t) atomic_inc_return(&cache->gravecounter));
43034+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
43035
43036 /* do the multiway lock magic */
43037 trap = lock_rename(cache->graveyard, dir);
43038diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
43039index eccd339..4c1d995 100644
43040--- a/fs/cachefiles/proc.c
43041+++ b/fs/cachefiles/proc.c
43042@@ -14,9 +14,9 @@
43043 #include <linux/seq_file.h>
43044 #include "internal.h"
43045
43046-atomic_t cachefiles_lookup_histogram[HZ];
43047-atomic_t cachefiles_mkdir_histogram[HZ];
43048-atomic_t cachefiles_create_histogram[HZ];
43049+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
43050+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
43051+atomic_unchecked_t cachefiles_create_histogram[HZ];
43052
43053 /*
43054 * display the latency histogram
fe2de317 43055@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
43056 return 0;
43057 default:
43058 index = (unsigned long) v - 3;
43059- x = atomic_read(&cachefiles_lookup_histogram[index]);
43060- y = atomic_read(&cachefiles_mkdir_histogram[index]);
43061- z = atomic_read(&cachefiles_create_histogram[index]);
43062+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
43063+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
43064+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
43065 if (x == 0 && y == 0 && z == 0)
43066 return 0;
43067
43068diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
43069index 0e3c092..818480e 100644
43070--- a/fs/cachefiles/rdwr.c
43071+++ b/fs/cachefiles/rdwr.c
43072@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
43073 old_fs = get_fs();
43074 set_fs(KERNEL_DS);
43075 ret = file->f_op->write(
43076- file, (const void __user *) data, len, &pos);
6e9df6a3 43077+ file, (const void __force_user *) data, len, &pos);
43078 set_fs(old_fs);
43079 kunmap(page);
43080 if (ret != len)
fe2de317 43081diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
5e856224 43082index 3e8094b..cb3ff3d 100644
43083--- a/fs/ceph/dir.c
43084+++ b/fs/ceph/dir.c
43085@@ -244,7 +244,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
43086 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
43087 struct ceph_mds_client *mdsc = fsc->mdsc;
43088 unsigned frag = fpos_frag(filp->f_pos);
43089- int off = fpos_off(filp->f_pos);
43090+ unsigned int off = fpos_off(filp->f_pos);
43091 int err;
43092 u32 ftype;
43093 struct ceph_mds_reply_info_parsed *rinfo;
43094@@ -598,7 +598,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
43095 if (nd &&
43096 (nd->flags & LOOKUP_OPEN) &&
43097 !(nd->intent.open.flags & O_CREAT)) {
43098- int mode = nd->intent.open.create_mode & ~current->fs->umask;
43099+ int mode = nd->intent.open.create_mode & ~current_umask();
43100 return ceph_lookup_open(dir, dentry, nd, mode, 1);
43101 }
43102
fe2de317 43103diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
c6e2a6c8 43104index 2704646..c581c91 100644
43105--- a/fs/cifs/cifs_debug.c
43106+++ b/fs/cifs/cifs_debug.c
43107@@ -265,8 +265,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
43108
43109 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
43110 #ifdef CONFIG_CIFS_STATS2
43111- atomic_set(&totBufAllocCount, 0);
43112- atomic_set(&totSmBufAllocCount, 0);
43113+ atomic_set_unchecked(&totBufAllocCount, 0);
43114+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43115 #endif /* CONFIG_CIFS_STATS2 */
43116 spin_lock(&cifs_tcp_ses_lock);
43117 list_for_each(tmp1, &cifs_tcp_ses_list) {
fe2de317 43118@@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
8308f9c9 43119 tcon = list_entry(tmp3,
15a11c5b 43120 struct cifs_tcon,
43121 tcon_list);
43122- atomic_set(&tcon->num_smbs_sent, 0);
43123- atomic_set(&tcon->num_writes, 0);
43124- atomic_set(&tcon->num_reads, 0);
43125- atomic_set(&tcon->num_oplock_brks, 0);
43126- atomic_set(&tcon->num_opens, 0);
43127- atomic_set(&tcon->num_posixopens, 0);
43128- atomic_set(&tcon->num_posixmkdirs, 0);
43129- atomic_set(&tcon->num_closes, 0);
43130- atomic_set(&tcon->num_deletes, 0);
43131- atomic_set(&tcon->num_mkdirs, 0);
43132- atomic_set(&tcon->num_rmdirs, 0);
43133- atomic_set(&tcon->num_renames, 0);
43134- atomic_set(&tcon->num_t2renames, 0);
43135- atomic_set(&tcon->num_ffirst, 0);
43136- atomic_set(&tcon->num_fnext, 0);
43137- atomic_set(&tcon->num_fclose, 0);
43138- atomic_set(&tcon->num_hardlinks, 0);
43139- atomic_set(&tcon->num_symlinks, 0);
43140- atomic_set(&tcon->num_locks, 0);
43141+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
43142+ atomic_set_unchecked(&tcon->num_writes, 0);
43143+ atomic_set_unchecked(&tcon->num_reads, 0);
43144+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
43145+ atomic_set_unchecked(&tcon->num_opens, 0);
43146+ atomic_set_unchecked(&tcon->num_posixopens, 0);
43147+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
43148+ atomic_set_unchecked(&tcon->num_closes, 0);
43149+ atomic_set_unchecked(&tcon->num_deletes, 0);
43150+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
43151+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
43152+ atomic_set_unchecked(&tcon->num_renames, 0);
43153+ atomic_set_unchecked(&tcon->num_t2renames, 0);
43154+ atomic_set_unchecked(&tcon->num_ffirst, 0);
43155+ atomic_set_unchecked(&tcon->num_fnext, 0);
43156+ atomic_set_unchecked(&tcon->num_fclose, 0);
43157+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
43158+ atomic_set_unchecked(&tcon->num_symlinks, 0);
43159+ atomic_set_unchecked(&tcon->num_locks, 0);
43160 }
43161 }
43162 }
fe2de317 43163@@ -327,8 +327,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43164 smBufAllocCount.counter, cifs_min_small);
43165 #ifdef CONFIG_CIFS_STATS2
43166 seq_printf(m, "Total Large %d Small %d Allocations\n",
43167- atomic_read(&totBufAllocCount),
43168- atomic_read(&totSmBufAllocCount));
43169+ atomic_read_unchecked(&totBufAllocCount),
43170+ atomic_read_unchecked(&totSmBufAllocCount));
43171 #endif /* CONFIG_CIFS_STATS2 */
43172
43173 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
fe2de317 43174@@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
43175 if (tcon->need_reconnect)
43176 seq_puts(m, "\tDISCONNECTED ");
43177 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
43178- atomic_read(&tcon->num_smbs_sent),
43179- atomic_read(&tcon->num_oplock_brks));
43180+ atomic_read_unchecked(&tcon->num_smbs_sent),
43181+ atomic_read_unchecked(&tcon->num_oplock_brks));
43182 seq_printf(m, "\nReads: %d Bytes: %lld",
43183- atomic_read(&tcon->num_reads),
43184+ atomic_read_unchecked(&tcon->num_reads),
43185 (long long)(tcon->bytes_read));
43186 seq_printf(m, "\nWrites: %d Bytes: %lld",
43187- atomic_read(&tcon->num_writes),
43188+ atomic_read_unchecked(&tcon->num_writes),
43189 (long long)(tcon->bytes_written));
43190 seq_printf(m, "\nFlushes: %d",
43191- atomic_read(&tcon->num_flushes));
43192+ atomic_read_unchecked(&tcon->num_flushes));
43193 seq_printf(m, "\nLocks: %d HardLinks: %d "
43194 "Symlinks: %d",
43195- atomic_read(&tcon->num_locks),
43196- atomic_read(&tcon->num_hardlinks),
43197- atomic_read(&tcon->num_symlinks));
43198+ atomic_read_unchecked(&tcon->num_locks),
43199+ atomic_read_unchecked(&tcon->num_hardlinks),
43200+ atomic_read_unchecked(&tcon->num_symlinks));
43201 seq_printf(m, "\nOpens: %d Closes: %d "
43202 "Deletes: %d",
43203- atomic_read(&tcon->num_opens),
43204- atomic_read(&tcon->num_closes),
43205- atomic_read(&tcon->num_deletes));
43206+ atomic_read_unchecked(&tcon->num_opens),
43207+ atomic_read_unchecked(&tcon->num_closes),
43208+ atomic_read_unchecked(&tcon->num_deletes));
43209 seq_printf(m, "\nPosix Opens: %d "
43210 "Posix Mkdirs: %d",
43211- atomic_read(&tcon->num_posixopens),
43212- atomic_read(&tcon->num_posixmkdirs));
43213+ atomic_read_unchecked(&tcon->num_posixopens),
43214+ atomic_read_unchecked(&tcon->num_posixmkdirs));
43215 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
43216- atomic_read(&tcon->num_mkdirs),
43217- atomic_read(&tcon->num_rmdirs));
43218+ atomic_read_unchecked(&tcon->num_mkdirs),
43219+ atomic_read_unchecked(&tcon->num_rmdirs));
43220 seq_printf(m, "\nRenames: %d T2 Renames %d",
43221- atomic_read(&tcon->num_renames),
43222- atomic_read(&tcon->num_t2renames));
43223+ atomic_read_unchecked(&tcon->num_renames),
43224+ atomic_read_unchecked(&tcon->num_t2renames));
43225 seq_printf(m, "\nFindFirst: %d FNext %d "
43226 "FClose %d",
43227- atomic_read(&tcon->num_ffirst),
43228- atomic_read(&tcon->num_fnext),
43229- atomic_read(&tcon->num_fclose));
43230+ atomic_read_unchecked(&tcon->num_ffirst),
43231+ atomic_read_unchecked(&tcon->num_fnext),
43232+ atomic_read_unchecked(&tcon->num_fclose));
43233 }
43234 }
43235 }
fe2de317 43236diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
c6e2a6c8 43237index 541ef81..a78deb8 100644
43238--- a/fs/cifs/cifsfs.c
43239+++ b/fs/cifs/cifsfs.c
c6e2a6c8 43240@@ -985,7 +985,7 @@ cifs_init_request_bufs(void)
43241 cifs_req_cachep = kmem_cache_create("cifs_request",
43242 CIFSMaxBufSize +
43243 MAX_CIFS_HDR_SIZE, 0,
43244- SLAB_HWCACHE_ALIGN, NULL);
43245+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
43246 if (cifs_req_cachep == NULL)
43247 return -ENOMEM;
43248
c6e2a6c8 43249@@ -1012,7 +1012,7 @@ cifs_init_request_bufs(void)
43250 efficient to alloc 1 per page off the slab compared to 17K (5page)
43251 alloc of large cifs buffers even when page debugging is on */
43252 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
43253- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
43254+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
43255 NULL);
43256 if (cifs_sm_req_cachep == NULL) {
43257 mempool_destroy(cifs_req_poolp);
c6e2a6c8 43258@@ -1097,8 +1097,8 @@ init_cifs(void)
43259 atomic_set(&bufAllocCount, 0);
43260 atomic_set(&smBufAllocCount, 0);
43261 #ifdef CONFIG_CIFS_STATS2
43262- atomic_set(&totBufAllocCount, 0);
43263- atomic_set(&totSmBufAllocCount, 0);
43264+ atomic_set_unchecked(&totBufAllocCount, 0);
43265+ atomic_set_unchecked(&totSmBufAllocCount, 0);
43266 #endif /* CONFIG_CIFS_STATS2 */
43267
43268 atomic_set(&midCount, 0);
fe2de317 43269diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
c6e2a6c8 43270index 73fea28..b996b84 100644
43271--- a/fs/cifs/cifsglob.h
43272+++ b/fs/cifs/cifsglob.h
c6e2a6c8 43273@@ -439,28 +439,28 @@ struct cifs_tcon {
43274 __u16 Flags; /* optional support bits */
43275 enum statusEnum tidStatus;
43276 #ifdef CONFIG_CIFS_STATS
43277- atomic_t num_smbs_sent;
43278- atomic_t num_writes;
43279- atomic_t num_reads;
43280- atomic_t num_flushes;
43281- atomic_t num_oplock_brks;
43282- atomic_t num_opens;
43283- atomic_t num_closes;
43284- atomic_t num_deletes;
43285- atomic_t num_mkdirs;
43286- atomic_t num_posixopens;
43287- atomic_t num_posixmkdirs;
43288- atomic_t num_rmdirs;
43289- atomic_t num_renames;
43290- atomic_t num_t2renames;
43291- atomic_t num_ffirst;
43292- atomic_t num_fnext;
43293- atomic_t num_fclose;
43294- atomic_t num_hardlinks;
43295- atomic_t num_symlinks;
43296- atomic_t num_locks;
43297- atomic_t num_acl_get;
43298- atomic_t num_acl_set;
43299+ atomic_unchecked_t num_smbs_sent;
43300+ atomic_unchecked_t num_writes;
43301+ atomic_unchecked_t num_reads;
43302+ atomic_unchecked_t num_flushes;
43303+ atomic_unchecked_t num_oplock_brks;
43304+ atomic_unchecked_t num_opens;
43305+ atomic_unchecked_t num_closes;
43306+ atomic_unchecked_t num_deletes;
43307+ atomic_unchecked_t num_mkdirs;
43308+ atomic_unchecked_t num_posixopens;
43309+ atomic_unchecked_t num_posixmkdirs;
43310+ atomic_unchecked_t num_rmdirs;
43311+ atomic_unchecked_t num_renames;
43312+ atomic_unchecked_t num_t2renames;
43313+ atomic_unchecked_t num_ffirst;
43314+ atomic_unchecked_t num_fnext;
43315+ atomic_unchecked_t num_fclose;
43316+ atomic_unchecked_t num_hardlinks;
43317+ atomic_unchecked_t num_symlinks;
43318+ atomic_unchecked_t num_locks;
43319+ atomic_unchecked_t num_acl_get;
43320+ atomic_unchecked_t num_acl_set;
43321 #ifdef CONFIG_CIFS_STATS2
43322 unsigned long long time_writes;
43323 unsigned long long time_reads;
c6e2a6c8 43324@@ -677,7 +677,7 @@ convert_delimiter(char *path, char delim)
43325 }
43326
43327 #ifdef CONFIG_CIFS_STATS
43328-#define cifs_stats_inc atomic_inc
43329+#define cifs_stats_inc atomic_inc_unchecked
43330
15a11c5b 43331 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
8308f9c9 43332 unsigned int bytes)
c6e2a6c8 43333@@ -1036,8 +1036,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
43334 /* Various Debug counters */
43335 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
43336 #ifdef CONFIG_CIFS_STATS2
43337-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
43338-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
43339+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
43340+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
43341 #endif
43342 GLOBAL_EXTERN atomic_t smBufAllocCount;
43343 GLOBAL_EXTERN atomic_t midCount;
fe2de317 43344diff --git a/fs/cifs/link.c b/fs/cifs/link.c
4c928ab7 43345index 6b0e064..94e6c3c 100644
43346--- a/fs/cifs/link.c
43347+++ b/fs/cifs/link.c
4c928ab7 43348@@ -600,7 +600,7 @@ symlink_exit:
43349
43350 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
43351 {
43352- char *p = nd_get_link(nd);
43353+ const char *p = nd_get_link(nd);
43354 if (!IS_ERR(p))
43355 kfree(p);
43356 }
fe2de317 43357diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
c6e2a6c8 43358index c29d1aa..58018da 100644
43359--- a/fs/cifs/misc.c
43360+++ b/fs/cifs/misc.c
43361@@ -156,7 +156,7 @@ cifs_buf_get(void)
43362 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
43363 atomic_inc(&bufAllocCount);
43364 #ifdef CONFIG_CIFS_STATS2
43365- atomic_inc(&totBufAllocCount);
43366+ atomic_inc_unchecked(&totBufAllocCount);
43367 #endif /* CONFIG_CIFS_STATS2 */
43368 }
43369
43370@@ -191,7 +191,7 @@ cifs_small_buf_get(void)
43371 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
43372 atomic_inc(&smBufAllocCount);
43373 #ifdef CONFIG_CIFS_STATS2
43374- atomic_inc(&totSmBufAllocCount);
43375+ atomic_inc_unchecked(&totSmBufAllocCount);
43376 #endif /* CONFIG_CIFS_STATS2 */
43377
43378 }
43379diff --git a/fs/coda/cache.c b/fs/coda/cache.c
43380index 6901578..d402eb5 100644
43381--- a/fs/coda/cache.c
43382+++ b/fs/coda/cache.c
43383@@ -24,7 +24,7 @@
43384 #include "coda_linux.h"
43385 #include "coda_cache.h"
43386
43387-static atomic_t permission_epoch = ATOMIC_INIT(0);
43388+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
43389
43390 /* replace or extend an acl cache hit */
43391 void coda_cache_enter(struct inode *inode, int mask)
fe2de317 43392@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
43393 struct coda_inode_info *cii = ITOC(inode);
43394
43395 spin_lock(&cii->c_lock);
43396- cii->c_cached_epoch = atomic_read(&permission_epoch);
43397+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
43398 if (cii->c_uid != current_fsuid()) {
43399 cii->c_uid = current_fsuid();
43400 cii->c_cached_perm = mask;
fe2de317 43401@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
43402 {
43403 struct coda_inode_info *cii = ITOC(inode);
43404 spin_lock(&cii->c_lock);
43405- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
43406+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
43407 spin_unlock(&cii->c_lock);
43408 }
43409
43410 /* remove all acl caches */
43411 void coda_cache_clear_all(struct super_block *sb)
43412 {
43413- atomic_inc(&permission_epoch);
43414+ atomic_inc_unchecked(&permission_epoch);
43415 }
43416
43417
fe2de317 43418@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
43419 spin_lock(&cii->c_lock);
43420 hit = (mask & cii->c_cached_perm) == mask &&
43421 cii->c_uid == current_fsuid() &&
43422- cii->c_cached_epoch == atomic_read(&permission_epoch);
43423+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
43424 spin_unlock(&cii->c_lock);
43425
43426 return hit;
fe2de317 43427diff --git a/fs/compat.c b/fs/compat.c
c6e2a6c8 43428index f2944ac..62845d2 100644
43429--- a/fs/compat.c
43430+++ b/fs/compat.c
c6e2a6c8 43431@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
43432
43433 set_fs(KERNEL_DS);
43434 /* The __user pointer cast is valid because of the set_fs() */
43435- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
43436+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
43437 set_fs(oldfs);
43438 /* truncating is ok because it's a user address */
43439 if (!ret)
c6e2a6c8 43440@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
43441 goto out;
43442
43443 ret = -EINVAL;
43444- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
43445+ if (nr_segs > UIO_MAXIOV)
43446 goto out;
43447 if (nr_segs > fast_segs) {
43448 ret = -ENOMEM;
c6e2a6c8 43449@@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
58c5fc13 43450
43451 struct compat_readdir_callback {
43452 struct compat_old_linux_dirent __user *dirent;
43453+ struct file * file;
43454 int result;
43455 };
43456
c6e2a6c8 43457@@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
43458 buf->result = -EOVERFLOW;
43459 return -EOVERFLOW;
43460 }
43461+
43462+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43463+ return 0;
43464+
43465 buf->result++;
43466 dirent = buf->dirent;
43467 if (!access_ok(VERIFY_WRITE, dirent,
c6e2a6c8 43468@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
43469
43470 buf.result = 0;
43471 buf.dirent = dirent;
43472+ buf.file = file;
43473
43474 error = vfs_readdir(file, compat_fillonedir, &buf);
43475 if (buf.result)
c6e2a6c8 43476@@ -900,6 +906,7 @@ struct compat_linux_dirent {
43477 struct compat_getdents_callback {
43478 struct compat_linux_dirent __user *current_dir;
43479 struct compat_linux_dirent __user *previous;
43480+ struct file * file;
43481 int count;
43482 int error;
43483 };
c6e2a6c8 43484@@ -921,6 +928,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
43485 buf->error = -EOVERFLOW;
43486 return -EOVERFLOW;
43487 }
43488+
43489+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43490+ return 0;
43491+
43492 dirent = buf->previous;
43493 if (dirent) {
43494 if (__put_user(offset, &dirent->d_off))
c6e2a6c8 43495@@ -968,6 +979,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
43496 buf.previous = NULL;
43497 buf.count = count;
43498 buf.error = 0;
43499+ buf.file = file;
43500
43501 error = vfs_readdir(file, compat_filldir, &buf);
43502 if (error >= 0)
c6e2a6c8 43503@@ -989,6 +1001,7 @@ out:
43504 struct compat_getdents_callback64 {
43505 struct linux_dirent64 __user *current_dir;
43506 struct linux_dirent64 __user *previous;
43507+ struct file * file;
43508 int count;
43509 int error;
43510 };
c6e2a6c8 43511@@ -1005,6 +1018,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
43512 buf->error = -EINVAL; /* only used if we fail.. */
43513 if (reclen > buf->count)
43514 return -EINVAL;
43515+
43516+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43517+ return 0;
43518+
43519 dirent = buf->previous;
43520
43521 if (dirent) {
c6e2a6c8 43522@@ -1056,13 +1073,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
43523 buf.previous = NULL;
43524 buf.count = count;
43525 buf.error = 0;
43526+ buf.file = file;
43527
43528 error = vfs_readdir(file, compat_filldir64, &buf);
43529 if (error >= 0)
43530 error = buf.error;
43531 lastdirent = buf.previous;
43532 if (lastdirent) {
43533- typeof(lastdirent->d_off) d_off = file->f_pos;
43534+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
43535 if (__put_user_unaligned(d_off, &lastdirent->d_off))
43536 error = -EFAULT;
43537 else
43538diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
43539index 112e45a..b59845b 100644
43540--- a/fs/compat_binfmt_elf.c
43541+++ b/fs/compat_binfmt_elf.c
43542@@ -30,11 +30,13 @@
43543 #undef elf_phdr
43544 #undef elf_shdr
43545 #undef elf_note
43546+#undef elf_dyn
43547 #undef elf_addr_t
43548 #define elfhdr elf32_hdr
43549 #define elf_phdr elf32_phdr
43550 #define elf_shdr elf32_shdr
43551 #define elf_note elf32_note
43552+#define elf_dyn Elf32_Dyn
43553 #define elf_addr_t Elf32_Addr
43554
43555 /*
43556diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
c6e2a6c8 43557index debdfe0..75d31d4 100644
43558--- a/fs/compat_ioctl.c
43559+++ b/fs/compat_ioctl.c
c6e2a6c8 43560@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
43561
43562 err = get_user(palp, &up->palette);
43563 err |= get_user(length, &up->length);
43564+ if (err)
43565+ return -EFAULT;
43566
43567 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
43568 err = put_user(compat_ptr(palp), &up_native->palette);
c6e2a6c8 43569@@ -621,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
43570 return -EFAULT;
43571 if (__get_user(udata, &ss32->iomem_base))
43572 return -EFAULT;
43573- ss.iomem_base = compat_ptr(udata);
43574+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
43575 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
43576 __get_user(ss.port_high, &ss32->port_high))
43577 return -EFAULT;
c6e2a6c8 43578@@ -796,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
43579 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
43580 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
43581 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
43582- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43583+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
43584 return -EFAULT;
43585
43586 return ioctl_preallocate(file, p);
c6e2a6c8 43587@@ -1610,8 +1612,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
43588 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
43589 {
43590 unsigned int a, b;
43591- a = *(unsigned int *)p;
43592- b = *(unsigned int *)q;
43593+ a = *(const unsigned int *)p;
43594+ b = *(const unsigned int *)q;
43595 if (a > b)
43596 return 1;
43597 if (a < b)
fe2de317 43598diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
c6e2a6c8 43599index 7e6c52d..94bc756 100644
43600--- a/fs/configfs/dir.c
43601+++ b/fs/configfs/dir.c
c6e2a6c8 43602@@ -1564,7 +1564,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43603 }
43604 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
43605 struct configfs_dirent *next;
43606- const char * name;
43607+ const unsigned char * name;
43608+ char d_name[sizeof(next->s_dentry->d_iname)];
43609 int len;
43610 struct inode *inode = NULL;
43611
c6e2a6c8 43612@@ -1574,7 +1575,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
43613 continue;
43614
43615 name = configfs_get_name(next);
43616- len = strlen(name);
43617+ if (next->s_dentry && name == next->s_dentry->d_iname) {
43618+ len = next->s_dentry->d_name.len;
43619+ memcpy(d_name, name, len);
43620+ name = d_name;
43621+ } else
43622+ len = strlen(name);
43623
43624 /*
43625 * We'll have a dentry and an inode for
fe2de317 43626diff --git a/fs/dcache.c b/fs/dcache.c
c6e2a6c8 43627index b80531c..8ca7e2d 100644
43628--- a/fs/dcache.c
43629+++ b/fs/dcache.c
c6e2a6c8 43630@@ -3084,7 +3084,7 @@ void __init vfs_caches_init(unsigned long mempages)
43631 mempages -= reserve;
43632
43633 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
43634- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
43635+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
43636
43637 dcache_init();
43638 inode_init();
4c928ab7 43639diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
c6e2a6c8 43640index b80bc84..0d46d1a 100644
43641--- a/fs/debugfs/inode.c
43642+++ b/fs/debugfs/inode.c
c6e2a6c8 43643@@ -408,7 +408,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
43644 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
43645 {
43646 return debugfs_create_file(name,
43647+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43648+ S_IFDIR | S_IRWXU,
43649+#else
43650 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43651+#endif
43652 parent, NULL, NULL);
43653 }
43654 EXPORT_SYMBOL_GPL(debugfs_create_dir);
fe2de317 43655diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
5e856224 43656index ab35b11..b30af66 100644
43657--- a/fs/ecryptfs/inode.c
43658+++ b/fs/ecryptfs/inode.c
5e856224 43659@@ -672,7 +672,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
43660 old_fs = get_fs();
43661 set_fs(get_ds());
43662 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
43663- (char __user *)lower_buf,
6e9df6a3 43664+ (char __force_user *)lower_buf,
43665 lower_bufsiz);
43666 set_fs(old_fs);
df50ba0c 43667 if (rc < 0)
5e856224 43668@@ -718,7 +718,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
43669 }
43670 old_fs = get_fs();
43671 set_fs(get_ds());
43672- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
6e9df6a3 43673+ rc = dentry->d_inode->i_op->readlink(dentry, (char __force_user *)buf, len);
43674 set_fs(old_fs);
43675 if (rc < 0) {
43676 kfree(buf);
5e856224 43677@@ -733,7 +733,7 @@ out:
43678 static void
43679 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
43680 {
43681- char *buf = nd_get_link(nd);
43682+ const char *buf = nd_get_link(nd);
43683 if (!IS_ERR(buf)) {
43684 /* Free the char* */
43685 kfree(buf);
fe2de317 43686diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
572b4308 43687index c0038f6..47ab347 100644
43688--- a/fs/ecryptfs/miscdev.c
43689+++ b/fs/ecryptfs/miscdev.c
572b4308 43690@@ -355,7 +355,7 @@ check_list:
ae4e228f 43691 goto out_unlock_msg_ctx;
5e856224 43692 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
43693 if (msg_ctx->msg) {
43694- if (copy_to_user(&buf[i], packet_length, packet_length_size))
43695+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
43696 goto out_unlock_msg_ctx;
43697 i += packet_length_size;
43698 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
fe2de317 43699diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
5e856224 43700index b2a34a1..162fa69 100644
43701--- a/fs/ecryptfs/read_write.c
43702+++ b/fs/ecryptfs/read_write.c
43703@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
43704 return -EIO;
43705 fs_save = get_fs();
43706 set_fs(get_ds());
43707- rc = vfs_write(lower_file, data, size, &offset);
43708+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
43709 set_fs(fs_save);
43710 mark_inode_dirty_sync(ecryptfs_inode);
43711 return rc;
4c928ab7 43712@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
43713 return -EIO;
43714 fs_save = get_fs();
43715 set_fs(get_ds());
43716- rc = vfs_read(lower_file, data, size, &offset);
43717+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
43718 set_fs(fs_save);
43719 return rc;
43720 }
fe2de317 43721diff --git a/fs/exec.c b/fs/exec.c
572b4308 43722index 29e5f84..8bfc7cb 100644
43723--- a/fs/exec.c
43724+++ b/fs/exec.c
c6e2a6c8 43725@@ -55,6 +55,15 @@
ae4e228f 43726 #include <linux/pipe_fs_i.h>
bc901d79 43727 #include <linux/oom.h>
15a11c5b 43728 #include <linux/compat.h>
43729+#include <linux/random.h>
43730+#include <linux/seq_file.h>
43731+
43732+#ifdef CONFIG_PAX_REFCOUNT
43733+#include <linux/kallsyms.h>
43734+#include <linux/kdebug.h>
43735+#endif
43736+
43737+#include <trace/events/fs.h>
43738
43739 #include <asm/uaccess.h>
43740 #include <asm/mmu_context.h>
c6e2a6c8 43741@@ -66,6 +75,18 @@
58c5fc13 43742
43743 #include <trace/events/sched.h>
43744
43745+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
43746+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
43747+{
43748+ WARN_ONCE(1, "PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
43749+}
43750+#endif
43751+
43752+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
43753+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
43754+EXPORT_SYMBOL(pax_set_initial_flags_func);
43755+#endif
43756+
43757 int core_uses_pid;
43758 char core_pattern[CORENAME_MAX_SIZE] = "core";
ae4e228f 43759 unsigned int core_pipe_limit;
c6e2a6c8 43760@@ -75,7 +96,7 @@ struct core_name {
43761 char *corename;
43762 int used, size;
43763 };
43764-static atomic_t call_count = ATOMIC_INIT(1);
43765+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
43766
43767 /* The maximal length of core_pattern is also specified in sysctl.c */
43768
c6e2a6c8 43769@@ -191,18 +212,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43770 int write)
43771 {
43772 struct page *page;
43773- int ret;
43774
43775-#ifdef CONFIG_STACK_GROWSUP
43776- if (write) {
15a11c5b 43777- ret = expand_downwards(bprm->vma, pos);
43778- if (ret < 0)
43779- return NULL;
43780- }
43781-#endif
43782- ret = get_user_pages(current, bprm->mm, pos,
43783- 1, write, 1, &page, NULL);
43784- if (ret <= 0)
15a11c5b 43785+ if (0 > expand_downwards(bprm->vma, pos))
43786+ return NULL;
43787+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
43788 return NULL;
43789
43790 if (write) {
c6e2a6c8 43791@@ -218,6 +231,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
43792 if (size <= ARG_MAX)
43793 return page;
43794
43795+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43796+ // only allow 512KB for argv+env on suid/sgid binaries
43797+ // to prevent easy ASLR exhaustion
43798+ if (((bprm->cred->euid != current_euid()) ||
43799+ (bprm->cred->egid != current_egid())) &&
43800+ (size > (512 * 1024))) {
43801+ put_page(page);
43802+ return NULL;
43803+ }
43804+#endif
43805+
43806 /*
43807 * Limit to 1/4-th the stack size for the argv+env strings.
43808 * This ensures that:
c6e2a6c8 43809@@ -277,6 +301,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43810 vma->vm_end = STACK_TOP_MAX;
43811 vma->vm_start = vma->vm_end - PAGE_SIZE;
57199397 43812 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
43813+
43814+#ifdef CONFIG_PAX_SEGMEXEC
43815+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
43816+#endif
43817+
43818 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
df50ba0c 43819 INIT_LIST_HEAD(&vma->anon_vma_chain);
bc901d79 43820
c6e2a6c8 43821@@ -291,6 +320,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
43822 mm->stack_vm = mm->total_vm = 1;
43823 up_write(&mm->mmap_sem);
43824 bprm->p = vma->vm_end - sizeof(void *);
43825+
43826+#ifdef CONFIG_PAX_RANDUSTACK
43827+ if (randomize_va_space)
4c928ab7 43828+ bprm->p ^= random32() & ~PAGE_MASK;
43829+#endif
43830+
43831 return 0;
43832 err:
43833 up_write(&mm->mmap_sem);
c6e2a6c8 43834@@ -399,19 +434,7 @@ err:
43835 return err;
43836 }
43837
43838-struct user_arg_ptr {
43839-#ifdef CONFIG_COMPAT
43840- bool is_compat;
43841-#endif
43842- union {
43843- const char __user *const __user *native;
43844-#ifdef CONFIG_COMPAT
43845- compat_uptr_t __user *compat;
43846-#endif
43847- } ptr;
43848-};
43849-
43850-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43851+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43852 {
43853 const char __user *native;
43854
c6e2a6c8 43855@@ -420,14 +443,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
43856 compat_uptr_t compat;
43857
43858 if (get_user(compat, argv.ptr.compat + nr))
43859- return ERR_PTR(-EFAULT);
43860+ return (const char __force_user *)ERR_PTR(-EFAULT);
43861
43862 return compat_ptr(compat);
43863 }
43864 #endif
43865
43866 if (get_user(native, argv.ptr.native + nr))
43867- return ERR_PTR(-EFAULT);
43868+ return (const char __force_user *)ERR_PTR(-EFAULT);
43869
43870 return native;
43871 }
c6e2a6c8 43872@@ -446,7 +469,7 @@ static int count(struct user_arg_ptr argv, int max)
43873 if (!p)
43874 break;
43875
43876- if (IS_ERR(p))
43877+ if (IS_ERR((const char __force_kernel *)p))
43878 return -EFAULT;
43879
43880 if (i++ >= max)
c6e2a6c8 43881@@ -480,7 +503,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
43882
43883 ret = -EFAULT;
43884 str = get_user_arg_ptr(argv, argc);
43885- if (IS_ERR(str))
43886+ if (IS_ERR((const char __force_kernel *)str))
43887 goto out;
43888
43889 len = strnlen_user(str, MAX_ARG_STRLEN);
c6e2a6c8 43890@@ -562,7 +585,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
43891 int r;
43892 mm_segment_t oldfs = get_fs();
43893 struct user_arg_ptr argv = {
43894- .ptr.native = (const char __user *const __user *)__argv,
6e9df6a3 43895+ .ptr.native = (const char __force_user *const __force_user *)__argv,
43896 };
43897
ae4e228f 43898 set_fs(KERNEL_DS);
c6e2a6c8 43899@@ -597,7 +620,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
58c5fc13 43900 unsigned long new_end = old_end - shift;
15a11c5b 43901 struct mmu_gather tlb;
43902
43903- BUG_ON(new_start > new_end);
43904+ if (new_start >= new_end || new_start < mmap_min_addr)
bc901d79 43905+ return -ENOMEM;
43906
43907 /*
43908 * ensure there are no vmas between where we want to go
c6e2a6c8 43909@@ -606,6 +630,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
43910 if (vma != find_vma(mm, new_start))
43911 return -EFAULT;
43912
43913+#ifdef CONFIG_PAX_SEGMEXEC
43914+ BUG_ON(pax_find_mirror_vma(vma));
43915+#endif
43916+
43917 /*
43918 * cover the whole range: [new_start, old_end)
43919 */
c6e2a6c8 43920@@ -686,10 +714,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43921 stack_top = arch_align_stack(stack_top);
43922 stack_top = PAGE_ALIGN(stack_top);
43923
43924- if (unlikely(stack_top < mmap_min_addr) ||
43925- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
43926- return -ENOMEM;
43927-
43928 stack_shift = vma->vm_end - stack_top;
43929
43930 bprm->p -= stack_shift;
c6e2a6c8 43931@@ -701,8 +725,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
43932 bprm->exec -= stack_shift;
43933
43934 down_write(&mm->mmap_sem);
43935+
43936+ /* Move stack pages down in memory. */
43937+ if (stack_shift) {
43938+ ret = shift_arg_pages(vma, stack_shift);
43939+ if (ret)
43940+ goto out_unlock;
43941+ }
43942+
43943 vm_flags = VM_STACK_FLAGS;
43944
43945+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
43946+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
43947+ vm_flags &= ~VM_EXEC;
43948+
43949+#ifdef CONFIG_PAX_MPROTECT
43950+ if (mm->pax_flags & MF_PAX_MPROTECT)
43951+ vm_flags &= ~VM_MAYEXEC;
43952+#endif
43953+
43954+ }
43955+#endif
43956+
43957 /*
43958 * Adjust stack execute permissions; explicitly enable for
43959 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
c6e2a6c8 43960@@ -721,13 +765,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
43961 goto out_unlock;
43962 BUG_ON(prev != vma);
43963
43964- /* Move stack pages down in memory. */
43965- if (stack_shift) {
43966- ret = shift_arg_pages(vma, stack_shift);
43967- if (ret)
43968- goto out_unlock;
43969- }
43970-
43971 /* mprotect_fixup is overkill to remove the temporary stack flags */
43972 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
43973
43974@@ -785,6 +822,8 @@ struct file *open_exec(const char *name)
43975
43976 fsnotify_open(file);
43977
43978+ trace_open_exec(name);
43979+
43980 err = deny_write_access(file);
43981 if (err)
43982 goto exit;
43983@@ -808,7 +847,7 @@ int kernel_read(struct file *file, loff_t offset,
43984 old_fs = get_fs();
43985 set_fs(get_ds());
43986 /* The cast to a user pointer is valid due to the set_fs() */
43987- result = vfs_read(file, (void __user *)addr, count, &pos);
6e9df6a3 43988+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
43989 set_fs(old_fs);
43990 return result;
43991 }
c6e2a6c8 43992@@ -1254,7 +1293,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
43993 }
43994 rcu_read_unlock();
43995
43996- if (p->fs->users > n_fs) {
43997+ if (atomic_read(&p->fs->users) > n_fs) {
43998 bprm->unsafe |= LSM_UNSAFE_SHARE;
43999 } else {
44000 res = -EAGAIN;
c6e2a6c8 44001@@ -1451,6 +1490,28 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
44002
44003 EXPORT_SYMBOL(search_binary_handler);
44004
44005+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44006+static DEFINE_PER_CPU(u64, exec_counter);
44007+static int __init init_exec_counters(void)
44008+{
44009+ unsigned int cpu;
44010+
44011+ for_each_possible_cpu(cpu) {
44012+ per_cpu(exec_counter, cpu) = (u64)cpu;
44013+ }
44014+
44015+ return 0;
44016+}
44017+early_initcall(init_exec_counters);
44018+static inline void increment_exec_counter(void)
44019+{
44020+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
44021+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
44022+}
44023+#else
44024+static inline void increment_exec_counter(void) {}
44025+#endif
44026+
44027 /*
44028 * sys_execve() executes a new program.
44029 */
c6e2a6c8 44030@@ -1459,6 +1520,11 @@ static int do_execve_common(const char *filename,
44031 struct user_arg_ptr envp,
44032 struct pt_regs *regs)
44033 {
44034+#ifdef CONFIG_GRKERNSEC
44035+ struct file *old_exec_file;
44036+ struct acl_subject_label *old_acl;
44037+ struct rlimit old_rlim[RLIM_NLIMITS];
44038+#endif
44039 struct linux_binprm *bprm;
44040 struct file *file;
44041 struct files_struct *displaced;
c6e2a6c8 44042@@ -1466,6 +1532,8 @@ static int do_execve_common(const char *filename,
15a11c5b 44043 int retval;
44044 const struct cred *cred = current_cred();
44045
44046+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
44047+
44048 /*
44049 * We move the actual failure in case of RLIMIT_NPROC excess from
44050 * set*uid() to execve() because too many poorly written programs
c6e2a6c8 44051@@ -1506,12 +1574,27 @@ static int do_execve_common(const char *filename,
44052 if (IS_ERR(file))
44053 goto out_unmark;
44054
44055+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
44056+ retval = -EPERM;
44057+ goto out_file;
44058+ }
44059+
44060 sched_exec();
44061
44062 bprm->file = file;
44063 bprm->filename = filename;
44064 bprm->interp = filename;
44065
44066+ if (gr_process_user_ban()) {
44067+ retval = -EPERM;
44068+ goto out_file;
44069+ }
44070+
44071+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
44072+ retval = -EACCES;
44073+ goto out_file;
44074+ }
44075+
44076 retval = bprm_mm_init(bprm);
44077 if (retval)
44078 goto out_file;
c6e2a6c8 44079@@ -1528,24 +1611,65 @@ static int do_execve_common(const char *filename,
44080 if (retval < 0)
44081 goto out;
44082
44083+#ifdef CONFIG_GRKERNSEC
44084+ old_acl = current->acl;
44085+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
44086+ old_exec_file = current->exec_file;
44087+ get_file(file);
44088+ current->exec_file = file;
44089+#endif
44090+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
44091+ /* limit suid stack to 8MB
44092+ we saved the old limits above and will restore them if this exec fails
44093+ */
44094+ if (((bprm->cred->euid != current_euid()) || (bprm->cred->egid != current_egid())) &&
44095+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
44096+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
44097+#endif
44098+
44099+ if (!gr_tpe_allow(file)) {
44100+ retval = -EACCES;
44101+ goto out_fail;
44102+ }
44103+
44104+ if (gr_check_crash_exec(file)) {
44105+ retval = -EACCES;
44106+ goto out_fail;
44107+ }
44108+
44109+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
4c928ab7 44110+ bprm->unsafe);
44111+ if (retval < 0)
44112+ goto out_fail;
44113+
44114 retval = copy_strings_kernel(1, &bprm->filename, bprm);
44115 if (retval < 0)
44116- goto out;
44117+ goto out_fail;
44118
44119 bprm->exec = bprm->p;
44120 retval = copy_strings(bprm->envc, envp, bprm);
44121 if (retval < 0)
44122- goto out;
44123+ goto out_fail;
44124
44125 retval = copy_strings(bprm->argc, argv, bprm);
44126 if (retval < 0)
44127- goto out;
44128+ goto out_fail;
44129+
44130+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
44131+
44132+ gr_handle_exec_args(bprm, argv);
44133
44134 retval = search_binary_handler(bprm,regs);
44135 if (retval < 0)
44136- goto out;
44137+ goto out_fail;
44138+#ifdef CONFIG_GRKERNSEC
44139+ if (old_exec_file)
44140+ fput(old_exec_file);
44141+#endif
44142
df50ba0c 44143 /* execve succeeded */
44144+
44145+ increment_exec_counter();
df50ba0c 44146 current->fs->in_exec = 0;
44147 current->in_execve = 0;
44148 acct_update_integrals(current);
c6e2a6c8 44149@@ -1554,6 +1678,14 @@ static int do_execve_common(const char *filename,
44150 put_files_struct(displaced);
44151 return retval;
44152
44153+out_fail:
44154+#ifdef CONFIG_GRKERNSEC
44155+ current->acl = old_acl;
44156+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
44157+ fput(current->exec_file);
44158+ current->exec_file = old_exec_file;
44159+#endif
44160+
44161 out:
44162 if (bprm->mm) {
44163 acct_arg_size(bprm, 0);
c6e2a6c8 44164@@ -1627,7 +1759,7 @@ static int expand_corename(struct core_name *cn)
44165 {
44166 char *old_corename = cn->corename;
44167
44168- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
44169+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
44170 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
44171
44172 if (!cn->corename) {
c6e2a6c8 44173@@ -1724,7 +1856,7 @@ static int format_corename(struct core_name *cn, long signr)
44174 int pid_in_pattern = 0;
44175 int err = 0;
44176
44177- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
44178+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
44179 cn->corename = kmalloc(cn->size, GFP_KERNEL);
44180 cn->used = 0;
44181
572b4308 44182@@ -1821,6 +1953,250 @@ out:
44183 return ispipe;
44184 }
44185
44186+int pax_check_flags(unsigned long *flags)
44187+{
44188+ int retval = 0;
44189+
44190+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
44191+ if (*flags & MF_PAX_SEGMEXEC)
44192+ {
44193+ *flags &= ~MF_PAX_SEGMEXEC;
44194+ retval = -EINVAL;
44195+ }
44196+#endif
44197+
44198+ if ((*flags & MF_PAX_PAGEEXEC)
44199+
44200+#ifdef CONFIG_PAX_PAGEEXEC
44201+ && (*flags & MF_PAX_SEGMEXEC)
44202+#endif
44203+
44204+ )
44205+ {
44206+ *flags &= ~MF_PAX_PAGEEXEC;
44207+ retval = -EINVAL;
44208+ }
44209+
44210+ if ((*flags & MF_PAX_MPROTECT)
44211+
44212+#ifdef CONFIG_PAX_MPROTECT
44213+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44214+#endif
44215+
44216+ )
44217+ {
44218+ *flags &= ~MF_PAX_MPROTECT;
44219+ retval = -EINVAL;
44220+ }
44221+
44222+ if ((*flags & MF_PAX_EMUTRAMP)
44223+
44224+#ifdef CONFIG_PAX_EMUTRAMP
44225+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
44226+#endif
44227+
44228+ )
44229+ {
44230+ *flags &= ~MF_PAX_EMUTRAMP;
44231+ retval = -EINVAL;
44232+ }
44233+
44234+ return retval;
44235+}
44236+
44237+EXPORT_SYMBOL(pax_check_flags);
44238+
44239+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
44240+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
44241+{
44242+ struct task_struct *tsk = current;
44243+ struct mm_struct *mm = current->mm;
44244+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
44245+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
44246+ char *path_exec = NULL;
44247+ char *path_fault = NULL;
44248+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
44249+
44250+ if (buffer_exec && buffer_fault) {
44251+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
44252+
44253+ down_read(&mm->mmap_sem);
44254+ vma = mm->mmap;
44255+ while (vma && (!vma_exec || !vma_fault)) {
44256+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
44257+ vma_exec = vma;
44258+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
44259+ vma_fault = vma;
44260+ vma = vma->vm_next;
44261+ }
44262+ if (vma_exec) {
44263+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
44264+ if (IS_ERR(path_exec))
44265+ path_exec = "<path too long>";
44266+ else {
44267+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
44268+ if (path_exec) {
44269+ *path_exec = 0;
44270+ path_exec = buffer_exec;
44271+ } else
44272+ path_exec = "<path too long>";
44273+ }
44274+ }
44275+ if (vma_fault) {
44276+ start = vma_fault->vm_start;
44277+ end = vma_fault->vm_end;
44278+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
44279+ if (vma_fault->vm_file) {
44280+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
44281+ if (IS_ERR(path_fault))
44282+ path_fault = "<path too long>";
44283+ else {
44284+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
44285+ if (path_fault) {
44286+ *path_fault = 0;
44287+ path_fault = buffer_fault;
44288+ } else
44289+ path_fault = "<path too long>";
44290+ }
44291+ } else
44292+ path_fault = "<anonymous mapping>";
44293+ }
44294+ up_read(&mm->mmap_sem);
44295+ }
44296+ if (tsk->signal->curr_ip)
ae4e228f 44297+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
44298+ else
44299+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
44300+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
44301+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
44302+ task_uid(tsk), task_euid(tsk), pc, sp);
44303+ free_page((unsigned long)buffer_exec);
44304+ free_page((unsigned long)buffer_fault);
6e9df6a3 44305+ pax_report_insns(regs, pc, sp);
44306+ do_coredump(SIGKILL, SIGKILL, regs);
44307+}
44308+#endif
44309+
44310+#ifdef CONFIG_PAX_REFCOUNT
44311+void pax_report_refcount_overflow(struct pt_regs *regs)
44312+{
44313+ if (current->signal->curr_ip)
44314+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44315+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
44316+ else
44317+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
44318+ current->comm, task_pid_nr(current), current_uid(), current_euid());
44319+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
44320+ show_regs(regs);
ae4e228f 44321+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
44322+}
44323+#endif
44324+
44325+#ifdef CONFIG_PAX_USERCOPY
6892158b 44326+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
572b4308 44327+static noinline int check_stack_object(const void *obj, unsigned long len)
44328+{
44329+ const void * const stack = task_stack_page(current);
44330+ const void * const stackend = stack + THREAD_SIZE;
44331+
57199397 44332+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44333+ const void *frame = NULL;
44334+ const void *oldframe;
44335+#endif
44336+
44337+ if (obj + len < obj)
44338+ return -1;
57199397 44339+
44340+ if (obj + len <= stack || stackend <= obj)
44341+ return 0;
57199397 44342+
6892158b 44343+ if (obj < stack || stackend < obj + len)
44344+ return -1;
44345+
57199397 44346+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
44347+ oldframe = __builtin_frame_address(1);
44348+ if (oldframe)
44349+ frame = __builtin_frame_address(2);
44350+ /*
44351+ low ----------------------------------------------> high
44352+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
44353+ ^----------------^
44354+ allow copies only within here
44355+ */
44356+ while (stack <= frame && frame < stackend) {
44357+ /* if obj + len extends past the last frame, this
44358+ check won't pass and the next frame will be 0,
44359+ causing us to bail out and correctly report
44360+ the copy as invalid
57199397 44361+ */
44362+ if (obj + len <= frame)
44363+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
44364+ oldframe = frame;
44365+ frame = *(const void * const *)frame;
57199397 44366+ }
57199397 44367+ return -1;
44368+#else
44369+ return 1;
44370+#endif
44371+}
44372+
572b4308 44373+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
58c5fc13 44374+{
ae4e228f 44375+ if (current->signal->curr_ip)
44376+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44377+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
ae4e228f 44378+ else
44379+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
44380+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
58c5fc13 44381+ dump_stack();
71d190be 44382+ gr_handle_kernel_exploit();
44383+ do_group_exit(SIGKILL);
44384+}
44385+#endif
15a11c5b 44386+
44387+void check_object_size(const void *ptr, unsigned long n, bool to)
44388+{
44389+
44390+#ifdef CONFIG_PAX_USERCOPY
44391+ const char *type;
44392+
44393+ if (!n)
44394+ return;
44395+
44396+ type = check_heap_object(ptr, n, to);
44397+ if (!type) {
44398+ if (check_stack_object(ptr, n) != -1)
44399+ return;
44400+ type = "<process stack>";
44401+ }
44402+
44403+ pax_report_usercopy(ptr, n, to, type);
44404+#endif
44405+
44406+}
44407+EXPORT_SYMBOL(check_object_size);
44408+
44409+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
44410+void pax_track_stack(void)
44411+{
44412+ unsigned long sp = (unsigned long)&sp;
44413+ if (sp < current_thread_info()->lowest_stack &&
44414+ sp > (unsigned long)task_stack_page(current))
44415+ current_thread_info()->lowest_stack = sp;
44416+}
44417+EXPORT_SYMBOL(pax_track_stack);
44418+#endif
44419+
44420+#ifdef CONFIG_PAX_SIZE_OVERFLOW
44421+void report_size_overflow(const char *file, unsigned int line, const char *func)
44422+{
44423+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u\n", func, file, line);
44424+ dump_stack();
44425+ do_group_exit(SIGKILL);
44426+}
44427+EXPORT_SYMBOL(report_size_overflow);
44428+#endif
58c5fc13 44429+
df50ba0c 44430 static int zap_process(struct task_struct *start, int exit_code)
44431 {
44432 struct task_struct *t;
572b4308 44433@@ -2018,17 +2394,17 @@ static void wait_for_dump_helpers(struct file *file)
44434 pipe = file->f_path.dentry->d_inode->i_pipe;
44435
44436 pipe_lock(pipe);
44437- pipe->readers++;
44438- pipe->writers--;
44439+ atomic_inc(&pipe->readers);
44440+ atomic_dec(&pipe->writers);
44441
44442- while ((pipe->readers > 1) && (!signal_pending(current))) {
44443+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
44444 wake_up_interruptible_sync(&pipe->wait);
44445 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
44446 pipe_wait(pipe);
44447 }
44448
44449- pipe->readers--;
44450- pipe->writers++;
44451+ atomic_dec(&pipe->readers);
44452+ atomic_inc(&pipe->writers);
44453 pipe_unlock(pipe);
44454
44455 }
572b4308 44456@@ -2089,7 +2465,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44457 int retval = 0;
44458 int flag = 0;
44459 int ispipe;
44460- static atomic_t core_dump_count = ATOMIC_INIT(0);
44461+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
44462 struct coredump_params cprm = {
44463 .signr = signr,
44464 .regs = regs,
572b4308 44465@@ -2104,6 +2480,9 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44466
44467 audit_core_dumps(signr);
44468
44469+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
44470+ gr_handle_brute_attach(current, cprm.mm_flags);
44471+
44472 binfmt = mm->binfmt;
44473 if (!binfmt || !binfmt->core_dump)
44474 goto fail;
572b4308 44475@@ -2171,7 +2550,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44476 }
44477 cprm.limit = RLIM_INFINITY;
44478
44479- dump_count = atomic_inc_return(&core_dump_count);
44480+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
44481 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
44482 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
44483 task_tgid_vnr(current), current->comm);
572b4308 44484@@ -2198,6 +2577,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
44485 } else {
44486 struct inode *inode;
44487
44488+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
44489+
44490 if (cprm.limit < binfmt->min_coredump)
44491 goto fail_unlock;
44492
572b4308 44493@@ -2241,7 +2622,7 @@ close_fail:
44494 filp_close(cprm.file, NULL);
44495 fail_dropcount:
44496 if (ispipe)
44497- atomic_dec(&core_dump_count);
44498+ atomic_dec_unchecked(&core_dump_count);
44499 fail_unlock:
44500 kfree(cn.corename);
44501 fail_corename:
572b4308 44502@@ -2260,7 +2641,7 @@ fail:
44503 */
44504 int dump_write(struct file *file, const void *addr, int nr)
44505 {
44506- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
44507+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
44508 }
44509 EXPORT_SYMBOL(dump_write);
44510
fe2de317 44511diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
4c928ab7 44512index a8cbe1b..fed04cb 100644
44513--- a/fs/ext2/balloc.c
44514+++ b/fs/ext2/balloc.c
44515@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
44516
44517 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44518 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44519- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44520+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
44521 sbi->s_resuid != current_fsuid() &&
44522 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44523 return 0;
fe2de317 44524diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
c6e2a6c8 44525index baac1b1..1499b62 100644
44526--- a/fs/ext3/balloc.c
44527+++ b/fs/ext3/balloc.c
c6e2a6c8 44528@@ -1438,9 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
44529
44530 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
44531 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
44532- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
44533+ if (free_blocks < root_blocks + 1 &&
44534 !use_reservation && sbi->s_resuid != current_fsuid() &&
44535- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
44536+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)) &&
44537+ !capable_nolog(CAP_SYS_RESOURCE)) {
58c5fc13 44538 return 0;
44539 }
44540 return 1;
fe2de317 44541diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
c6e2a6c8 44542index 8da837b..ed3835b 100644
44543--- a/fs/ext4/balloc.c
44544+++ b/fs/ext4/balloc.c
c6e2a6c8 44545@@ -463,8 +463,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
4c928ab7 44546 /* Hm, nope. Are (enough) root reserved clusters available? */
44547 if (sbi->s_resuid == current_fsuid() ||
44548 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
44549- capable(CAP_SYS_RESOURCE) ||
44550- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
44551+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
44552+ capable_nolog(CAP_SYS_RESOURCE)) {
15a11c5b 44553
4c928ab7 44554 if (free_clusters >= (nclusters + dirty_clusters))
58c5fc13 44555 return 1;
fe2de317 44556diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
c6e2a6c8 44557index 0e01e90..ae2bd5e 100644
44558--- a/fs/ext4/ext4.h
44559+++ b/fs/ext4/ext4.h
c6e2a6c8 44560@@ -1225,19 +1225,19 @@ struct ext4_sb_info {
44561 unsigned long s_mb_last_start;
44562
44563 /* stats for buddy allocator */
44564- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
44565- atomic_t s_bal_success; /* we found long enough chunks */
44566- atomic_t s_bal_allocated; /* in blocks */
44567- atomic_t s_bal_ex_scanned; /* total extents scanned */
44568- atomic_t s_bal_goals; /* goal hits */
44569- atomic_t s_bal_breaks; /* too long searches */
44570- atomic_t s_bal_2orders; /* 2^order hits */
44571+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
44572+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
44573+ atomic_unchecked_t s_bal_allocated; /* in blocks */
44574+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
44575+ atomic_unchecked_t s_bal_goals; /* goal hits */
44576+ atomic_unchecked_t s_bal_breaks; /* too long searches */
44577+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
44578 spinlock_t s_bal_lock;
44579 unsigned long s_mb_buddies_generated;
44580 unsigned long long s_mb_generation_time;
44581- atomic_t s_mb_lost_chunks;
44582- atomic_t s_mb_preallocated;
44583- atomic_t s_mb_discarded;
44584+ atomic_unchecked_t s_mb_lost_chunks;
44585+ atomic_unchecked_t s_mb_preallocated;
44586+ atomic_unchecked_t s_mb_discarded;
44587 atomic_t s_lock_busy;
44588
44589 /* locality groups */
44590diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
44591index 1365903..9727522 100644
44592--- a/fs/ext4/ioctl.c
44593+++ b/fs/ext4/ioctl.c
44594@@ -261,7 +261,6 @@ group_extend_out:
44595 err = ext4_move_extents(filp, donor_filp, me.orig_start,
44596 me.donor_start, me.len, &me.moved_len);
44597 mnt_drop_write_file(filp);
44598- mnt_drop_write(filp->f_path.mnt);
44599
44600 if (copy_to_user((struct move_extent __user *)arg,
44601 &me, sizeof(me)))
fe2de317 44602diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
c6e2a6c8 44603index 6b0a57e..1955a44 100644
44604--- a/fs/ext4/mballoc.c
44605+++ b/fs/ext4/mballoc.c
c6e2a6c8 44606@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
44607 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
44608
44609 if (EXT4_SB(sb)->s_mb_stats)
44610- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
44611+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
44612
44613 break;
44614 }
c6e2a6c8 44615@@ -2041,7 +2041,7 @@ repeat:
44616 ac->ac_status = AC_STATUS_CONTINUE;
44617 ac->ac_flags |= EXT4_MB_HINT_FIRST;
44618 cr = 3;
44619- atomic_inc(&sbi->s_mb_lost_chunks);
44620+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
44621 goto repeat;
44622 }
44623 }
c6e2a6c8 44624@@ -2545,25 +2545,25 @@ int ext4_mb_release(struct super_block *sb)
bc901d79 44625 if (sbi->s_mb_stats) {
44626 ext4_msg(sb, KERN_INFO,
44627 "mballoc: %u blocks %u reqs (%u success)",
44628- atomic_read(&sbi->s_bal_allocated),
44629- atomic_read(&sbi->s_bal_reqs),
44630- atomic_read(&sbi->s_bal_success));
44631+ atomic_read_unchecked(&sbi->s_bal_allocated),
44632+ atomic_read_unchecked(&sbi->s_bal_reqs),
44633+ atomic_read_unchecked(&sbi->s_bal_success));
44634 ext4_msg(sb, KERN_INFO,
44635 "mballoc: %u extents scanned, %u goal hits, "
44636 "%u 2^N hits, %u breaks, %u lost",
44637- atomic_read(&sbi->s_bal_ex_scanned),
44638- atomic_read(&sbi->s_bal_goals),
44639- atomic_read(&sbi->s_bal_2orders),
44640- atomic_read(&sbi->s_bal_breaks),
44641- atomic_read(&sbi->s_mb_lost_chunks));
44642+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
44643+ atomic_read_unchecked(&sbi->s_bal_goals),
44644+ atomic_read_unchecked(&sbi->s_bal_2orders),
44645+ atomic_read_unchecked(&sbi->s_bal_breaks),
44646+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
44647 ext4_msg(sb, KERN_INFO,
44648 "mballoc: %lu generated and it took %Lu",
44649 sbi->s_mb_buddies_generated,
bc901d79 44650 sbi->s_mb_generation_time);
44651 ext4_msg(sb, KERN_INFO,
44652 "mballoc: %u preallocated, %u discarded",
44653- atomic_read(&sbi->s_mb_preallocated),
44654- atomic_read(&sbi->s_mb_discarded));
44655+ atomic_read_unchecked(&sbi->s_mb_preallocated),
44656+ atomic_read_unchecked(&sbi->s_mb_discarded));
44657 }
44658
44659 free_percpu(sbi->s_locality_groups);
c6e2a6c8 44660@@ -3045,16 +3045,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
44661 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
44662
44663 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
44664- atomic_inc(&sbi->s_bal_reqs);
44665- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44666+ atomic_inc_unchecked(&sbi->s_bal_reqs);
44667+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
44668 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
44669- atomic_inc(&sbi->s_bal_success);
44670- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
44671+ atomic_inc_unchecked(&sbi->s_bal_success);
44672+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
44673 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
44674 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
44675- atomic_inc(&sbi->s_bal_goals);
44676+ atomic_inc_unchecked(&sbi->s_bal_goals);
44677 if (ac->ac_found > sbi->s_mb_max_to_scan)
44678- atomic_inc(&sbi->s_bal_breaks);
44679+ atomic_inc_unchecked(&sbi->s_bal_breaks);
44680 }
44681
44682 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
c6e2a6c8 44683@@ -3458,7 +3458,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
44684 trace_ext4_mb_new_inode_pa(ac, pa);
44685
44686 ext4_mb_use_inode_pa(ac, pa);
44687- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
44688+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
44689
44690 ei = EXT4_I(ac->ac_inode);
44691 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
c6e2a6c8 44692@@ -3518,7 +3518,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
44693 trace_ext4_mb_new_group_pa(ac, pa);
44694
44695 ext4_mb_use_group_pa(ac, pa);
44696- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44697+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
44698
44699 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
44700 lg = ac->ac_lg;
c6e2a6c8 44701@@ -3607,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
44702 * from the bitmap and continue.
44703 */
44704 }
44705- atomic_add(free, &sbi->s_mb_discarded);
44706+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
44707
44708 return err;
44709 }
c6e2a6c8 44710@@ -3625,7 +3625,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
44711 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
44712 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
44713 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
44714- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44715+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
44716 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
44717
44718 return 0;
fe2de317 44719diff --git a/fs/fcntl.c b/fs/fcntl.c
c6e2a6c8 44720index 75e7c1f..1eb3e4d 100644
44721--- a/fs/fcntl.c
44722+++ b/fs/fcntl.c
44723@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
44724 if (err)
44725 return err;
44726
44727+ if (gr_handle_chroot_fowner(pid, type))
44728+ return -ENOENT;
44729+ if (gr_check_protected_task_fowner(pid, type))
44730+ return -EACCES;
44731+
44732 f_modown(filp, pid, type, force);
44733 return 0;
44734 }
44735@@ -266,7 +271,7 @@ pid_t f_getown(struct file *filp)
44736
44737 static int f_setown_ex(struct file *filp, unsigned long arg)
44738 {
44739- struct f_owner_ex * __user owner_p = (void * __user)arg;
44740+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44741 struct f_owner_ex owner;
44742 struct pid *pid;
44743 int type;
fe2de317 44744@@ -306,7 +311,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
44745
44746 static int f_getown_ex(struct file *filp, unsigned long arg)
44747 {
44748- struct f_owner_ex * __user owner_p = (void * __user)arg;
44749+ struct f_owner_ex __user *owner_p = (void __user *)arg;
44750 struct f_owner_ex owner;
44751 int ret = 0;
44752
fe2de317 44753@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
44754 switch (cmd) {
44755 case F_DUPFD:
44756 case F_DUPFD_CLOEXEC:
44757+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
df50ba0c 44758 if (arg >= rlimit(RLIMIT_NOFILE))
44759 break;
44760 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
fe2de317 44761diff --git a/fs/fifo.c b/fs/fifo.c
572b4308 44762index cf6f434..3d7942c 100644
44763--- a/fs/fifo.c
44764+++ b/fs/fifo.c
572b4308 44765@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
44766 */
44767 filp->f_op = &read_pipefifo_fops;
44768 pipe->r_counter++;
44769- if (pipe->readers++ == 0)
44770+ if (atomic_inc_return(&pipe->readers) == 1)
44771 wake_up_partner(inode);
44772
44773- if (!pipe->writers) {
44774+ if (!atomic_read(&pipe->writers)) {
44775 if ((filp->f_flags & O_NONBLOCK)) {
44776 /* suppress POLLHUP until we have
44777 * seen a writer */
fe2de317 44778@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
44779 * errno=ENXIO when there is no process reading the FIFO.
44780 */
44781 ret = -ENXIO;
44782- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
44783+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
44784 goto err;
44785
44786 filp->f_op = &write_pipefifo_fops;
44787 pipe->w_counter++;
44788- if (!pipe->writers++)
44789+ if (atomic_inc_return(&pipe->writers) == 1)
44790 wake_up_partner(inode);
44791
44792- if (!pipe->readers) {
44793+ if (!atomic_read(&pipe->readers)) {
572b4308 44794 if (wait_for_partner(inode, &pipe->r_counter))
ae4e228f 44795 goto err_wr;
44796 }
44797@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
44798 */
44799 filp->f_op = &rdwr_pipefifo_fops;
44800
44801- pipe->readers++;
44802- pipe->writers++;
44803+ atomic_inc(&pipe->readers);
44804+ atomic_inc(&pipe->writers);
44805 pipe->r_counter++;
44806 pipe->w_counter++;
44807- if (pipe->readers == 1 || pipe->writers == 1)
44808+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
44809 wake_up_partner(inode);
44810 break;
44811
572b4308 44812@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
44813 return 0;
44814
44815 err_rd:
44816- if (!--pipe->readers)
44817+ if (atomic_dec_and_test(&pipe->readers))
44818 wake_up_interruptible(&pipe->wait);
44819 ret = -ERESTARTSYS;
44820 goto err;
44821
44822 err_wr:
44823- if (!--pipe->writers)
44824+ if (atomic_dec_and_test(&pipe->writers))
44825 wake_up_interruptible(&pipe->wait);
44826 ret = -ERESTARTSYS;
44827 goto err;
44828
44829 err:
44830- if (!pipe->readers && !pipe->writers)
44831+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
44832 free_pipe_info(inode);
44833
44834 err_nocleanup:
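
The fifo_open() conversion above swaps the plain readers/writers counters for atomic ones, and each open-coded test gets a direct atomic equivalent: readers++ == 0 becomes atomic_inc_return(&readers) == 1, !--writers becomes atomic_dec_and_test(&writers), and bare reads go through atomic_read(). A small userspace model of those equivalences, using C11 atomics as a stand-in for the kernel's atomic types (the pipe structure itself is not modelled):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int readers;

static bool first_reader_arrived(void)
{
	/* fetch_add returns the old value, so old == 0 means the increment
	 * made it 1 -- the same condition as atomic_inc_return(&x) == 1 */
	return atomic_fetch_add(&readers, 1) == 0;
}

static bool last_reader_left(void)
{
	/* old value 1 means we just dropped the count to zero, i.e. the
	 * atomic_dec_and_test() condition */
	return atomic_fetch_sub(&readers, 1) == 1;
}

int main(void)
{
	printf("first reader? %d\n", first_reader_arrived());   /* 1 */
	printf("first reader? %d\n", first_reader_arrived());   /* 0 */
	printf("last reader?  %d\n", last_reader_left());       /* 0 */
	printf("last reader?  %d\n", last_reader_left());       /* 1 */
	return 0;
}
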
fe2de317 44835diff --git a/fs/file.c b/fs/file.c
c6e2a6c8 44836index ba3f605..fade102 100644
fe2de317
MT
44837--- a/fs/file.c
44838+++ b/fs/file.c
66a7e928 44839@@ -15,6 +15,7 @@
58c5fc13
MT
44840 #include <linux/slab.h>
44841 #include <linux/vmalloc.h>
44842 #include <linux/file.h>
44843+#include <linux/security.h>
44844 #include <linux/fdtable.h>
44845 #include <linux/bitops.h>
44846 #include <linux/interrupt.h>
c6e2a6c8 44847@@ -255,6 +256,7 @@ int expand_files(struct files_struct *files, int nr)
58c5fc13
MT
44848 * N.B. For clone tasks sharing a files structure, this test
44849 * will limit the total number of files that can be opened.
44850 */
58c5fc13 44851+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
df50ba0c 44852 if (nr >= rlimit(RLIMIT_NOFILE))
58c5fc13
MT
44853 return -EMFILE;
44854
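
In expand_files() the new gr_learn_resource() call sits immediately before the existing RLIMIT_NOFILE check, so the requested descriptor count is reported to grsecurity's learning mode whether or not the request then fails with -EMFILE. A rough userspace sketch of that observe-then-enforce shape; learn_resource() is a hypothetical stand-in for the grsecurity hook, not a real API:

#include <sys/resource.h>
#include <errno.h>
#include <stdio.h>

/* hypothetical stand-in for gr_learn_resource(); a real learning mode would
 * record the request for later policy generation */
static void learn_resource(int resource, unsigned long requested)
{
	fprintf(stderr, "learn: resource=%d requested=%lu\n", resource, requested);
}

static int check_fd_request(unsigned long nr)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
		return -errno;

	learn_resource(RLIMIT_NOFILE, nr);	/* observe first ...           */
	if (nr >= rl.rlim_cur)			/* ... then enforce, as before */
		return -EMFILE;
	return 0;
}

int main(void)
{
	printf("request 10 fds      -> %d\n", check_fd_request(10));
	printf("request 1000000 fds -> %d\n", check_fd_request(1000000));
	return 0;
}
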
fe2de317 44855diff --git a/fs/filesystems.c b/fs/filesystems.c
5e856224 44856index 96f2428..f5eeb8e 100644
fe2de317
MT
44857--- a/fs/filesystems.c
44858+++ b/fs/filesystems.c
5e856224 44859@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
71d190be
MT
44860 int len = dot ? dot - name : strlen(name);
44861
44862 fs = __get_fs_type(name, len);
44863+
44864+#ifdef CONFIG_GRKERNSEC_MODHARDEN
44865+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
44866+#else
44867 if (!fs && (request_module("%.*s", len, name) == 0))
44868+#endif
44869 fs = __get_fs_type(name, len);
44870
44871 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
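
get_fs_type() auto-loads a filesystem module named after the part of the requested name before the first dot; with CONFIG_GRKERNSEC_MODHARDEN the request is routed through a dedicated hardened entry point instead of plain request_module(), but the name handling is unchanged. A small sketch of that name truncation (the dot/len computation mirrors the lines visible in the hunk; the printf only stands in for the actual module request):

#include <stdio.h>
#include <string.h>

static void show_module_request(const char *name)
{
	const char *dot = strchr(name, '.');
	int len = dot ? (int)(dot - name) : (int)strlen(name);

	/* the kernel passes this to request_module() -- or, with
	 * CONFIG_GRKERNSEC_MODHARDEN, to the hardened variant -- here we
	 * only print the module name that would be requested */
	printf("would request module \"%.*s\" for filesystem \"%s\"\n",
	       len, name, name);
}

int main(void)
{
	show_module_request("ext4");
	show_module_request("fuseblk.subtype");
	return 0;
}
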
fe2de317 44872diff --git a/fs/fs_struct.c b/fs/fs_struct.c
c6e2a6c8 44873index e159e68..e7d2a6f 100644
fe2de317
MT
44874--- a/fs/fs_struct.c
44875+++ b/fs/fs_struct.c
44876@@ -4,6 +4,7 @@
44877 #include <linux/path.h>
44878 #include <linux/slab.h>
44879 #include <linux/fs_struct.h>
44880+#include <linux/grsecurity.h>
44881 #include "internal.h"
44882
44883 static inline void path_get_longterm(struct path *path)
44884@@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
c6e2a6c8 44885 write_seqcount_begin(&fs->seq);
fe2de317
MT
44886 old_root = fs->root;
44887 fs->root = *path;
fe2de317
MT
44888+ gr_set_chroot_entries(current, path);
44889 write_seqcount_end(&fs->seq);
44890 spin_unlock(&fs->lock);
44891 if (old_root.dentry)
c6e2a6c8
MT
44892@@ -65,6 +67,17 @@ static inline int replace_path(struct path *p, const struct path *old, const str
44893 return 1;
44894 }
44895
44896+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
44897+{
44898+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
44899+ return 0;
44900+ *p = *new;
44901+
44902+ gr_set_chroot_entries(task, new);
44903+
44904+ return 1;
44905+}
44906+
44907 void chroot_fs_refs(struct path *old_root, struct path *new_root)
44908 {
44909 struct task_struct *g, *p;
44910@@ -79,7 +92,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
44911 int hits = 0;
44912 spin_lock(&fs->lock);
44913 write_seqcount_begin(&fs->seq);
44914- hits += replace_path(&fs->root, old_root, new_root);
44915+ hits += replace_root_path(p, &fs->root, old_root, new_root);
44916 hits += replace_path(&fs->pwd, old_root, new_root);
44917 write_seqcount_end(&fs->seq);
44918 while (hits--) {
44919@@ -111,7 +124,8 @@ void exit_fs(struct task_struct *tsk)
44920 task_lock(tsk);
fe2de317 44921 spin_lock(&fs->lock);
fe2de317
MT
44922 tsk->fs = NULL;
44923- kill = !--fs->users;
44924+ gr_clear_chroot_entries(tsk);
44925+ kill = !atomic_dec_return(&fs->users);
fe2de317
MT
44926 spin_unlock(&fs->lock);
44927 task_unlock(tsk);
c6e2a6c8
MT
44928 if (kill)
44929@@ -124,7 +138,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
fe2de317
MT
44930 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
44931 /* We don't need to lock fs - think why ;-) */
44932 if (fs) {
44933- fs->users = 1;
44934+ atomic_set(&fs->users, 1);
44935 fs->in_exec = 0;
44936 spin_lock_init(&fs->lock);
44937 seqcount_init(&fs->seq);
c6e2a6c8 44938@@ -133,6 +147,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
fe2de317
MT
44939 spin_lock(&old->lock);
44940 fs->root = old->root;
44941 path_get_longterm(&fs->root);
44942+ /* instead of calling gr_set_chroot_entries here,
44943+ we call it from every caller of this function
44944+ */
44945 fs->pwd = old->pwd;
44946 path_get_longterm(&fs->pwd);
44947 spin_unlock(&old->lock);
c6e2a6c8 44948@@ -151,8 +168,9 @@ int unshare_fs_struct(void)
fe2de317
MT
44949
44950 task_lock(current);
44951 spin_lock(&fs->lock);
44952- kill = !--fs->users;
44953+ kill = !atomic_dec_return(&fs->users);
44954 current->fs = new_fs;
44955+ gr_set_chroot_entries(current, &new_fs->root);
44956 spin_unlock(&fs->lock);
44957 task_unlock(current);
44958
c6e2a6c8 44959@@ -165,13 +183,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
4c928ab7
MT
44960
44961 int current_umask(void)
44962 {
44963- return current->fs->umask;
44964+ return current->fs->umask | gr_acl_umask();
44965 }
44966 EXPORT_SYMBOL(current_umask);
fe2de317
MT
44967
44968 /* to be mentioned only in INIT_TASK */
44969 struct fs_struct init_fs = {
44970- .users = 1,
44971+ .users = ATOMIC_INIT(1),
44972 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
44973 .seq = SEQCNT_ZERO,
44974 .umask = 0022,
c6e2a6c8 44975@@ -187,12 +205,13 @@ void daemonize_fs_struct(void)
fe2de317
MT
44976 task_lock(current);
44977
44978 spin_lock(&init_fs.lock);
44979- init_fs.users++;
44980+ atomic_inc(&init_fs.users);
44981 spin_unlock(&init_fs.lock);
44982
44983 spin_lock(&fs->lock);
44984 current->fs = &init_fs;
44985- kill = !--fs->users;
44986+ gr_set_chroot_entries(current, &current->fs->root);
44987+ kill = !atomic_dec_return(&fs->users);
44988 spin_unlock(&fs->lock);
44989
44990 task_unlock(current);
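
Two things change in fs/fs_struct.c above: fs->users becomes an atomic_t (so kill = !--fs->users turns into kill = !atomic_dec_return(&fs->users)), and every place the root path is installed or swapped also updates grsecurity's per-task chroot bookkeeping. The new replace_root_path() helper captures the second part: it is replace_path() plus a hook that must fire only when the task's root actually pointed at the old root. A minimal stand-alone sketch (path_stub and note_new_root() are stand-ins, not the kernel's struct path or gr_set_chroot_entries()):

#include <stdio.h>

struct path_stub { const void *dentry, *mnt; };	/* stand-in for struct path */

static void note_new_root(const struct path_stub *p)
{
	(void)p;			/* stand-in for gr_set_chroot_entries() */
	printf("root replaced; chroot bookkeeping updated\n");
}

static int replace_root_path(struct path_stub *p,
			     const struct path_stub *old,
			     const struct path_stub *new_root)
{
	if (p->dentry != old->dentry || p->mnt != old->mnt)
		return 0;		/* was not pointing at the old root */
	*p = *new_root;
	note_new_root(p);		/* hook fires only on an actual hit */
	return 1;			/* caller counts "hits" exactly as before */
}

int main(void)
{
	int a, b, c;
	struct path_stub old_root = { &a, &b };
	struct path_stub new_root = { &c, &b };
	struct path_stub current_root = old_root;

	printf("hits=%d\n", replace_root_path(&current_root, &old_root, &new_root)); /* 1 */
	printf("hits=%d\n", replace_root_path(&current_root, &old_root, &new_root)); /* 0 */
	return 0;
}
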
44991diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
44992index 9905350..02eaec4 100644
44993--- a/fs/fscache/cookie.c
44994+++ b/fs/fscache/cookie.c
44995@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
8308f9c9
MT
44996 parent ? (char *) parent->def->name : "<no-parent>",
44997 def->name, netfs_data);
44998
44999- fscache_stat(&fscache_n_acquires);
45000+ fscache_stat_unchecked(&fscache_n_acquires);
45001
45002 /* if there's no parent cookie, then we don't create one here either */
45003 if (!parent) {
45004- fscache_stat(&fscache_n_acquires_null);
45005+ fscache_stat_unchecked(&fscache_n_acquires_null);
45006 _leave(" [no parent]");
45007 return NULL;
45008 }
fe2de317 45009@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
8308f9c9
MT
45010 /* allocate and initialise a cookie */
45011 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
45012 if (!cookie) {
45013- fscache_stat(&fscache_n_acquires_oom);
45014+ fscache_stat_unchecked(&fscache_n_acquires_oom);
45015 _leave(" [ENOMEM]");
45016 return NULL;
45017 }
fe2de317 45018@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
8308f9c9
MT
45019
45020 switch (cookie->def->type) {
45021 case FSCACHE_COOKIE_TYPE_INDEX:
45022- fscache_stat(&fscache_n_cookie_index);
45023+ fscache_stat_unchecked(&fscache_n_cookie_index);
45024 break;
45025 case FSCACHE_COOKIE_TYPE_DATAFILE:
45026- fscache_stat(&fscache_n_cookie_data);
45027+ fscache_stat_unchecked(&fscache_n_cookie_data);
45028 break;
45029 default:
45030- fscache_stat(&fscache_n_cookie_special);
45031+ fscache_stat_unchecked(&fscache_n_cookie_special);
45032 break;
45033 }
45034
fe2de317 45035@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
8308f9c9
MT
45036 if (fscache_acquire_non_index_cookie(cookie) < 0) {
45037 atomic_dec(&parent->n_children);
45038 __fscache_cookie_put(cookie);
45039- fscache_stat(&fscache_n_acquires_nobufs);
45040+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
45041 _leave(" = NULL");
45042 return NULL;
45043 }
45044 }
45045
45046- fscache_stat(&fscache_n_acquires_ok);
45047+ fscache_stat_unchecked(&fscache_n_acquires_ok);
45048 _leave(" = %p", cookie);
45049 return cookie;
45050 }
fe2de317 45051@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
8308f9c9
MT
45052 cache = fscache_select_cache_for_object(cookie->parent);
45053 if (!cache) {
45054 up_read(&fscache_addremove_sem);
45055- fscache_stat(&fscache_n_acquires_no_cache);
45056+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
45057 _leave(" = -ENOMEDIUM [no cache]");
45058 return -ENOMEDIUM;
45059 }
fe2de317 45060@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
8308f9c9
MT
45061 object = cache->ops->alloc_object(cache, cookie);
45062 fscache_stat_d(&fscache_n_cop_alloc_object);
45063 if (IS_ERR(object)) {
45064- fscache_stat(&fscache_n_object_no_alloc);
45065+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
45066 ret = PTR_ERR(object);
45067 goto error;
45068 }
45069
45070- fscache_stat(&fscache_n_object_alloc);
45071+ fscache_stat_unchecked(&fscache_n_object_alloc);
45072
45073 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
45074
fe2de317 45075@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
8308f9c9
MT
45076 struct fscache_object *object;
45077 struct hlist_node *_p;
45078
45079- fscache_stat(&fscache_n_updates);
45080+ fscache_stat_unchecked(&fscache_n_updates);
45081
45082 if (!cookie) {
45083- fscache_stat(&fscache_n_updates_null);
45084+ fscache_stat_unchecked(&fscache_n_updates_null);
45085 _leave(" [no cookie]");
45086 return;
45087 }
fe2de317 45088@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
8308f9c9
MT
45089 struct fscache_object *object;
45090 unsigned long event;
45091
45092- fscache_stat(&fscache_n_relinquishes);
45093+ fscache_stat_unchecked(&fscache_n_relinquishes);
45094 if (retire)
45095- fscache_stat(&fscache_n_relinquishes_retire);
45096+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
45097
45098 if (!cookie) {
45099- fscache_stat(&fscache_n_relinquishes_null);
45100+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
45101 _leave(" [no cookie]");
45102 return;
45103 }
fe2de317 45104@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
8308f9c9
MT
45105
45106 /* wait for the cookie to finish being instantiated (or to fail) */
45107 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
45108- fscache_stat(&fscache_n_relinquishes_waitcrt);
45109+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
45110 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
45111 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
45112 }
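
From here on, the fscache changes are one repeated transformation: statistics counters that are only ever incremented and dumped through /proc move from atomic_t to atomic_unchecked_t, and the fscache_stat() calls on them become fscache_stat_unchecked(). Under PaX's reference-count hardening (the reason the distinction exists), plain atomic_t increments are overflow-checked; a counter that is pure bookkeeping opts out via the _unchecked variant so an eventual wrap is neither a false positive nor an extra cost. An illustrative userspace model of that split -- this is not PaX's actual implementation, only the checked-versus-wrapping behaviour it separates:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int  checked_ctr   = INT_MAX;	/* pretend this is a refcount  */
static atomic_uint unchecked_ctr = UINT_MAX;	/* pretend this is a statistic */

/* refuses to wrap, the way an overflow-checked refcount increment must */
static int checked_inc(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != INT_MAX) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 0;	/* incremented safely */
	}
	return -1;			/* would overflow: refuse (and report) */
}

/* plain wrapping increment, like atomic_inc_unchecked() on a statistic */
static void unchecked_inc(atomic_uint *v)
{
	atomic_fetch_add(v, 1);
}

int main(void)
{
	printf("checked increment at INT_MAX -> %d\n", checked_inc(&checked_ctr));
	unchecked_inc(&unchecked_ctr);
	printf("unchecked counter wrapped to -> %u\n", atomic_load(&unchecked_ctr));
	return 0;
}

Note that the real reference counts in these hunks (op->usage, the fscache_n_cop_* counters) stay on plain atomic_t; only the statistics are converted.
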
fe2de317
MT
45113diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
45114index f6aad48..88dcf26 100644
45115--- a/fs/fscache/internal.h
45116+++ b/fs/fscache/internal.h
8308f9c9
MT
45117@@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
45118 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
45119 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
45120
45121-extern atomic_t fscache_n_op_pend;
45122-extern atomic_t fscache_n_op_run;
45123-extern atomic_t fscache_n_op_enqueue;
45124-extern atomic_t fscache_n_op_deferred_release;
45125-extern atomic_t fscache_n_op_release;
45126-extern atomic_t fscache_n_op_gc;
45127-extern atomic_t fscache_n_op_cancelled;
45128-extern atomic_t fscache_n_op_rejected;
fe2de317
MT
45129+extern atomic_unchecked_t fscache_n_op_pend;
45130+extern atomic_unchecked_t fscache_n_op_run;
45131+extern atomic_unchecked_t fscache_n_op_enqueue;
45132+extern atomic_unchecked_t fscache_n_op_deferred_release;
45133+extern atomic_unchecked_t fscache_n_op_release;
45134+extern atomic_unchecked_t fscache_n_op_gc;
45135+extern atomic_unchecked_t fscache_n_op_cancelled;
45136+extern atomic_unchecked_t fscache_n_op_rejected;
45137
8308f9c9
MT
45138-extern atomic_t fscache_n_attr_changed;
45139-extern atomic_t fscache_n_attr_changed_ok;
45140-extern atomic_t fscache_n_attr_changed_nobufs;
45141-extern atomic_t fscache_n_attr_changed_nomem;
45142-extern atomic_t fscache_n_attr_changed_calls;
fe2de317
MT
45143+extern atomic_unchecked_t fscache_n_attr_changed;
45144+extern atomic_unchecked_t fscache_n_attr_changed_ok;
45145+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
45146+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
45147+extern atomic_unchecked_t fscache_n_attr_changed_calls;
45148
8308f9c9
MT
45149-extern atomic_t fscache_n_allocs;
45150-extern atomic_t fscache_n_allocs_ok;
45151-extern atomic_t fscache_n_allocs_wait;
45152-extern atomic_t fscache_n_allocs_nobufs;
45153-extern atomic_t fscache_n_allocs_intr;
45154-extern atomic_t fscache_n_allocs_object_dead;
45155-extern atomic_t fscache_n_alloc_ops;
45156-extern atomic_t fscache_n_alloc_op_waits;
fe2de317
MT
45157+extern atomic_unchecked_t fscache_n_allocs;
45158+extern atomic_unchecked_t fscache_n_allocs_ok;
45159+extern atomic_unchecked_t fscache_n_allocs_wait;
45160+extern atomic_unchecked_t fscache_n_allocs_nobufs;
45161+extern atomic_unchecked_t fscache_n_allocs_intr;
45162+extern atomic_unchecked_t fscache_n_allocs_object_dead;
45163+extern atomic_unchecked_t fscache_n_alloc_ops;
45164+extern atomic_unchecked_t fscache_n_alloc_op_waits;
45165
8308f9c9
MT
45166-extern atomic_t fscache_n_retrievals;
45167-extern atomic_t fscache_n_retrievals_ok;
45168-extern atomic_t fscache_n_retrievals_wait;
45169-extern atomic_t fscache_n_retrievals_nodata;
45170-extern atomic_t fscache_n_retrievals_nobufs;
45171-extern atomic_t fscache_n_retrievals_intr;
45172-extern atomic_t fscache_n_retrievals_nomem;
45173-extern atomic_t fscache_n_retrievals_object_dead;
45174-extern atomic_t fscache_n_retrieval_ops;
45175-extern atomic_t fscache_n_retrieval_op_waits;
8308f9c9
MT
45176+extern atomic_unchecked_t fscache_n_retrievals;
45177+extern atomic_unchecked_t fscache_n_retrievals_ok;
45178+extern atomic_unchecked_t fscache_n_retrievals_wait;
45179+extern atomic_unchecked_t fscache_n_retrievals_nodata;
45180+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
45181+extern atomic_unchecked_t fscache_n_retrievals_intr;
45182+extern atomic_unchecked_t fscache_n_retrievals_nomem;
45183+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
45184+extern atomic_unchecked_t fscache_n_retrieval_ops;
45185+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
fe2de317
MT
45186
45187-extern atomic_t fscache_n_stores;
45188-extern atomic_t fscache_n_stores_ok;
45189-extern atomic_t fscache_n_stores_again;
45190-extern atomic_t fscache_n_stores_nobufs;
45191-extern atomic_t fscache_n_stores_oom;
45192-extern atomic_t fscache_n_store_ops;
45193-extern atomic_t fscache_n_store_calls;
45194-extern atomic_t fscache_n_store_pages;
45195-extern atomic_t fscache_n_store_radix_deletes;
45196-extern atomic_t fscache_n_store_pages_over_limit;
8308f9c9
MT
45197+extern atomic_unchecked_t fscache_n_stores;
45198+extern atomic_unchecked_t fscache_n_stores_ok;
45199+extern atomic_unchecked_t fscache_n_stores_again;
45200+extern atomic_unchecked_t fscache_n_stores_nobufs;
45201+extern atomic_unchecked_t fscache_n_stores_oom;
45202+extern atomic_unchecked_t fscache_n_store_ops;
45203+extern atomic_unchecked_t fscache_n_store_calls;
45204+extern atomic_unchecked_t fscache_n_store_pages;
45205+extern atomic_unchecked_t fscache_n_store_radix_deletes;
45206+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
fe2de317
MT
45207
45208-extern atomic_t fscache_n_store_vmscan_not_storing;
45209-extern atomic_t fscache_n_store_vmscan_gone;
45210-extern atomic_t fscache_n_store_vmscan_busy;
45211-extern atomic_t fscache_n_store_vmscan_cancelled;
8308f9c9
MT
45212+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
45213+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
45214+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
45215+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
fe2de317
MT
45216
45217-extern atomic_t fscache_n_marks;
45218-extern atomic_t fscache_n_uncaches;
8308f9c9
MT
45219+extern atomic_unchecked_t fscache_n_marks;
45220+extern atomic_unchecked_t fscache_n_uncaches;
fe2de317
MT
45221
45222-extern atomic_t fscache_n_acquires;
45223-extern atomic_t fscache_n_acquires_null;
45224-extern atomic_t fscache_n_acquires_no_cache;
45225-extern atomic_t fscache_n_acquires_ok;
45226-extern atomic_t fscache_n_acquires_nobufs;
45227-extern atomic_t fscache_n_acquires_oom;
8308f9c9
MT
45228+extern atomic_unchecked_t fscache_n_acquires;
45229+extern atomic_unchecked_t fscache_n_acquires_null;
45230+extern atomic_unchecked_t fscache_n_acquires_no_cache;
45231+extern atomic_unchecked_t fscache_n_acquires_ok;
45232+extern atomic_unchecked_t fscache_n_acquires_nobufs;
45233+extern atomic_unchecked_t fscache_n_acquires_oom;
fe2de317
MT
45234
45235-extern atomic_t fscache_n_updates;
45236-extern atomic_t fscache_n_updates_null;
45237-extern atomic_t fscache_n_updates_run;
8308f9c9
MT
45238+extern atomic_unchecked_t fscache_n_updates;
45239+extern atomic_unchecked_t fscache_n_updates_null;
45240+extern atomic_unchecked_t fscache_n_updates_run;
fe2de317
MT
45241
45242-extern atomic_t fscache_n_relinquishes;
45243-extern atomic_t fscache_n_relinquishes_null;
45244-extern atomic_t fscache_n_relinquishes_waitcrt;
45245-extern atomic_t fscache_n_relinquishes_retire;
8308f9c9
MT
45246+extern atomic_unchecked_t fscache_n_relinquishes;
45247+extern atomic_unchecked_t fscache_n_relinquishes_null;
45248+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
45249+extern atomic_unchecked_t fscache_n_relinquishes_retire;
fe2de317
MT
45250
45251-extern atomic_t fscache_n_cookie_index;
45252-extern atomic_t fscache_n_cookie_data;
45253-extern atomic_t fscache_n_cookie_special;
8308f9c9
MT
45254+extern atomic_unchecked_t fscache_n_cookie_index;
45255+extern atomic_unchecked_t fscache_n_cookie_data;
45256+extern atomic_unchecked_t fscache_n_cookie_special;
fe2de317
MT
45257
45258-extern atomic_t fscache_n_object_alloc;
45259-extern atomic_t fscache_n_object_no_alloc;
45260-extern atomic_t fscache_n_object_lookups;
45261-extern atomic_t fscache_n_object_lookups_negative;
45262-extern atomic_t fscache_n_object_lookups_positive;
45263-extern atomic_t fscache_n_object_lookups_timed_out;
45264-extern atomic_t fscache_n_object_created;
45265-extern atomic_t fscache_n_object_avail;
45266-extern atomic_t fscache_n_object_dead;
8308f9c9
MT
45267+extern atomic_unchecked_t fscache_n_object_alloc;
45268+extern atomic_unchecked_t fscache_n_object_no_alloc;
45269+extern atomic_unchecked_t fscache_n_object_lookups;
45270+extern atomic_unchecked_t fscache_n_object_lookups_negative;
45271+extern atomic_unchecked_t fscache_n_object_lookups_positive;
45272+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
45273+extern atomic_unchecked_t fscache_n_object_created;
45274+extern atomic_unchecked_t fscache_n_object_avail;
45275+extern atomic_unchecked_t fscache_n_object_dead;
fe2de317
MT
45276
45277-extern atomic_t fscache_n_checkaux_none;
45278-extern atomic_t fscache_n_checkaux_okay;
45279-extern atomic_t fscache_n_checkaux_update;
45280-extern atomic_t fscache_n_checkaux_obsolete;
8308f9c9
MT
45281+extern atomic_unchecked_t fscache_n_checkaux_none;
45282+extern atomic_unchecked_t fscache_n_checkaux_okay;
45283+extern atomic_unchecked_t fscache_n_checkaux_update;
45284+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
45285
45286 extern atomic_t fscache_n_cop_alloc_object;
45287 extern atomic_t fscache_n_cop_lookup_object;
fe2de317 45288@@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t *stat)
8308f9c9
MT
45289 atomic_inc(stat);
45290 }
45291
45292+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
45293+{
45294+ atomic_inc_unchecked(stat);
45295+}
45296+
45297 static inline void fscache_stat_d(atomic_t *stat)
45298 {
45299 atomic_dec(stat);
fe2de317 45300@@ -267,6 +272,7 @@ extern const struct file_operations fscache_stats_fops;
66a7e928
MT
45301
45302 #define __fscache_stat(stat) (NULL)
45303 #define fscache_stat(stat) do {} while (0)
45304+#define fscache_stat_unchecked(stat) do {} while (0)
45305 #define fscache_stat_d(stat) do {} while (0)
45306 #endif
45307
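
The internal.h hunk above pairs every atomic_unchecked_t extern with a fscache_stat_unchecked() helper and, in the !CONFIG_FSCACHE_STATS branch, with a do {} while (0) macro so call sites compile away exactly as fscache_stat() already does. A sketch of the shape of that dual definition, with a plain C11 atomic standing in for atomic_unchecked_t:

#include <stdatomic.h>

typedef atomic_long atomic_unchecked_stub_t;	/* stand-in, not the PaX type */

#ifdef STATS_ENABLED				/* think CONFIG_FSCACHE_STATS */
static inline void stat_unchecked(atomic_unchecked_stub_t *stat)
{
	atomic_fetch_add(stat, 1);		/* atomic_inc_unchecked(stat) */
}
#else
/* statement-shaped no-op, so "stat_unchecked(&x);" still parses everywhere */
#define stat_unchecked(stat) do {} while (0)
#endif

atomic_unchecked_stub_t n_examples;

void record_example(void)
{
	stat_unchecked(&n_examples);		/* compiles in both configurations */
}

Keeping the disabled form a do {} while (0) macro lets it sit in if/else bodies without braces, which is why the existing fscache_stat() fallback uses the same idiom.
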
fe2de317
MT
45308diff --git a/fs/fscache/object.c b/fs/fscache/object.c
45309index b6b897c..0ffff9c 100644
45310--- a/fs/fscache/object.c
45311+++ b/fs/fscache/object.c
45312@@ -128,7 +128,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
8308f9c9
MT
45313 /* update the object metadata on disk */
45314 case FSCACHE_OBJECT_UPDATING:
45315 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
45316- fscache_stat(&fscache_n_updates_run);
45317+ fscache_stat_unchecked(&fscache_n_updates_run);
45318 fscache_stat(&fscache_n_cop_update_object);
45319 object->cache->ops->update_object(object);
45320 fscache_stat_d(&fscache_n_cop_update_object);
fe2de317 45321@@ -217,7 +217,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
8308f9c9
MT
45322 spin_lock(&object->lock);
45323 object->state = FSCACHE_OBJECT_DEAD;
45324 spin_unlock(&object->lock);
45325- fscache_stat(&fscache_n_object_dead);
45326+ fscache_stat_unchecked(&fscache_n_object_dead);
45327 goto terminal_transit;
45328
45329 /* handle the parent cache of this object being withdrawn from
fe2de317 45330@@ -232,7 +232,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
8308f9c9
MT
45331 spin_lock(&object->lock);
45332 object->state = FSCACHE_OBJECT_DEAD;
45333 spin_unlock(&object->lock);
45334- fscache_stat(&fscache_n_object_dead);
45335+ fscache_stat_unchecked(&fscache_n_object_dead);
45336 goto terminal_transit;
45337
45338 /* complain about the object being woken up once it is
fe2de317 45339@@ -461,7 +461,7 @@ static void fscache_lookup_object(struct fscache_object *object)
8308f9c9
MT
45340 parent->cookie->def->name, cookie->def->name,
45341 object->cache->tag->name);
45342
45343- fscache_stat(&fscache_n_object_lookups);
45344+ fscache_stat_unchecked(&fscache_n_object_lookups);
45345 fscache_stat(&fscache_n_cop_lookup_object);
45346 ret = object->cache->ops->lookup_object(object);
45347 fscache_stat_d(&fscache_n_cop_lookup_object);
fe2de317 45348@@ -472,7 +472,7 @@ static void fscache_lookup_object(struct fscache_object *object)
8308f9c9
MT
45349 if (ret == -ETIMEDOUT) {
45350 /* probably stuck behind another object, so move this one to
45351 * the back of the queue */
45352- fscache_stat(&fscache_n_object_lookups_timed_out);
45353+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
45354 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45355 }
45356
fe2de317 45357@@ -495,7 +495,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
8308f9c9
MT
45358
45359 spin_lock(&object->lock);
45360 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45361- fscache_stat(&fscache_n_object_lookups_negative);
45362+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
45363
45364 /* transit here to allow write requests to begin stacking up
45365 * and read requests to begin returning ENODATA */
fe2de317 45366@@ -541,7 +541,7 @@ void fscache_obtained_object(struct fscache_object *object)
8308f9c9
MT
45367 * result, in which case there may be data available */
45368 spin_lock(&object->lock);
45369 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
45370- fscache_stat(&fscache_n_object_lookups_positive);
45371+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
45372
45373 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
45374
fe2de317 45375@@ -555,7 +555,7 @@ void fscache_obtained_object(struct fscache_object *object)
8308f9c9
MT
45376 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
45377 } else {
45378 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
45379- fscache_stat(&fscache_n_object_created);
45380+ fscache_stat_unchecked(&fscache_n_object_created);
45381
45382 object->state = FSCACHE_OBJECT_AVAILABLE;
45383 spin_unlock(&object->lock);
fe2de317 45384@@ -602,7 +602,7 @@ static void fscache_object_available(struct fscache_object *object)
8308f9c9
MT
45385 fscache_enqueue_dependents(object);
45386
45387 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
45388- fscache_stat(&fscache_n_object_avail);
45389+ fscache_stat_unchecked(&fscache_n_object_avail);
45390
45391 _leave("");
45392 }
fe2de317 45393@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
8308f9c9
MT
45394 enum fscache_checkaux result;
45395
45396 if (!object->cookie->def->check_aux) {
45397- fscache_stat(&fscache_n_checkaux_none);
45398+ fscache_stat_unchecked(&fscache_n_checkaux_none);
45399 return FSCACHE_CHECKAUX_OKAY;
45400 }
45401
fe2de317 45402@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
8308f9c9
MT
45403 switch (result) {
45404 /* entry okay as is */
45405 case FSCACHE_CHECKAUX_OKAY:
45406- fscache_stat(&fscache_n_checkaux_okay);
45407+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
45408 break;
45409
45410 /* entry requires update */
45411 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
45412- fscache_stat(&fscache_n_checkaux_update);
45413+ fscache_stat_unchecked(&fscache_n_checkaux_update);
45414 break;
45415
45416 /* entry requires deletion */
45417 case FSCACHE_CHECKAUX_OBSOLETE:
45418- fscache_stat(&fscache_n_checkaux_obsolete);
45419+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
45420 break;
45421
45422 default:
fe2de317
MT
45423diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
45424index 30afdfa..2256596 100644
45425--- a/fs/fscache/operation.c
45426+++ b/fs/fscache/operation.c
8308f9c9
MT
45427@@ -17,7 +17,7 @@
45428 #include <linux/slab.h>
45429 #include "internal.h"
45430
45431-atomic_t fscache_op_debug_id;
45432+atomic_unchecked_t fscache_op_debug_id;
45433 EXPORT_SYMBOL(fscache_op_debug_id);
45434
45435 /**
fe2de317 45436@@ -38,7 +38,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
8308f9c9
MT
45437 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
45438 ASSERTCMP(atomic_read(&op->usage), >, 0);
45439
45440- fscache_stat(&fscache_n_op_enqueue);
45441+ fscache_stat_unchecked(&fscache_n_op_enqueue);
45442 switch (op->flags & FSCACHE_OP_TYPE) {
45443 case FSCACHE_OP_ASYNC:
45444 _debug("queue async");
fe2de317 45445@@ -69,7 +69,7 @@ static void fscache_run_op(struct fscache_object *object,
8308f9c9
MT
45446 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
45447 if (op->processor)
45448 fscache_enqueue_operation(op);
45449- fscache_stat(&fscache_n_op_run);
45450+ fscache_stat_unchecked(&fscache_n_op_run);
45451 }
45452
45453 /*
fe2de317 45454@@ -98,11 +98,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
8308f9c9
MT
45455 if (object->n_ops > 1) {
45456 atomic_inc(&op->usage);
45457 list_add_tail(&op->pend_link, &object->pending_ops);
45458- fscache_stat(&fscache_n_op_pend);
45459+ fscache_stat_unchecked(&fscache_n_op_pend);
45460 } else if (!list_empty(&object->pending_ops)) {
45461 atomic_inc(&op->usage);
45462 list_add_tail(&op->pend_link, &object->pending_ops);
45463- fscache_stat(&fscache_n_op_pend);
45464+ fscache_stat_unchecked(&fscache_n_op_pend);
45465 fscache_start_operations(object);
45466 } else {
45467 ASSERTCMP(object->n_in_progress, ==, 0);
fe2de317 45468@@ -118,7 +118,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
8308f9c9
MT
45469 object->n_exclusive++; /* reads and writes must wait */
45470 atomic_inc(&op->usage);
45471 list_add_tail(&op->pend_link, &object->pending_ops);
45472- fscache_stat(&fscache_n_op_pend);
45473+ fscache_stat_unchecked(&fscache_n_op_pend);
45474 ret = 0;
45475 } else {
45476 /* not allowed to submit ops in any other state */
fe2de317 45477@@ -203,11 +203,11 @@ int fscache_submit_op(struct fscache_object *object,
8308f9c9
MT
45478 if (object->n_exclusive > 0) {
45479 atomic_inc(&op->usage);
45480 list_add_tail(&op->pend_link, &object->pending_ops);
45481- fscache_stat(&fscache_n_op_pend);
45482+ fscache_stat_unchecked(&fscache_n_op_pend);
45483 } else if (!list_empty(&object->pending_ops)) {
45484 atomic_inc(&op->usage);
45485 list_add_tail(&op->pend_link, &object->pending_ops);
45486- fscache_stat(&fscache_n_op_pend);
45487+ fscache_stat_unchecked(&fscache_n_op_pend);
45488 fscache_start_operations(object);
45489 } else {
45490 ASSERTCMP(object->n_exclusive, ==, 0);
fe2de317 45491@@ -219,12 +219,12 @@ int fscache_submit_op(struct fscache_object *object,
8308f9c9
MT
45492 object->n_ops++;
45493 atomic_inc(&op->usage);
45494 list_add_tail(&op->pend_link, &object->pending_ops);
45495- fscache_stat(&fscache_n_op_pend);
45496+ fscache_stat_unchecked(&fscache_n_op_pend);
45497 ret = 0;
45498 } else if (object->state == FSCACHE_OBJECT_DYING ||
45499 object->state == FSCACHE_OBJECT_LC_DYING ||
45500 object->state == FSCACHE_OBJECT_WITHDRAWING) {
45501- fscache_stat(&fscache_n_op_rejected);
45502+ fscache_stat_unchecked(&fscache_n_op_rejected);
45503 ret = -ENOBUFS;
45504 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
45505 fscache_report_unexpected_submission(object, op, ostate);
fe2de317 45506@@ -294,7 +294,7 @@ int fscache_cancel_op(struct fscache_operation *op)
8308f9c9
MT
45507
45508 ret = -EBUSY;
45509 if (!list_empty(&op->pend_link)) {
45510- fscache_stat(&fscache_n_op_cancelled);
45511+ fscache_stat_unchecked(&fscache_n_op_cancelled);
45512 list_del_init(&op->pend_link);
45513 object->n_ops--;
45514 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
fe2de317 45515@@ -331,7 +331,7 @@ void fscache_put_operation(struct fscache_operation *op)
8308f9c9
MT
45516 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
45517 BUG();
45518
45519- fscache_stat(&fscache_n_op_release);
45520+ fscache_stat_unchecked(&fscache_n_op_release);
45521
45522 if (op->release) {
45523 op->release(op);
fe2de317 45524@@ -348,7 +348,7 @@ void fscache_put_operation(struct fscache_operation *op)
8308f9c9
MT
45525 * lock, and defer it otherwise */
45526 if (!spin_trylock(&object->lock)) {
45527 _debug("defer put");
45528- fscache_stat(&fscache_n_op_deferred_release);
45529+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
45530
45531 cache = object->cache;
45532 spin_lock(&cache->op_gc_list_lock);
fe2de317 45533@@ -410,7 +410,7 @@ void fscache_operation_gc(struct work_struct *work)
8308f9c9
MT
45534
45535 _debug("GC DEFERRED REL OBJ%x OP%x",
45536 object->debug_id, op->debug_id);
45537- fscache_stat(&fscache_n_op_gc);
45538+ fscache_stat_unchecked(&fscache_n_op_gc);
45539
45540 ASSERTCMP(atomic_read(&op->usage), ==, 0);
45541
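
operation.c also converts fscache_op_debug_id, which is not a statistic but an ID allocator: each operation takes the post-increment value as its debug tag (the page.c hunk below shows the atomic_inc_return_unchecked() call site). A long-lived ID counter is expected to wrap eventually, which is exactly the case the unchecked variant is meant for. A userspace model of that allocation pattern:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint op_debug_id;			/* fscache_op_debug_id stand-in */

static unsigned int new_debug_id(void)
{
	/* fetch_add returns the old value; +1 yields the value after the
	 * increment, matching the kernel's atomic_inc_return() result */
	return atomic_fetch_add(&op_debug_id, 1) + 1;
}

int main(void)
{
	unsigned int first  = new_debug_id();
	unsigned int second = new_debug_id();

	printf("op debug ids: %u then %u\n", first, second);	/* 1 then 2 */
	return 0;
}
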
fe2de317
MT
45542diff --git a/fs/fscache/page.c b/fs/fscache/page.c
45543index 3f7a59b..cf196cc 100644
45544--- a/fs/fscache/page.c
45545+++ b/fs/fscache/page.c
45546@@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
8308f9c9
MT
45547 val = radix_tree_lookup(&cookie->stores, page->index);
45548 if (!val) {
45549 rcu_read_unlock();
45550- fscache_stat(&fscache_n_store_vmscan_not_storing);
45551+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
45552 __fscache_uncache_page(cookie, page);
45553 return true;
45554 }
fe2de317 45555@@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
8308f9c9
MT
45556 spin_unlock(&cookie->stores_lock);
45557
45558 if (xpage) {
45559- fscache_stat(&fscache_n_store_vmscan_cancelled);
45560- fscache_stat(&fscache_n_store_radix_deletes);
45561+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
45562+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45563 ASSERTCMP(xpage, ==, page);
45564 } else {
45565- fscache_stat(&fscache_n_store_vmscan_gone);
45566+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
45567 }
45568
45569 wake_up_bit(&cookie->flags, 0);
45570@@ -107,7 +107,7 @@ page_busy:
45571 /* we might want to wait here, but that could deadlock the allocator as
45572 * the work threads writing to the cache may all end up sleeping
45573 * on memory allocation */
45574- fscache_stat(&fscache_n_store_vmscan_busy);
45575+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
45576 return false;
45577 }
45578 EXPORT_SYMBOL(__fscache_maybe_release_page);
fe2de317 45579@@ -131,7 +131,7 @@ static void fscache_end_page_write(struct fscache_object *object,
8308f9c9
MT
45580 FSCACHE_COOKIE_STORING_TAG);
45581 if (!radix_tree_tag_get(&cookie->stores, page->index,
45582 FSCACHE_COOKIE_PENDING_TAG)) {
45583- fscache_stat(&fscache_n_store_radix_deletes);
45584+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
45585 xpage = radix_tree_delete(&cookie->stores, page->index);
45586 }
45587 spin_unlock(&cookie->stores_lock);
fe2de317 45588@@ -152,7 +152,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
8308f9c9
MT
45589
45590 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
45591
45592- fscache_stat(&fscache_n_attr_changed_calls);
45593+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
45594
45595 if (fscache_object_is_active(object)) {
15a11c5b 45596 fscache_stat(&fscache_n_cop_attr_changed);
fe2de317 45597@@ -177,11 +177,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
8308f9c9
MT
45598
45599 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45600
45601- fscache_stat(&fscache_n_attr_changed);
45602+ fscache_stat_unchecked(&fscache_n_attr_changed);
45603
45604 op = kzalloc(sizeof(*op), GFP_KERNEL);
45605 if (!op) {
45606- fscache_stat(&fscache_n_attr_changed_nomem);
45607+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
45608 _leave(" = -ENOMEM");
45609 return -ENOMEM;
45610 }
fe2de317 45611@@ -199,7 +199,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
8308f9c9
MT
45612 if (fscache_submit_exclusive_op(object, op) < 0)
45613 goto nobufs;
45614 spin_unlock(&cookie->lock);
45615- fscache_stat(&fscache_n_attr_changed_ok);
45616+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
45617 fscache_put_operation(op);
45618 _leave(" = 0");
45619 return 0;
fe2de317 45620@@ -207,7 +207,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
8308f9c9
MT
45621 nobufs:
45622 spin_unlock(&cookie->lock);
45623 kfree(op);
45624- fscache_stat(&fscache_n_attr_changed_nobufs);
45625+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
45626 _leave(" = %d", -ENOBUFS);
45627 return -ENOBUFS;
45628 }
fe2de317 45629@@ -243,7 +243,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
8308f9c9
MT
45630 /* allocate a retrieval operation and attempt to submit it */
45631 op = kzalloc(sizeof(*op), GFP_NOIO);
45632 if (!op) {
45633- fscache_stat(&fscache_n_retrievals_nomem);
45634+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45635 return NULL;
45636 }
45637
fe2de317 45638@@ -271,13 +271,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
8308f9c9
MT
45639 return 0;
45640 }
45641
45642- fscache_stat(&fscache_n_retrievals_wait);
45643+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
45644
45645 jif = jiffies;
45646 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
45647 fscache_wait_bit_interruptible,
45648 TASK_INTERRUPTIBLE) != 0) {
45649- fscache_stat(&fscache_n_retrievals_intr);
45650+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45651 _leave(" = -ERESTARTSYS");
45652 return -ERESTARTSYS;
45653 }
fe2de317 45654@@ -295,8 +295,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
8308f9c9
MT
45655 */
45656 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
45657 struct fscache_retrieval *op,
45658- atomic_t *stat_op_waits,
45659- atomic_t *stat_object_dead)
45660+ atomic_unchecked_t *stat_op_waits,
45661+ atomic_unchecked_t *stat_object_dead)
45662 {
45663 int ret;
45664
fe2de317 45665@@ -304,7 +304,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
8308f9c9
MT
45666 goto check_if_dead;
45667
45668 _debug(">>> WT");
45669- fscache_stat(stat_op_waits);
45670+ fscache_stat_unchecked(stat_op_waits);
45671 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
45672 fscache_wait_bit_interruptible,
45673 TASK_INTERRUPTIBLE) < 0) {
fe2de317 45674@@ -321,7 +321,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
8308f9c9
MT
45675
45676 check_if_dead:
45677 if (unlikely(fscache_object_is_dead(object))) {
45678- fscache_stat(stat_object_dead);
45679+ fscache_stat_unchecked(stat_object_dead);
45680 return -ENOBUFS;
45681 }
45682 return 0;
fe2de317 45683@@ -348,7 +348,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
8308f9c9
MT
45684
45685 _enter("%p,%p,,,", cookie, page);
45686
45687- fscache_stat(&fscache_n_retrievals);
45688+ fscache_stat_unchecked(&fscache_n_retrievals);
45689
45690 if (hlist_empty(&cookie->backing_objects))
45691 goto nobufs;
fe2de317 45692@@ -381,7 +381,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
8308f9c9
MT
45693 goto nobufs_unlock;
45694 spin_unlock(&cookie->lock);
45695
45696- fscache_stat(&fscache_n_retrieval_ops);
45697+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45698
45699 /* pin the netfs read context in case we need to do the actual netfs
45700 * read because we've encountered a cache read failure */
fe2de317 45701@@ -411,15 +411,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
8308f9c9
MT
45702
45703 error:
45704 if (ret == -ENOMEM)
45705- fscache_stat(&fscache_n_retrievals_nomem);
45706+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45707 else if (ret == -ERESTARTSYS)
45708- fscache_stat(&fscache_n_retrievals_intr);
45709+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45710 else if (ret == -ENODATA)
45711- fscache_stat(&fscache_n_retrievals_nodata);
45712+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45713 else if (ret < 0)
45714- fscache_stat(&fscache_n_retrievals_nobufs);
45715+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45716 else
45717- fscache_stat(&fscache_n_retrievals_ok);
45718+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45719
45720 fscache_put_retrieval(op);
45721 _leave(" = %d", ret);
15a11c5b 45722@@ -429,7 +429,7 @@ nobufs_unlock:
8308f9c9
MT
45723 spin_unlock(&cookie->lock);
45724 kfree(op);
45725 nobufs:
45726- fscache_stat(&fscache_n_retrievals_nobufs);
45727+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45728 _leave(" = -ENOBUFS");
45729 return -ENOBUFS;
45730 }
fe2de317 45731@@ -467,7 +467,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
8308f9c9
MT
45732
45733 _enter("%p,,%d,,,", cookie, *nr_pages);
45734
45735- fscache_stat(&fscache_n_retrievals);
45736+ fscache_stat_unchecked(&fscache_n_retrievals);
45737
45738 if (hlist_empty(&cookie->backing_objects))
45739 goto nobufs;
fe2de317 45740@@ -497,7 +497,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
8308f9c9
MT
45741 goto nobufs_unlock;
45742 spin_unlock(&cookie->lock);
45743
45744- fscache_stat(&fscache_n_retrieval_ops);
45745+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
45746
45747 /* pin the netfs read context in case we need to do the actual netfs
45748 * read because we've encountered a cache read failure */
fe2de317 45749@@ -527,15 +527,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
8308f9c9
MT
45750
45751 error:
45752 if (ret == -ENOMEM)
45753- fscache_stat(&fscache_n_retrievals_nomem);
45754+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
45755 else if (ret == -ERESTARTSYS)
45756- fscache_stat(&fscache_n_retrievals_intr);
45757+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
45758 else if (ret == -ENODATA)
45759- fscache_stat(&fscache_n_retrievals_nodata);
45760+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
45761 else if (ret < 0)
45762- fscache_stat(&fscache_n_retrievals_nobufs);
45763+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45764 else
45765- fscache_stat(&fscache_n_retrievals_ok);
45766+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
45767
45768 fscache_put_retrieval(op);
45769 _leave(" = %d", ret);
15a11c5b 45770@@ -545,7 +545,7 @@ nobufs_unlock:
8308f9c9
MT
45771 spin_unlock(&cookie->lock);
45772 kfree(op);
45773 nobufs:
45774- fscache_stat(&fscache_n_retrievals_nobufs);
45775+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
45776 _leave(" = -ENOBUFS");
45777 return -ENOBUFS;
45778 }
fe2de317 45779@@ -569,7 +569,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
8308f9c9
MT
45780
45781 _enter("%p,%p,,,", cookie, page);
45782
45783- fscache_stat(&fscache_n_allocs);
45784+ fscache_stat_unchecked(&fscache_n_allocs);
45785
45786 if (hlist_empty(&cookie->backing_objects))
45787 goto nobufs;
fe2de317 45788@@ -595,7 +595,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
8308f9c9
MT
45789 goto nobufs_unlock;
45790 spin_unlock(&cookie->lock);
45791
45792- fscache_stat(&fscache_n_alloc_ops);
45793+ fscache_stat_unchecked(&fscache_n_alloc_ops);
45794
45795 ret = fscache_wait_for_retrieval_activation(
45796 object, op,
fe2de317 45797@@ -611,11 +611,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
8308f9c9
MT
45798
45799 error:
45800 if (ret == -ERESTARTSYS)
45801- fscache_stat(&fscache_n_allocs_intr);
45802+ fscache_stat_unchecked(&fscache_n_allocs_intr);
45803 else if (ret < 0)
45804- fscache_stat(&fscache_n_allocs_nobufs);
45805+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45806 else
45807- fscache_stat(&fscache_n_allocs_ok);
45808+ fscache_stat_unchecked(&fscache_n_allocs_ok);
45809
45810 fscache_put_retrieval(op);
45811 _leave(" = %d", ret);
15a11c5b 45812@@ -625,7 +625,7 @@ nobufs_unlock:
8308f9c9
MT
45813 spin_unlock(&cookie->lock);
45814 kfree(op);
45815 nobufs:
45816- fscache_stat(&fscache_n_allocs_nobufs);
45817+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
45818 _leave(" = -ENOBUFS");
45819 return -ENOBUFS;
45820 }
fe2de317 45821@@ -666,7 +666,7 @@ static void fscache_write_op(struct fscache_operation *_op)
8308f9c9
MT
45822
45823 spin_lock(&cookie->stores_lock);
45824
45825- fscache_stat(&fscache_n_store_calls);
45826+ fscache_stat_unchecked(&fscache_n_store_calls);
45827
45828 /* find a page to store */
45829 page = NULL;
fe2de317 45830@@ -677,7 +677,7 @@ static void fscache_write_op(struct fscache_operation *_op)
8308f9c9
MT
45831 page = results[0];
45832 _debug("gang %d [%lx]", n, page->index);
45833 if (page->index > op->store_limit) {
45834- fscache_stat(&fscache_n_store_pages_over_limit);
45835+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
45836 goto superseded;
45837 }
45838
fe2de317 45839@@ -689,7 +689,7 @@ static void fscache_write_op(struct fscache_operation *_op)
15a11c5b 45840 spin_unlock(&cookie->stores_lock);
8308f9c9
MT
45841 spin_unlock(&object->lock);
45842
8308f9c9
MT
45843- fscache_stat(&fscache_n_store_pages);
45844+ fscache_stat_unchecked(&fscache_n_store_pages);
45845 fscache_stat(&fscache_n_cop_write_page);
45846 ret = object->cache->ops->write_page(op, page);
45847 fscache_stat_d(&fscache_n_cop_write_page);
fe2de317 45848@@ -757,7 +757,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
8308f9c9
MT
45849 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45850 ASSERT(PageFsCache(page));
45851
45852- fscache_stat(&fscache_n_stores);
45853+ fscache_stat_unchecked(&fscache_n_stores);
45854
45855 op = kzalloc(sizeof(*op), GFP_NOIO);
45856 if (!op)
fe2de317 45857@@ -808,7 +808,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
8308f9c9
MT
45858 spin_unlock(&cookie->stores_lock);
45859 spin_unlock(&object->lock);
45860
45861- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
45862+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
45863 op->store_limit = object->store_limit;
45864
45865 if (fscache_submit_op(object, &op->op) < 0)
fe2de317 45866@@ -816,8 +816,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
8308f9c9
MT
45867
45868 spin_unlock(&cookie->lock);
45869 radix_tree_preload_end();
45870- fscache_stat(&fscache_n_store_ops);
45871- fscache_stat(&fscache_n_stores_ok);
45872+ fscache_stat_unchecked(&fscache_n_store_ops);
45873+ fscache_stat_unchecked(&fscache_n_stores_ok);
45874
45875 /* the work queue now carries its own ref on the object */
45876 fscache_put_operation(&op->op);
fe2de317 45877@@ -825,14 +825,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
8308f9c9
MT
45878 return 0;
45879
45880 already_queued:
45881- fscache_stat(&fscache_n_stores_again);
45882+ fscache_stat_unchecked(&fscache_n_stores_again);
45883 already_pending:
45884 spin_unlock(&cookie->stores_lock);
45885 spin_unlock(&object->lock);
45886 spin_unlock(&cookie->lock);
45887 radix_tree_preload_end();
45888 kfree(op);
45889- fscache_stat(&fscache_n_stores_ok);
45890+ fscache_stat_unchecked(&fscache_n_stores_ok);
45891 _leave(" = 0");
45892 return 0;
45893
15a11c5b 45894@@ -851,14 +851,14 @@ nobufs:
8308f9c9
MT
45895 spin_unlock(&cookie->lock);
45896 radix_tree_preload_end();
45897 kfree(op);
45898- fscache_stat(&fscache_n_stores_nobufs);
45899+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
45900 _leave(" = -ENOBUFS");
45901 return -ENOBUFS;
45902
45903 nomem_free:
45904 kfree(op);
45905 nomem:
45906- fscache_stat(&fscache_n_stores_oom);
45907+ fscache_stat_unchecked(&fscache_n_stores_oom);
45908 _leave(" = -ENOMEM");
45909 return -ENOMEM;
45910 }
fe2de317 45911@@ -876,7 +876,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
8308f9c9
MT
45912 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
45913 ASSERTCMP(page, !=, NULL);
45914
45915- fscache_stat(&fscache_n_uncaches);
45916+ fscache_stat_unchecked(&fscache_n_uncaches);
45917
45918 /* cache withdrawal may beat us to it */
45919 if (!PageFsCache(page))
fe2de317 45920@@ -929,7 +929,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
8308f9c9
MT
45921 unsigned long loop;
45922
45923 #ifdef CONFIG_FSCACHE_STATS
45924- atomic_add(pagevec->nr, &fscache_n_marks);
45925+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
45926 #endif
45927
45928 for (loop = 0; loop < pagevec->nr; loop++) {
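
The final page.c hunk is the one place a counter is advanced by more than one at a time: fscache_mark_pages_cached() adds a whole pagevec's worth of pages to fscache_n_marks under CONFIG_FSCACHE_STATS, so the conversion needs atomic_add_unchecked() rather than a single increment. A small model of that bulk update:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong n_marks;			/* fscache_n_marks stand-in */

static void mark_pages_cached(unsigned long nr_pages)
{
	/* one atomic add for the whole batch, not nr_pages single increments */
	atomic_fetch_add(&n_marks, nr_pages);
}

int main(void)
{
	mark_pages_cached(14);
	mark_pages_cached(3);
	printf("pages marked: %lu\n", atomic_load(&n_marks));	/* 17 */
	return 0;
}
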
fe2de317
MT
45929diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
45930index 4765190..2a067f2 100644
45931--- a/fs/fscache/stats.c
45932+++ b/fs/fscache/stats.c
8308f9c9
MT
45933@@ -18,95 +18,95 @@
45934 /*
45935 * operation counters
45936 */
45937-atomic_t fscache_n_op_pend;
45938-atomic_t fscache_n_op_run;
45939-atomic_t fscache_n_op_enqueue;
45940-atomic_t fscache_n_op_requeue;
45941-atomic_t fscache_n_op_deferred_release;
45942-atomic_t fscache_n_op_release;
45943-atomic_t fscache_n_op_gc;
45944-atomic_t fscache_n_op_cancelled;
45945-atomic_t fscache_n_op_rejected;
fe2de317
MT
45946+atomic_unchecked_t fscache_n_op_pend;
45947+atomic_unchecked_t fscache_n_op_run;
45948+atomic_unchecked_t fscache_n_op_enqueue;
45949+atomic_unchecked_t fscache_n_op_requeue;
45950+atomic_unchecked_t fscache_n_op_deferred_release;
45951+atomic_unchecked_t fscache_n_op_release;
45952+atomic_unchecked_t fscache_n_op_gc;
45953+atomic_unchecked_t fscache_n_op_cancelled;
45954+atomic_unchecked_t fscache_n_op_rejected;
45955
8308f9c9
MT
45956-atomic_t fscache_n_attr_changed;
45957-atomic_t fscache_n_attr_changed_ok;
45958-atomic_t fscache_n_attr_changed_nobufs;
45959-atomic_t fscache_n_attr_changed_nomem;
45960-atomic_t fscache_n_attr_changed_calls;
fe2de317
MT
45961+atomic_unchecked_t fscache_n_attr_changed;
45962+atomic_unchecked_t fscache_n_attr_changed_ok;
45963+atomic_unchecked_t fscache_n_attr_changed_nobufs;
45964+atomic_unchecked_t fscache_n_attr_changed_nomem;
45965+atomic_unchecked_t fscache_n_attr_changed_calls;
45966
8308f9c9
MT
45967-atomic_t fscache_n_allocs;
45968-atomic_t fscache_n_allocs_ok;
45969-atomic_t fscache_n_allocs_wait;
45970-atomic_t fscache_n_allocs_nobufs;
45971-atomic_t fscache_n_allocs_intr;
45972-atomic_t fscache_n_allocs_object_dead;
45973-atomic_t fscache_n_alloc_ops;
45974-atomic_t fscache_n_alloc_op_waits;
fe2de317
MT
45975+atomic_unchecked_t fscache_n_allocs;
45976+atomic_unchecked_t fscache_n_allocs_ok;
45977+atomic_unchecked_t fscache_n_allocs_wait;
45978+atomic_unchecked_t fscache_n_allocs_nobufs;
45979+atomic_unchecked_t fscache_n_allocs_intr;
45980+atomic_unchecked_t fscache_n_allocs_object_dead;
45981+atomic_unchecked_t fscache_n_alloc_ops;
45982+atomic_unchecked_t fscache_n_alloc_op_waits;
45983
8308f9c9
MT
45984-atomic_t fscache_n_retrievals;
45985-atomic_t fscache_n_retrievals_ok;
45986-atomic_t fscache_n_retrievals_wait;
45987-atomic_t fscache_n_retrievals_nodata;
45988-atomic_t fscache_n_retrievals_nobufs;
45989-atomic_t fscache_n_retrievals_intr;
45990-atomic_t fscache_n_retrievals_nomem;
45991-atomic_t fscache_n_retrievals_object_dead;
45992-atomic_t fscache_n_retrieval_ops;
45993-atomic_t fscache_n_retrieval_op_waits;
8308f9c9
MT
45994+atomic_unchecked_t fscache_n_retrievals;
45995+atomic_unchecked_t fscache_n_retrievals_ok;
45996+atomic_unchecked_t fscache_n_retrievals_wait;
45997+atomic_unchecked_t fscache_n_retrievals_nodata;
45998+atomic_unchecked_t fscache_n_retrievals_nobufs;
45999+atomic_unchecked_t fscache_n_retrievals_intr;
46000+atomic_unchecked_t fscache_n_retrievals_nomem;
46001+atomic_unchecked_t fscache_n_retrievals_object_dead;
46002+atomic_unchecked_t fscache_n_retrieval_ops;
46003+atomic_unchecked_t fscache_n_retrieval_op_waits;
fe2de317
MT
46004
46005-atomic_t fscache_n_stores;
46006-atomic_t fscache_n_stores_ok;
46007-atomic_t fscache_n_stores_again;
46008-atomic_t fscache_n_stores_nobufs;
46009-atomic_t fscache_n_stores_oom;
46010-atomic_t fscache_n_store_ops;
46011-atomic_t fscache_n_store_calls;
46012-atomic_t fscache_n_store_pages;
46013-atomic_t fscache_n_store_radix_deletes;
46014-atomic_t fscache_n_store_pages_over_limit;
8308f9c9
MT
46015+atomic_unchecked_t fscache_n_stores;
46016+atomic_unchecked_t fscache_n_stores_ok;
46017+atomic_unchecked_t fscache_n_stores_again;
46018+atomic_unchecked_t fscache_n_stores_nobufs;
46019+atomic_unchecked_t fscache_n_stores_oom;
46020+atomic_unchecked_t fscache_n_store_ops;
46021+atomic_unchecked_t fscache_n_store_calls;
46022+atomic_unchecked_t fscache_n_store_pages;
46023+atomic_unchecked_t fscache_n_store_radix_deletes;
46024+atomic_unchecked_t fscache_n_store_pages_over_limit;
fe2de317
MT
46025
46026-atomic_t fscache_n_store_vmscan_not_storing;
46027-atomic_t fscache_n_store_vmscan_gone;
46028-atomic_t fscache_n_store_vmscan_busy;
46029-atomic_t fscache_n_store_vmscan_cancelled;
8308f9c9
MT
46030+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
46031+atomic_unchecked_t fscache_n_store_vmscan_gone;
46032+atomic_unchecked_t fscache_n_store_vmscan_busy;
46033+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
fe2de317
MT
46034
46035-atomic_t fscache_n_marks;
46036-atomic_t fscache_n_uncaches;
8308f9c9
MT
46037+atomic_unchecked_t fscache_n_marks;
46038+atomic_unchecked_t fscache_n_uncaches;
fe2de317
MT
46039
46040-atomic_t fscache_n_acquires;
46041-atomic_t fscache_n_acquires_null;
46042-atomic_t fscache_n_acquires_no_cache;
46043-atomic_t fscache_n_acquires_ok;
46044-atomic_t fscache_n_acquires_nobufs;
46045-atomic_t fscache_n_acquires_oom;
8308f9c9
MT
46046+atomic_unchecked_t fscache_n_acquires;
46047+atomic_unchecked_t fscache_n_acquires_null;
46048+atomic_unchecked_t fscache_n_acquires_no_cache;
46049+atomic_unchecked_t fscache_n_acquires_ok;
46050+atomic_unchecked_t fscache_n_acquires_nobufs;
46051+atomic_unchecked_t fscache_n_acquires_oom;
fe2de317
MT
46052
46053-atomic_t fscache_n_updates;
46054-atomic_t fscache_n_updates_null;
46055-atomic_t fscache_n_updates_run;
8308f9c9
MT
46056+atomic_unchecked_t fscache_n_updates;
46057+atomic_unchecked_t fscache_n_updates_null;
46058+atomic_unchecked_t fscache_n_updates_run;
fe2de317
MT
46059
46060-atomic_t fscache_n_relinquishes;
46061-atomic_t fscache_n_relinquishes_null;
46062-atomic_t fscache_n_relinquishes_waitcrt;
46063-atomic_t fscache_n_relinquishes_retire;
8308f9c9
MT
46064+atomic_unchecked_t fscache_n_relinquishes;
46065+atomic_unchecked_t fscache_n_relinquishes_null;
46066+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
46067+atomic_unchecked_t fscache_n_relinquishes_retire;
fe2de317
MT
46068
46069-atomic_t fscache_n_cookie_index;
46070-atomic_t fscache_n_cookie_data;
46071-atomic_t fscache_n_cookie_special;
8308f9c9
MT
46072+atomic_unchecked_t fscache_n_cookie_index;
46073+atomic_unchecked_t fscache_n_cookie_data;
46074+atomic_unchecked_t fscache_n_cookie_special;
fe2de317
MT
46075
46076-atomic_t fscache_n_object_alloc;
46077-atomic_t fscache_n_object_no_alloc;
46078-atomic_t fscache_n_object_lookups;
46079-atomic_t fscache_n_object_lookups_negative;
46080-atomic_t fscache_n_object_lookups_positive;
46081-atomic_t fscache_n_object_lookups_timed_out;
46082-atomic_t fscache_n_object_created;
46083-atomic_t fscache_n_object_avail;
46084-atomic_t fscache_n_object_dead;
8308f9c9
MT
46085+atomic_unchecked_t fscache_n_object_alloc;
46086+atomic_unchecked_t fscache_n_object_no_alloc;
46087+atomic_unchecked_t fscache_n_object_lookups;
46088+atomic_unchecked_t fscache_n_object_lookups_negative;
46089+atomic_unchecked_t fscache_n_object_lookups_positive;
46090+atomic_unchecked_t fscache_n_object_lookups_timed_out;
46091+atomic_unchecked_t fscache_n_object_created;
46092+atomic_unchecked_t fscache_n_object_avail;
46093+atomic_unchecked_t fscache_n_object_dead;
fe2de317
MT
46094
46095-atomic_t fscache_n_checkaux_none;
46096-atomic_t fscache_n_checkaux_okay;
46097-atomic_t fscache_n_checkaux_update;
46098-atomic_t fscache_n_checkaux_obsolete;
8308f9c9
MT
46099+atomic_unchecked_t fscache_n_checkaux_none;
46100+atomic_unchecked_t fscache_n_checkaux_okay;
46101+atomic_unchecked_t fscache_n_checkaux_update;
46102+atomic_unchecked_t fscache_n_checkaux_obsolete;
46103
46104 atomic_t fscache_n_cop_alloc_object;
46105 atomic_t fscache_n_cop_lookup_object;
fe2de317 46106@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
8308f9c9
MT
46107 seq_puts(m, "FS-Cache statistics\n");
46108
46109 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
46110- atomic_read(&fscache_n_cookie_index),
46111- atomic_read(&fscache_n_cookie_data),
46112- atomic_read(&fscache_n_cookie_special));
46113+ atomic_read_unchecked(&fscache_n_cookie_index),
46114+ atomic_read_unchecked(&fscache_n_cookie_data),
46115+ atomic_read_unchecked(&fscache_n_cookie_special));
46116
46117 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
46118- atomic_read(&fscache_n_object_alloc),
46119- atomic_read(&fscache_n_object_no_alloc),
46120- atomic_read(&fscache_n_object_avail),
46121- atomic_read(&fscache_n_object_dead));
46122+ atomic_read_unchecked(&fscache_n_object_alloc),
46123+ atomic_read_unchecked(&fscache_n_object_no_alloc),
46124+ atomic_read_unchecked(&fscache_n_object_avail),
46125+ atomic_read_unchecked(&fscache_n_object_dead));
46126 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
46127- atomic_read(&fscache_n_checkaux_none),
46128- atomic_read(&fscache_n_checkaux_okay),
46129- atomic_read(&fscache_n_checkaux_update),
46130- atomic_read(&fscache_n_checkaux_obsolete));
46131+ atomic_read_unchecked(&fscache_n_checkaux_none),
46132+ atomic_read_unchecked(&fscache_n_checkaux_okay),
46133+ atomic_read_unchecked(&fscache_n_checkaux_update),
46134+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
46135
46136 seq_printf(m, "Pages : mrk=%u unc=%u\n",
46137- atomic_read(&fscache_n_marks),
46138- atomic_read(&fscache_n_uncaches));
46139+ atomic_read_unchecked(&fscache_n_marks),
46140+ atomic_read_unchecked(&fscache_n_uncaches));
46141
46142 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
46143 " oom=%u\n",
46144- atomic_read(&fscache_n_acquires),
46145- atomic_read(&fscache_n_acquires_null),
46146- atomic_read(&fscache_n_acquires_no_cache),
46147- atomic_read(&fscache_n_acquires_ok),
46148- atomic_read(&fscache_n_acquires_nobufs),
46149- atomic_read(&fscache_n_acquires_oom));
46150+ atomic_read_unchecked(&fscache_n_acquires),
46151+ atomic_read_unchecked(&fscache_n_acquires_null),
46152+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
46153+ atomic_read_unchecked(&fscache_n_acquires_ok),
46154+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
46155+ atomic_read_unchecked(&fscache_n_acquires_oom));
46156
46157 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
46158- atomic_read(&fscache_n_object_lookups),
46159- atomic_read(&fscache_n_object_lookups_negative),
46160- atomic_read(&fscache_n_object_lookups_positive),
46161- atomic_read(&fscache_n_object_created),
46162- atomic_read(&fscache_n_object_lookups_timed_out));
46163+ atomic_read_unchecked(&fscache_n_object_lookups),
46164+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
46165+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
46166+ atomic_read_unchecked(&fscache_n_object_created),
46167+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
46168
46169 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
46170- atomic_read(&fscache_n_updates),
46171- atomic_read(&fscache_n_updates_null),
46172- atomic_read(&fscache_n_updates_run));
46173+ atomic_read_unchecked(&fscache_n_updates),
46174+ atomic_read_unchecked(&fscache_n_updates_null),
46175+ atomic_read_unchecked(&fscache_n_updates_run));
46176
46177 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
46178- atomic_read(&fscache_n_relinquishes),
46179- atomic_read(&fscache_n_relinquishes_null),
46180- atomic_read(&fscache_n_relinquishes_waitcrt),
46181- atomic_read(&fscache_n_relinquishes_retire));
46182+ atomic_read_unchecked(&fscache_n_relinquishes),
46183+ atomic_read_unchecked(&fscache_n_relinquishes_null),
46184+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
46185+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
46186
46187 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
46188- atomic_read(&fscache_n_attr_changed),
46189- atomic_read(&fscache_n_attr_changed_ok),
46190- atomic_read(&fscache_n_attr_changed_nobufs),
46191- atomic_read(&fscache_n_attr_changed_nomem),
46192- atomic_read(&fscache_n_attr_changed_calls));
46193+ atomic_read_unchecked(&fscache_n_attr_changed),
46194+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
46195+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
46196+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
46197+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
46198
46199 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
46200- atomic_read(&fscache_n_allocs),
46201- atomic_read(&fscache_n_allocs_ok),
46202- atomic_read(&fscache_n_allocs_wait),
46203- atomic_read(&fscache_n_allocs_nobufs),
46204- atomic_read(&fscache_n_allocs_intr));
46205+ atomic_read_unchecked(&fscache_n_allocs),
46206+ atomic_read_unchecked(&fscache_n_allocs_ok),
46207+ atomic_read_unchecked(&fscache_n_allocs_wait),
46208+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
46209+ atomic_read_unchecked(&fscache_n_allocs_intr));
46210 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
46211- atomic_read(&fscache_n_alloc_ops),
46212- atomic_read(&fscache_n_alloc_op_waits),
46213- atomic_read(&fscache_n_allocs_object_dead));
46214+ atomic_read_unchecked(&fscache_n_alloc_ops),
46215+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
46216+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
46217
46218 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
46219 " int=%u oom=%u\n",
46220- atomic_read(&fscache_n_retrievals),
46221- atomic_read(&fscache_n_retrievals_ok),
46222- atomic_read(&fscache_n_retrievals_wait),
46223- atomic_read(&fscache_n_retrievals_nodata),
46224- atomic_read(&fscache_n_retrievals_nobufs),
46225- atomic_read(&fscache_n_retrievals_intr),
46226- atomic_read(&fscache_n_retrievals_nomem));
46227+ atomic_read_unchecked(&fscache_n_retrievals),
46228+ atomic_read_unchecked(&fscache_n_retrievals_ok),
46229+ atomic_read_unchecked(&fscache_n_retrievals_wait),
46230+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
46231+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
46232+ atomic_read_unchecked(&fscache_n_retrievals_intr),
46233+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
46234 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
46235- atomic_read(&fscache_n_retrieval_ops),
46236- atomic_read(&fscache_n_retrieval_op_waits),
46237- atomic_read(&fscache_n_retrievals_object_dead));
46238+ atomic_read_unchecked(&fscache_n_retrieval_ops),
46239+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
46240+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
46241
46242 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
46243- atomic_read(&fscache_n_stores),
46244- atomic_read(&fscache_n_stores_ok),
46245- atomic_read(&fscache_n_stores_again),
46246- atomic_read(&fscache_n_stores_nobufs),
46247- atomic_read(&fscache_n_stores_oom));
46248+ atomic_read_unchecked(&fscache_n_stores),
46249+ atomic_read_unchecked(&fscache_n_stores_ok),
46250+ atomic_read_unchecked(&fscache_n_stores_again),
46251+ atomic_read_unchecked(&fscache_n_stores_nobufs),
46252+ atomic_read_unchecked(&fscache_n_stores_oom));
46253 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
46254- atomic_read(&fscache_n_store_ops),
46255- atomic_read(&fscache_n_store_calls),
46256- atomic_read(&fscache_n_store_pages),
46257- atomic_read(&fscache_n_store_radix_deletes),
46258- atomic_read(&fscache_n_store_pages_over_limit));
46259+ atomic_read_unchecked(&fscache_n_store_ops),
46260+ atomic_read_unchecked(&fscache_n_store_calls),
46261+ atomic_read_unchecked(&fscache_n_store_pages),
46262+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
46263+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
46264
46265 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
46266- atomic_read(&fscache_n_store_vmscan_not_storing),
46267- atomic_read(&fscache_n_store_vmscan_gone),
46268- atomic_read(&fscache_n_store_vmscan_busy),
46269- atomic_read(&fscache_n_store_vmscan_cancelled));
46270+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
46271+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
46272+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
46273+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
46274
46275 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
46276- atomic_read(&fscache_n_op_pend),
46277- atomic_read(&fscache_n_op_run),
46278- atomic_read(&fscache_n_op_enqueue),
46279- atomic_read(&fscache_n_op_cancelled),
46280- atomic_read(&fscache_n_op_rejected));
46281+ atomic_read_unchecked(&fscache_n_op_pend),
46282+ atomic_read_unchecked(&fscache_n_op_run),
46283+ atomic_read_unchecked(&fscache_n_op_enqueue),
46284+ atomic_read_unchecked(&fscache_n_op_cancelled),
46285+ atomic_read_unchecked(&fscache_n_op_rejected));
46286 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
46287- atomic_read(&fscache_n_op_deferred_release),
46288- atomic_read(&fscache_n_op_release),
46289- atomic_read(&fscache_n_op_gc));
46290+ atomic_read_unchecked(&fscache_n_op_deferred_release),
46291+ atomic_read_unchecked(&fscache_n_op_release),
46292+ atomic_read_unchecked(&fscache_n_op_gc));
46293
46294 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
46295 atomic_read(&fscache_n_cop_alloc_object),
fe2de317 46296diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
4c928ab7 46297index 3426521..3b75162 100644
46298--- a/fs/fuse/cuse.c
46299+++ b/fs/fuse/cuse.c
4c928ab7 46300@@ -587,10 +587,12 @@ static int __init cuse_init(void)
46301 INIT_LIST_HEAD(&cuse_conntbl[i]);
46302
15a11c5b 46303 /* inherit and extend fuse_dev_operations */
46304- cuse_channel_fops = fuse_dev_operations;
46305- cuse_channel_fops.owner = THIS_MODULE;
46306- cuse_channel_fops.open = cuse_channel_open;
46307- cuse_channel_fops.release = cuse_channel_release;
46308+ pax_open_kernel();
46309+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
46310+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
46311+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
46312+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
46313+ pax_close_kernel();
46314
46315 cuse_class = class_create(THIS_MODULE, "cuse");
46316 if (IS_ERR(cuse_class))
fe2de317 46317diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
c6e2a6c8 46318index 7df2b5e..5804aa7 100644
46319--- a/fs/fuse/dev.c
46320+++ b/fs/fuse/dev.c
46321@@ -1242,7 +1242,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
46322 ret = 0;
46323 pipe_lock(pipe);
46324
46325- if (!pipe->readers) {
46326+ if (!atomic_read(&pipe->readers)) {
46327 send_sig(SIGPIPE, current, 0);
46328 if (!ret)
46329 ret = -EPIPE;
fe2de317 46330diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
c6e2a6c8 46331index bc43832..0cfe5a6 100644
46332--- a/fs/fuse/dir.c
46333+++ b/fs/fuse/dir.c
c6e2a6c8 46334@@ -1181,7 +1181,7 @@ static char *read_link(struct dentry *dentry)
46335 return link;
46336 }
46337
46338-static void free_link(char *link)
46339+static void free_link(const char *link)
46340 {
46341 if (!IS_ERR(link))
46342 free_page((unsigned long) link);
fe2de317 46343diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
c6e2a6c8 46344index a9ba244..d9df391 100644
46345--- a/fs/gfs2/inode.c
46346+++ b/fs/gfs2/inode.c
c6e2a6c8 46347@@ -1496,7 +1496,7 @@ out:
46348
46349 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46350 {
46351- char *s = nd_get_link(nd);
46352+ const char *s = nd_get_link(nd);
46353 if (!IS_ERR(s))
46354 kfree(s);
46355 }
fe2de317 46356diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
c6e2a6c8 46357index 001ef01..f7d5f07 100644
46358--- a/fs/hugetlbfs/inode.c
46359+++ b/fs/hugetlbfs/inode.c
c6e2a6c8 46360@@ -920,7 +920,7 @@ static struct file_system_type hugetlbfs_fs_type = {
46361 .kill_sb = kill_litter_super,
46362 };
46363
46364-static struct vfsmount *hugetlbfs_vfsmount;
46365+struct vfsmount *hugetlbfs_vfsmount;
46366
46367 static int can_do_hugetlb_shm(void)
46368 {
fe2de317 46369diff --git a/fs/inode.c b/fs/inode.c
c6e2a6c8 46370index 9f4f5fe..6214688 100644
46371--- a/fs/inode.c
46372+++ b/fs/inode.c
c6e2a6c8 46373@@ -860,8 +860,8 @@ unsigned int get_next_ino(void)
58c5fc13 46374
46375 #ifdef CONFIG_SMP
46376 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
46377- static atomic_t shared_last_ino;
46378- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
46379+ static atomic_unchecked_t shared_last_ino;
46380+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
58c5fc13 46381
46382 res = next - LAST_INO_BATCH;
46383 }
fe2de317 46384diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
c6e2a6c8 46385index 4a6cf28..d3a29d3 100644
46386--- a/fs/jffs2/erase.c
46387+++ b/fs/jffs2/erase.c
c6e2a6c8 46388@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
46389 struct jffs2_unknown_node marker = {
46390 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
46391 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46392- .totlen = cpu_to_je32(c->cleanmarker_size)
46393+ .totlen = cpu_to_je32(c->cleanmarker_size),
46394+ .hdr_crc = cpu_to_je32(0)
46395 };
46396
46397 jffs2_prealloc_raw_node_refs(c, jeb, 1);
fe2de317 46398diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
c6e2a6c8 46399index 74d9be1..d5dd140 100644
46400--- a/fs/jffs2/wbuf.c
46401+++ b/fs/jffs2/wbuf.c
c6e2a6c8 46402@@ -1022,7 +1022,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
46403 {
46404 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
46405 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
46406- .totlen = constant_cpu_to_je32(8)
46407+ .totlen = constant_cpu_to_je32(8),
46408+ .hdr_crc = constant_cpu_to_je32(0)
46409 };
46410
46411 /*
fe2de317 46412diff --git a/fs/jfs/super.c b/fs/jfs/super.c
c6e2a6c8 46413index 4a82950..bcaa0cb 100644
46414--- a/fs/jfs/super.c
46415+++ b/fs/jfs/super.c
5e856224 46416@@ -801,7 +801,7 @@ static int __init init_jfs_fs(void)
46417
46418 jfs_inode_cachep =
46419 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
46420- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
46421+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
46422 init_once);
46423 if (jfs_inode_cachep == NULL)
46424 return -ENOMEM;
fe2de317 46425diff --git a/fs/libfs.c b/fs/libfs.c
c6e2a6c8 46426index 18d08f5..fe3dc64 100644
46427--- a/fs/libfs.c
46428+++ b/fs/libfs.c
46429@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46430
46431 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
46432 struct dentry *next;
46433+ char d_name[sizeof(next->d_iname)];
46434+ const unsigned char *name;
46435+
46436 next = list_entry(p, struct dentry, d_u.d_child);
46437 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
46438 if (!simple_positive(next)) {
fe2de317 46439@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
46440
46441 spin_unlock(&next->d_lock);
46442 spin_unlock(&dentry->d_lock);
46443- if (filldir(dirent, next->d_name.name,
46444+ name = next->d_name.name;
46445+ if (name == next->d_iname) {
46446+ memcpy(d_name, name, next->d_name.len);
71d190be 46447+ name = d_name;
66a7e928 46448+ }
46449+ if (filldir(dirent, name,
46450 next->d_name.len, filp->f_pos,
46451 next->d_inode->i_ino,
46452 dt_type(next->d_inode)) < 0)
fe2de317 46453diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
4c928ab7 46454index 8392cb8..80d6193 100644
46455--- a/fs/lockd/clntproc.c
46456+++ b/fs/lockd/clntproc.c
46457@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
46458 /*
46459 * Cookie counter for NLM requests
46460 */
46461-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
46462+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
46463
46464 void nlmclnt_next_cookie(struct nlm_cookie *c)
46465 {
46466- u32 cookie = atomic_inc_return(&nlm_cookie);
46467+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
46468
46469 memcpy(c->data, &cookie, 4);
46470 c->len=4;
fe2de317 46471diff --git a/fs/locks.c b/fs/locks.c
572b4308 46472index 6a64f15..c3dacf2 100644
46473--- a/fs/locks.c
46474+++ b/fs/locks.c
46475@@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
46476 return 0;
46477 }
46478
46479-static int assign_type(struct file_lock *fl, int type)
46480+static int assign_type(struct file_lock *fl, long type)
46481 {
46482 switch (type) {
46483 case F_RDLCK:
46484@@ -445,7 +445,7 @@ static const struct lock_manager_operations lease_manager_ops = {
46485 /*
46486 * Initialize a lease, use the default lock manager operations
46487 */
46488-static int lease_init(struct file *filp, int type, struct file_lock *fl)
46489+static int lease_init(struct file *filp, long type, struct file_lock *fl)
46490 {
46491 if (assign_type(fl, type) != 0)
46492 return -EINVAL;
46493@@ -463,7 +463,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
46494 }
46495
46496 /* Allocate a file_lock initialised to this type of lease */
46497-static struct file_lock *lease_alloc(struct file *filp, int type)
46498+static struct file_lock *lease_alloc(struct file *filp, long type)
46499 {
46500 struct file_lock *fl = locks_alloc_lock();
46501 int error = -ENOMEM;
5e856224 46502@@ -2075,16 +2075,16 @@ void locks_remove_flock(struct file *filp)
46503 return;
46504
46505 if (filp->f_op && filp->f_op->flock) {
46506- struct file_lock fl = {
46507+ struct file_lock flock = {
46508 .fl_pid = current->tgid,
46509 .fl_file = filp,
46510 .fl_flags = FL_FLOCK,
46511 .fl_type = F_UNLCK,
46512 .fl_end = OFFSET_MAX,
46513 };
46514- filp->f_op->flock(filp, F_SETLKW, &fl);
46515- if (fl.fl_ops && fl.fl_ops->fl_release_private)
46516- fl.fl_ops->fl_release_private(&fl);
46517+ filp->f_op->flock(filp, F_SETLKW, &flock);
46518+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
46519+ flock.fl_ops->fl_release_private(&flock);
46520 }
46521
bc901d79 46522 lock_flocks();
fe2de317 46523diff --git a/fs/namei.c b/fs/namei.c
572b4308 46524index c427919..232326c 100644
46525--- a/fs/namei.c
46526+++ b/fs/namei.c
5e856224 46527@@ -278,16 +278,32 @@ int generic_permission(struct inode *inode, int mask)
46528 if (ret != -EACCES)
46529 return ret;
46530
46531+#ifdef CONFIG_GRKERNSEC
46532+ /* we'll block if we have to log due to a denied capability use */
46533+ if (mask & MAY_NOT_BLOCK)
46534+ return -ECHILD;
46535+#endif
46536+
46537 if (S_ISDIR(inode->i_mode)) {
46538 /* DACs are overridable for directories */
46539- if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46540- return 0;
46541 if (!(mask & MAY_WRITE))
46542- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46543+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46544+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46545 return 0;
46546+ if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46547+ return 0;
46548 return -EACCES;
46549 }
bc901d79 46550 /*
66a7e928 46551+ * Searching includes executable on directories, else just read.
6e9df6a3 46552+ */
66a7e928 46553+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
6e9df6a3 46554+ if (mask == MAY_READ)
46555+ if (ns_capable_nolog(inode_userns(inode), CAP_DAC_OVERRIDE) ||
46556+ ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46557+ return 0;
46558+
46559+ /*
46560 * Read/write DACs are always overridable.
46561 * Executable DACs are overridable when there is
46562 * at least one exec bit set.
5e856224 46563@@ -296,14 +312,6 @@ int generic_permission(struct inode *inode, int mask)
6e9df6a3 46564 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
46565 return 0;
46566
6e9df6a3 46567- /*
66a7e928 46568- * Searching includes executable on directories, else just read.
6e9df6a3 46569- */
66a7e928 46570- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
6e9df6a3 46571- if (mask == MAY_READ)
66a7e928 46572- if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
46573- return 0;
46574-
46575 return -EACCES;
46576 }
bc901d79 46577
5e856224 46578@@ -652,11 +660,19 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
46579 return error;
46580 }
46581
46582+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
46583+ dentry->d_inode, dentry, nd->path.mnt)) {
46584+ error = -EACCES;
46585+ *p = ERR_PTR(error); /* no ->put_link(), please */
46586+ path_put(&nd->path);
46587+ return error;
46588+ }
46589+
46590 nd->last_type = LAST_BIND;
46591 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
46592 error = PTR_ERR(*p);
46593 if (!IS_ERR(*p)) {
46594- char *s = nd_get_link(nd);
46595+ const char *s = nd_get_link(nd);
46596 error = 0;
46597 if (s)
46598 error = __vfs_follow_link(nd, s);
46599@@ -1355,6 +1371,9 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
46600 if (!res)
46601 res = walk_component(nd, path, &nd->last,
46602 nd->last_type, LOOKUP_FOLLOW);
46603+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode)) {
46604+ res = -EACCES;
46605+ }
46606 put_link(nd, &link, cookie);
46607 } while (res > 0);
46608
46609@@ -1746,6 +1765,9 @@ static int path_lookupat(int dfd, const char *name,
46610 err = follow_link(&link, nd, &cookie);
46611 if (!err)
46612 err = lookup_last(nd, &path);
46613+ if (!err && gr_handle_symlink_owner(&link, nd->inode)) {
46614+ err = -EACCES;
46615+ }
46616 put_link(nd, &link, cookie);
46617 }
46618 }
46619@@ -1753,6 +1775,21 @@ static int path_lookupat(int dfd, const char *name,
46620 if (!err)
46621 err = complete_walk(nd);
46622
46623+ if (!(nd->flags & LOOKUP_PARENT)) {
46624+#ifdef CONFIG_GRKERNSEC
46625+ if (flags & LOOKUP_RCU) {
46626+ if (!err)
46627+ path_put(&nd->path);
46628+ err = -ECHILD;
46629+ } else
46630+#endif
46631+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46632+ if (!err)
46633+ path_put(&nd->path);
46634+ err = -ENOENT;
46635+ }
46636+ }
46637+
46638 if (!err && nd->flags & LOOKUP_DIRECTORY) {
46639 if (!nd->inode->i_op->lookup) {
46640 path_put(&nd->path);
572b4308 46641@@ -1780,6 +1817,15 @@ static int do_path_lookup(int dfd, const char *name,
66a7e928 46642 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
46643
46644 if (likely(!retval)) {
46645+ if (*name != '/' && nd->path.dentry && nd->inode) {
46646+#ifdef CONFIG_GRKERNSEC
46647+ if (flags & LOOKUP_RCU)
46648+ return -ECHILD;
46649+#endif
46650+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
46651+ return -ENOENT;
46652+ }
46653+
46654 if (unlikely(!audit_dummy_context())) {
46655 if (nd->path.dentry && nd->inode)
46656 audit_inode(name, nd->path.dentry);
572b4308 46657@@ -2126,6 +2172,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
46658 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
46659 return -EPERM;
46660
46661+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
46662+ return -EPERM;
46663+ if (gr_handle_rawio(inode))
46664+ return -EPERM;
46665+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
46666+ return -EACCES;
bc901d79 46667+
4c928ab7 46668 return 0;
46669 }
46670
572b4308 46671@@ -2187,6 +2240,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46672 error = complete_walk(nd);
46673 if (error)
46674 return ERR_PTR(error);
46675+#ifdef CONFIG_GRKERNSEC
46676+ if (nd->flags & LOOKUP_RCU) {
46677+ error = -ECHILD;
46678+ goto exit;
46679+ }
46680+#endif
46681+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46682+ error = -ENOENT;
46683+ goto exit;
46684+ }
46685 audit_inode(pathname, nd->path.dentry);
46686 if (open_flag & O_CREAT) {
46687 error = -EISDIR;
572b4308 46688@@ -2197,6 +2260,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46689 error = complete_walk(nd);
46690 if (error)
46691 return ERR_PTR(error);
46692+#ifdef CONFIG_GRKERNSEC
46693+ if (nd->flags & LOOKUP_RCU) {
46694+ error = -ECHILD;
46695+ goto exit;
46696+ }
46697+#endif
46698+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
46699+ error = -ENOENT;
46700+ goto exit;
46701+ }
46702 audit_inode(pathname, dir);
46703 goto ok;
46704 }
572b4308 46705@@ -2218,6 +2291,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
fe2de317 46706 error = complete_walk(nd);
6e9df6a3 46707 if (error)
4c928ab7 46708 return ERR_PTR(error);
46709+#ifdef CONFIG_GRKERNSEC
46710+ if (nd->flags & LOOKUP_RCU) {
46711+ error = -ECHILD;
46712+ goto exit;
46713+ }
46714+#endif
46715+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
46716+ error = -ENOENT;
46717+ goto exit;
46718+ }
fe2de317 46719
46720 error = -ENOTDIR;
46721 if (nd->flags & LOOKUP_DIRECTORY) {
572b4308 46722@@ -2258,6 +2341,12 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46723 /* Negative dentry, just create the file */
46724 if (!dentry->d_inode) {
5e856224 46725 umode_t mode = op->mode;
66a7e928 46726+
6e9df6a3 46727+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, open_flag, acc_mode, mode)) {
46728+ error = -EACCES;
46729+ goto exit_mutex_unlock;
46730+ }
46731+
46732 if (!IS_POSIXACL(dir->d_inode))
46733 mode &= ~current_umask();
46734 /*
572b4308 46735@@ -2281,6 +2370,8 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46736 error = vfs_create(dir->d_inode, dentry, mode, nd);
46737 if (error)
46738 goto exit_mutex_unlock;
46739+ else
46740+ gr_handle_create(path->dentry, path->mnt);
46741 mutex_unlock(&dir->d_inode->i_mutex);
46742 dput(nd->path.dentry);
46743 nd->path.dentry = dentry;
572b4308 46744@@ -2290,6 +2381,19 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
46745 /*
46746 * It already exists.
46747 */
46748+
46749+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
46750+ error = -ENOENT;
46751+ goto exit_mutex_unlock;
46752+ }
46753+
46754+ /* only check if O_CREAT is specified, all other checks need to go
46755+ into may_open */
6e9df6a3 46756+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
46757+ error = -EACCES;
46758+ goto exit_mutex_unlock;
46759+ }
46760+
46761 mutex_unlock(&dir->d_inode->i_mutex);
df50ba0c 46762 audit_inode(pathname, path->dentry);
58c5fc13 46763
46764@@ -2407,8 +2511,14 @@ static struct file *path_openat(int dfd, const char *pathname,
46765 error = follow_link(&link, nd, &cookie);
46766 if (unlikely(error))
46767 filp = ERR_PTR(error);
46768- else
46769+ else {
46770 filp = do_last(nd, &path, op, pathname);
46771+ if (!IS_ERR(filp) && gr_handle_symlink_owner(&link, nd->inode)) {
46772+ if (filp)
46773+ fput(filp);
46774+ filp = ERR_PTR(-EACCES);
46775+ }
46776+ }
46777 put_link(nd, &link, cookie);
46778 }
46779 out:
46780@@ -2502,6 +2612,11 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
46781 *path = nd.path;
46782 return dentry;
46783 eexist:
46784+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
46785+ dput(dentry);
46786+ dentry = ERR_PTR(-ENOENT);
46787+ goto fail;
46788+ }
46789 dput(dentry);
46790 dentry = ERR_PTR(-EEXIST);
46791 fail:
572b4308 46792@@ -2524,6 +2639,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname, struct pat
46793 }
46794 EXPORT_SYMBOL(user_path_create);
46795
46796+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, char **to, int is_dir)
46797+{
46798+ char *tmp = getname(pathname);
46799+ struct dentry *res;
46800+ if (IS_ERR(tmp))
46801+ return ERR_CAST(tmp);
46802+ res = kern_path_create(dfd, tmp, path, is_dir);
46803+ if (IS_ERR(res))
46804+ putname(tmp);
46805+ else
46806+ *to = tmp;
46807+ return res;
46808+}
46809+
5e856224 46810 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
46811 {
46812 int error = may_create(dir, dentry);
572b4308 46813@@ -2591,6 +2720,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
6e9df6a3 46814 error = mnt_want_write(path.mnt);
46815 if (error)
46816 goto out_dput;
46817+
6e9df6a3 46818+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
58c5fc13 46819+ error = -EPERM;
6e9df6a3 46820+ goto out_drop_write;
46821+ }
46822+
6e9df6a3 46823+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
58c5fc13 46824+ error = -EACCES;
6e9df6a3 46825+ goto out_drop_write;
46826+ }
46827+
6e9df6a3 46828 error = security_path_mknod(&path, dentry, mode, dev);
58c5fc13 46829 if (error)
6e9df6a3 46830 goto out_drop_write;
572b4308 46831@@ -2608,6 +2748,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
46832 }
46833 out_drop_write:
6e9df6a3 46834 mnt_drop_write(path.mnt);
46835+
46836+ if (!error)
6e9df6a3 46837+ gr_handle_create(dentry, path.mnt);
46838 out_dput:
46839 dput(dentry);
6e9df6a3 46840 mutex_unlock(&path.dentry->d_inode->i_mutex);
572b4308 46841@@ -2661,12 +2804,21 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
46842 error = mnt_want_write(path.mnt);
46843 if (error)
46844 goto out_dput;
46845+
46846+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
58c5fc13 46847+ error = -EACCES;
6e9df6a3 46848+ goto out_drop_write;
46849+ }
46850+
46851 error = security_path_mkdir(&path, dentry, mode);
46852 if (error)
46853 goto out_drop_write;
46854 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
58c5fc13 46855 out_drop_write:
6e9df6a3 46856 mnt_drop_write(path.mnt);
46857+
46858+ if (!error)
6e9df6a3 46859+ gr_handle_create(dentry, path.mnt);
46860 out_dput:
46861 dput(dentry);
6e9df6a3 46862 mutex_unlock(&path.dentry->d_inode->i_mutex);
572b4308 46863@@ -2746,6 +2898,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46864 char * name;
46865 struct dentry *dentry;
46866 struct nameidata nd;
46867+ ino_t saved_ino = 0;
46868+ dev_t saved_dev = 0;
46869
46870 error = user_path_parent(dfd, pathname, &nd, &name);
46871 if (error)
572b4308 46872@@ -2774,6 +2928,15 @@ static long do_rmdir(int dfd, const char __user *pathname)
46873 error = -ENOENT;
46874 goto exit3;
46875 }
58c5fc13 46876+
46877+ saved_ino = dentry->d_inode->i_ino;
46878+ saved_dev = gr_get_dev_from_dentry(dentry);
58c5fc13 46879+
46880+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
46881+ error = -EACCES;
46882+ goto exit3;
46883+ }
46884+
46885 error = mnt_want_write(nd.path.mnt);
46886 if (error)
46887 goto exit3;
572b4308 46888@@ -2781,6 +2944,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
46889 if (error)
46890 goto exit4;
46891 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
46892+ if (!error && (saved_dev || saved_ino))
46893+ gr_handle_delete(saved_ino, saved_dev);
46894 exit4:
46895 mnt_drop_write(nd.path.mnt);
46896 exit3:
572b4308 46897@@ -2843,6 +3008,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46898 struct dentry *dentry;
46899 struct nameidata nd;
46900 struct inode *inode = NULL;
46901+ ino_t saved_ino = 0;
46902+ dev_t saved_dev = 0;
46903
46904 error = user_path_parent(dfd, pathname, &nd, &name);
46905 if (error)
572b4308 46906@@ -2865,6 +3032,16 @@ static long do_unlinkat(int dfd, const char __user *pathname)
15a11c5b 46907 if (!inode)
58c5fc13 46908 goto slashes;
46909 ihold(inode);
46910+
46911+ if (inode->i_nlink <= 1) {
46912+ saved_ino = inode->i_ino;
46913+ saved_dev = gr_get_dev_from_dentry(dentry);
46914+ }
46915+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
46916+ error = -EACCES;
46917+ goto exit2;
58c5fc13 46918+ }
15a11c5b 46919+
46920 error = mnt_want_write(nd.path.mnt);
46921 if (error)
46922 goto exit2;
572b4308 46923@@ -2872,6 +3049,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
46924 if (error)
46925 goto exit3;
46926 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
46927+ if (!error && (saved_ino || saved_dev))
46928+ gr_handle_delete(saved_ino, saved_dev);
46929 exit3:
46930 mnt_drop_write(nd.path.mnt);
46931 exit2:
572b4308 46932@@ -2947,10 +3126,18 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
46933 error = mnt_want_write(path.mnt);
46934 if (error)
46935 goto out_dput;
46936+
46937+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
58c5fc13 46938+ error = -EACCES;
6e9df6a3 46939+ goto out_drop_write;
46940+ }
46941+
6e9df6a3 46942 error = security_path_symlink(&path, dentry, from);
46943 if (error)
46944 goto out_drop_write;
6e9df6a3 46945 error = vfs_symlink(path.dentry->d_inode, dentry, from);
58c5fc13 46946+ if (!error)
6e9df6a3 46947+ gr_handle_create(dentry, path.mnt);
58c5fc13 46948 out_drop_write:
6e9df6a3 46949 mnt_drop_write(path.mnt);
58c5fc13 46950 out_dput:
572b4308 46951@@ -3025,6 +3212,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46952 {
46953 struct dentry *new_dentry;
46954 struct path old_path, new_path;
fe2de317 46955+ char *to = NULL;
46956 int how = 0;
46957 int error;
46958
572b4308 46959@@ -3048,7 +3236,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46960 if (error)
46961 return error;
46962
46963- new_dentry = user_path_create(newdfd, newname, &new_path, 0);
46964+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to, 0);
46965 error = PTR_ERR(new_dentry);
46966 if (IS_ERR(new_dentry))
6e9df6a3 46967 goto out;
572b4308 46968@@ -3059,13 +3247,30 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
46969 error = mnt_want_write(new_path.mnt);
46970 if (error)
46971 goto out_dput;
46972+
46973+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
46974+ old_path.dentry->d_inode,
46975+ old_path.dentry->d_inode->i_mode, to)) {
46976+ error = -EACCES;
6e9df6a3 46977+ goto out_drop_write;
46978+ }
46979+
6e9df6a3 46980+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
46981+ old_path.dentry, old_path.mnt, to)) {
46982+ error = -EACCES;
6e9df6a3 46983+ goto out_drop_write;
46984+ }
46985+
6e9df6a3 46986 error = security_path_link(old_path.dentry, &new_path, new_dentry);
46987 if (error)
46988 goto out_drop_write;
6e9df6a3 46989 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
58c5fc13 46990+ if (!error)
6e9df6a3 46991+ gr_handle_create(new_dentry, new_path.mnt);
58c5fc13 46992 out_drop_write:
6e9df6a3 46993 mnt_drop_write(new_path.mnt);
58c5fc13 46994 out_dput:
46995+ putname(to);
46996 dput(new_dentry);
46997 mutex_unlock(&new_path.dentry->d_inode->i_mutex);
46998 path_put(&new_path);
572b4308 46999@@ -3299,6 +3504,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47000 if (new_dentry == trap)
47001 goto exit5;
47002
47003+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
47004+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
47005+ to);
47006+ if (error)
47007+ goto exit5;
47008+
47009 error = mnt_want_write(oldnd.path.mnt);
47010 if (error)
47011 goto exit5;
572b4308 47012@@ -3308,6 +3519,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
47013 goto exit6;
47014 error = vfs_rename(old_dir->d_inode, old_dentry,
47015 new_dir->d_inode, new_dentry);
47016+ if (!error)
47017+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
47018+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
47019 exit6:
47020 mnt_drop_write(oldnd.path.mnt);
47021 exit5:
572b4308 47022@@ -3333,6 +3547,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
47023
47024 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
47025 {
47026+ char tmpbuf[64];
47027+ const char *newlink;
47028 int len;
47029
47030 len = PTR_ERR(link);
572b4308 47031@@ -3342,7 +3558,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
47032 len = strlen(link);
47033 if (len > (unsigned) buflen)
47034 len = buflen;
47035- if (copy_to_user(buffer, link, len))
47036+
47037+ if (len < sizeof(tmpbuf)) {
47038+ memcpy(tmpbuf, link, len);
47039+ newlink = tmpbuf;
47040+ } else
47041+ newlink = link;
47042+
47043+ if (copy_to_user(buffer, newlink, len))
47044 len = -EFAULT;
47045 out:
47046 return len;
fe2de317 47047diff --git a/fs/namespace.c b/fs/namespace.c
c6e2a6c8 47048index 4e46539..b28253c 100644
47049--- a/fs/namespace.c
47050+++ b/fs/namespace.c
c6e2a6c8 47051@@ -1156,6 +1156,9 @@ static int do_umount(struct mount *mnt, int flags)
47052 if (!(sb->s_flags & MS_RDONLY))
47053 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
47054 up_write(&sb->s_umount);
47055+
47056+ gr_log_remount(mnt->mnt_devname, retval);
47057+
47058 return retval;
47059 }
47060
c6e2a6c8 47061@@ -1175,6 +1178,9 @@ static int do_umount(struct mount *mnt, int flags)
6892158b 47062 br_write_unlock(vfsmount_lock);
47063 up_write(&namespace_sem);
47064 release_mounts(&umount_list);
47065+
47066+ gr_log_unmount(mnt->mnt_devname, retval);
47067+
47068 return retval;
47069 }
47070
c6e2a6c8 47071@@ -2176,6 +2182,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47072 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
47073 MS_STRICTATIME);
58c5fc13 47074
47075+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
47076+ retval = -EPERM;
47077+ goto dput_out;
47078+ }
47079+
47080+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
47081+ retval = -EPERM;
47082+ goto dput_out;
47083+ }
47084+
47085 if (flags & MS_REMOUNT)
47086 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
47087 data_page);
c6e2a6c8 47088@@ -2190,6 +2206,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
47089 dev_name, data_page);
47090 dput_out:
47091 path_put(&path);
47092+
47093+ gr_log_mount(dev_name, dir_name, retval);
47094+
47095 return retval;
47096 }
47097
c6e2a6c8 47098@@ -2471,6 +2490,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
47099 if (error)
47100 goto out2;
47101
47102+ if (gr_handle_chroot_pivot()) {
47103+ error = -EPERM;
66a7e928 47104+ goto out2;
47105+ }
47106+
6892158b 47107 get_fs_root(current->fs, &root);
47108 error = lock_mount(&old);
47109 if (error)
fe2de317 47110diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
c6e2a6c8 47111index e8bbfa5..864f936 100644
47112--- a/fs/nfs/inode.c
47113+++ b/fs/nfs/inode.c
c6e2a6c8 47114@@ -152,7 +152,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
47115 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
47116 nfsi->attrtimeo_timestamp = jiffies;
47117
47118- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
47119+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
47120 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
47121 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
47122 else
c6e2a6c8 47123@@ -1005,16 +1005,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
47124 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
47125 }
47126
47127-static atomic_long_t nfs_attr_generation_counter;
47128+static atomic_long_unchecked_t nfs_attr_generation_counter;
47129
47130 static unsigned long nfs_read_attr_generation_counter(void)
47131 {
47132- return atomic_long_read(&nfs_attr_generation_counter);
47133+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
47134 }
47135
47136 unsigned long nfs_inc_attr_generation_counter(void)
47137 {
47138- return atomic_long_inc_return(&nfs_attr_generation_counter);
47139+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
47140 }
47141
47142 void nfs_fattr_init(struct nfs_fattr *fattr)
fe2de317 47143diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
c6e2a6c8 47144index 5686661..80a9a3a 100644
47145--- a/fs/nfsd/vfs.c
47146+++ b/fs/nfsd/vfs.c
c6e2a6c8 47147@@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47148 } else {
47149 oldfs = get_fs();
47150 set_fs(KERNEL_DS);
47151- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
6e9df6a3 47152+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
47153 set_fs(oldfs);
47154 }
47155
c6e2a6c8 47156@@ -1037,7 +1037,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
47157
47158 /* Write the data. */
47159 oldfs = get_fs(); set_fs(KERNEL_DS);
47160- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
6e9df6a3 47161+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
47162 set_fs(oldfs);
47163 if (host_err < 0)
47164 goto out_nfserr;
c6e2a6c8 47165@@ -1573,7 +1573,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
ae4e228f 47166 */
58c5fc13 47167
ae4e228f 47168 oldfs = get_fs(); set_fs(KERNEL_DS);
47169- host_err = inode->i_op->readlink(path.dentry, buf, *lenp);
47170+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
ae4e228f 47171 set_fs(oldfs);
58c5fc13 47172
ae4e228f 47173 if (host_err < 0)
fe2de317 47174diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
5e856224 47175index 3568c8a..e0240d8 100644
47176--- a/fs/notify/fanotify/fanotify_user.c
47177+++ b/fs/notify/fanotify/fanotify_user.c
5e856224 47178@@ -278,7 +278,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
15a11c5b 47179 goto out_close_fd;
66a7e928 47180
47181 ret = -EFAULT;
47182- if (copy_to_user(buf, &fanotify_event_metadata,
47183+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
47184+ copy_to_user(buf, &fanotify_event_metadata,
47185 fanotify_event_metadata.event_len))
47186 goto out_kill_access_response;
47187
fe2de317 47188diff --git a/fs/notify/notification.c b/fs/notify/notification.c
c6e2a6c8 47189index c887b13..0fdf472 100644
47190--- a/fs/notify/notification.c
47191+++ b/fs/notify/notification.c
47192@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
47193 * get set to 0 so it will never get 'freed'
47194 */
47195 static struct fsnotify_event *q_overflow_event;
47196-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47197+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47198
47199 /**
47200 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
fe2de317 47201@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
47202 */
47203 u32 fsnotify_get_cookie(void)
47204 {
47205- return atomic_inc_return(&fsnotify_sync_cookie);
47206+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
47207 }
47208 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
47209
47210diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
47211index 99e3610..02c1068 100644
47212--- a/fs/ntfs/dir.c
47213+++ b/fs/ntfs/dir.c
47214@@ -1329,7 +1329,7 @@ find_next_index_buffer:
47215 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
47216 ~(s64)(ndir->itype.index.block_size - 1)));
47217 /* Bounds checks. */
47218- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47219+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
47220 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
47221 "inode 0x%lx or driver bug.", vdir->i_ino);
47222 goto err_out;
fe2de317 47223diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
c6e2a6c8 47224index 8639169..76697aa 100644
47225--- a/fs/ntfs/file.c
47226+++ b/fs/ntfs/file.c
47227@@ -2229,6 +2229,6 @@ const struct inode_operations ntfs_file_inode_ops = {
47228 #endif /* NTFS_RW */
47229 };
47230
47231-const struct file_operations ntfs_empty_file_ops = {};
ae4e228f 47232+const struct file_operations ntfs_empty_file_ops __read_only;
47233
47234-const struct inode_operations ntfs_empty_inode_ops = {};
ae4e228f 47235+const struct inode_operations ntfs_empty_inode_ops __read_only;
47236diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
47237index 210c352..a174f83 100644
47238--- a/fs/ocfs2/localalloc.c
47239+++ b/fs/ocfs2/localalloc.c
47240@@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
47241 goto bail;
47242 }
47243
47244- atomic_inc(&osb->alloc_stats.moves);
47245+ atomic_inc_unchecked(&osb->alloc_stats.moves);
47246
58c5fc13 47247 bail:
57199397 47248 if (handle)
fe2de317 47249diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
4c928ab7 47250index d355e6e..578d905 100644
47251--- a/fs/ocfs2/ocfs2.h
47252+++ b/fs/ocfs2/ocfs2.h
66a7e928 47253@@ -235,11 +235,11 @@ enum ocfs2_vol_state
47254
47255 struct ocfs2_alloc_stats
47256 {
47257- atomic_t moves;
47258- atomic_t local_data;
47259- atomic_t bitmap_data;
47260- atomic_t bg_allocs;
47261- atomic_t bg_extends;
47262+ atomic_unchecked_t moves;
47263+ atomic_unchecked_t local_data;
47264+ atomic_unchecked_t bitmap_data;
47265+ atomic_unchecked_t bg_allocs;
47266+ atomic_unchecked_t bg_extends;
47267 };
47268
47269 enum ocfs2_local_alloc_state
fe2de317 47270diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
5e856224 47271index f169da4..9112253 100644
47272--- a/fs/ocfs2/suballoc.c
47273+++ b/fs/ocfs2/suballoc.c
47274@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
47275 mlog_errno(status);
47276 goto bail;
47277 }
47278- atomic_inc(&osb->alloc_stats.bg_extends);
47279+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
47280
47281 /* You should never ask for this much metadata */
47282 BUG_ON(bits_wanted >
fe2de317 47283@@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handle,
47284 mlog_errno(status);
47285 goto bail;
47286 }
47287- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47288+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58c5fc13 47289
47290 *suballoc_loc = res.sr_bg_blkno;
47291 *suballoc_bit_start = res.sr_bit_offset;
fe2de317 47292@@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
47293 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
47294 res->sr_bits);
47295
47296- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47297+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47298
47299 BUG_ON(res->sr_bits != 1);
47300
fe2de317 47301@@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
47302 mlog_errno(status);
47303 goto bail;
47304 }
47305- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
47306+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
58c5fc13 47307
57199397 47308 BUG_ON(res.sr_bits != 1);
58c5fc13 47309
fe2de317 47310@@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
47311 cluster_start,
47312 num_clusters);
47313 if (!status)
47314- atomic_inc(&osb->alloc_stats.local_data);
47315+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
47316 } else {
47317 if (min_clusters > (osb->bitmap_cpg - 1)) {
47318 /* The only paths asking for contiguousness
fe2de317 47319@@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
58c5fc13 47320 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
47321 res.sr_bg_blkno,
47322 res.sr_bit_offset);
47323- atomic_inc(&osb->alloc_stats.bitmap_data);
47324+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
57199397 47325 *num_clusters = res.sr_bits;
47326 }
47327 }
fe2de317 47328diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
c6e2a6c8 47329index 68f4541..89cfe6a 100644
47330--- a/fs/ocfs2/super.c
47331+++ b/fs/ocfs2/super.c
4c928ab7 47332@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
47333 "%10s => GlobalAllocs: %d LocalAllocs: %d "
47334 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
47335 "Stats",
47336- atomic_read(&osb->alloc_stats.bitmap_data),
47337- atomic_read(&osb->alloc_stats.local_data),
47338- atomic_read(&osb->alloc_stats.bg_allocs),
47339- atomic_read(&osb->alloc_stats.moves),
47340- atomic_read(&osb->alloc_stats.bg_extends));
47341+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
47342+ atomic_read_unchecked(&osb->alloc_stats.local_data),
47343+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
47344+ atomic_read_unchecked(&osb->alloc_stats.moves),
47345+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
47346
47347 out += snprintf(buf + out, len - out,
47348 "%10s => State: %u Descriptor: %llu Size: %u bits "
c6e2a6c8 47349@@ -2116,11 +2116,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
58c5fc13 47350 spin_lock_init(&osb->osb_xattr_lock);
df50ba0c 47351 ocfs2_init_steal_slots(osb);
47352
47353- atomic_set(&osb->alloc_stats.moves, 0);
47354- atomic_set(&osb->alloc_stats.local_data, 0);
47355- atomic_set(&osb->alloc_stats.bitmap_data, 0);
47356- atomic_set(&osb->alloc_stats.bg_allocs, 0);
47357- atomic_set(&osb->alloc_stats.bg_extends, 0);
47358+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
47359+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
47360+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
47361+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
47362+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
47363
47364 /* Copy the blockcheck stats from the superblock probe */
47365 osb->osb_ecc_stats = *stats;
47366diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
47367index 5d22872..523db20 100644
47368--- a/fs/ocfs2/symlink.c
47369+++ b/fs/ocfs2/symlink.c
66a7e928 47370@@ -142,7 +142,7 @@ bail:
58c5fc13 47371
47372 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47373 {
47374- char *link = nd_get_link(nd);
47375+ const char *link = nd_get_link(nd);
47376 if (!IS_ERR(link))
47377 kfree(link);
58c5fc13 47378 }
fe2de317 47379diff --git a/fs/open.c b/fs/open.c
572b4308 47380index 3f1108b..822d7f7 100644
47381--- a/fs/open.c
47382+++ b/fs/open.c
47383@@ -31,6 +31,8 @@
47384 #include <linux/ima.h>
47385 #include <linux/dnotify.h>
47386
47387+#define CREATE_TRACE_POINTS
47388+#include <trace/events/fs.h>
47389 #include "internal.h"
47390
47391 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
47392@@ -112,6 +114,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
47393 error = locks_verify_truncate(inode, NULL, length);
47394 if (!error)
47395 error = security_path_truncate(&path);
58c5fc13 47396+
47397+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
47398+ error = -EACCES;
47399+
47400 if (!error)
47401 error = do_truncate(path.dentry, length, 0, NULL);
47402
c6e2a6c8 47403@@ -358,6 +364,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
47404 if (__mnt_is_readonly(path.mnt))
47405 res = -EROFS;
47406
47407+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
47408+ res = -EACCES;
47409+
47410 out_path_release:
47411 path_put(&path);
47412 out:
c6e2a6c8 47413@@ -384,6 +393,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
47414 if (error)
47415 goto dput_and_out;
47416
47417+ gr_log_chdir(path.dentry, path.mnt);
47418+
47419 set_fs_pwd(current->fs, &path);
47420
47421 dput_and_out:
c6e2a6c8 47422@@ -410,6 +421,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
47423 goto out_putf;
47424
6892158b 47425 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
47426+
47427+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
47428+ error = -EPERM;
47429+
47430+ if (!error)
47431+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
47432+
47433 if (!error)
47434 set_fs_pwd(current->fs, &file->f_path);
47435 out_putf:
c6e2a6c8 47436@@ -438,7 +456,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
ae4e228f 47437 if (error)
47438 goto dput_and_out;
47439
47440+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
47441+ goto dput_and_out;
47442+
47443 set_fs_root(current->fs, &path);
47444+
47445+ gr_handle_chroot_chdir(&path);
47446+
47447 error = 0;
47448 dput_and_out:
47449 path_put(&path);
c6e2a6c8 47450@@ -456,6 +480,16 @@ static int chmod_common(struct path *path, umode_t mode)
58c5fc13 47451 if (error)
6e9df6a3 47452 return error;
6892158b 47453 mutex_lock(&inode->i_mutex);
58c5fc13 47454+
4c928ab7 47455+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
58c5fc13 47456+ error = -EACCES;
6892158b 47457+ goto out_unlock;
58c5fc13 47458+ }
6e9df6a3 47459+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
58c5fc13 47460+ error = -EACCES;
ae4e228f 47461+ goto out_unlock;
47462+ }
47463+
5e856224 47464 error = security_path_chmod(path, mode);
47465 if (error)
47466 goto out_unlock;
c6e2a6c8 47467@@ -506,6 +540,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
47468 int error;
47469 struct iattr newattrs;
47470
ae4e228f 47471+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
47472+ return -EACCES;
47473+
47474 newattrs.ia_valid = ATTR_CTIME;
47475 if (user != (uid_t) -1) {
47476 newattrs.ia_valid |= ATTR_UID;
47477@@ -987,6 +1024,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
47478 } else {
47479 fsnotify_open(f);
47480 fd_install(fd, f);
47481+ trace_do_sys_open(tmp, flags, mode);
47482 }
47483 }
47484 putname(tmp);
fe2de317 47485diff --git a/fs/pipe.c b/fs/pipe.c
c6e2a6c8 47486index fec5e4a..f4210f9 100644
47487--- a/fs/pipe.c
47488+++ b/fs/pipe.c
c6e2a6c8 47489@@ -438,9 +438,9 @@ redo:
47490 }
47491 if (bufs) /* More to do? */
47492 continue;
47493- if (!pipe->writers)
47494+ if (!atomic_read(&pipe->writers))
47495 break;
47496- if (!pipe->waiting_writers) {
47497+ if (!atomic_read(&pipe->waiting_writers)) {
47498 /* syscall merging: Usually we must not sleep
47499 * if O_NONBLOCK is set, or if we got some data.
47500 * But if a writer sleeps in kernel space, then
c6e2a6c8 47501@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
47502 mutex_lock(&inode->i_mutex);
47503 pipe = inode->i_pipe;
47504
47505- if (!pipe->readers) {
47506+ if (!atomic_read(&pipe->readers)) {
47507 send_sig(SIGPIPE, current, 0);
47508 ret = -EPIPE;
47509 goto out;
c6e2a6c8 47510@@ -553,7 +553,7 @@ redo1:
47511 for (;;) {
47512 int bufs;
47513
47514- if (!pipe->readers) {
47515+ if (!atomic_read(&pipe->readers)) {
47516 send_sig(SIGPIPE, current, 0);
47517 if (!ret)
47518 ret = -EPIPE;
c6e2a6c8 47519@@ -644,9 +644,9 @@ redo2:
47520 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
47521 do_wakeup = 0;
47522 }
47523- pipe->waiting_writers++;
47524+ atomic_inc(&pipe->waiting_writers);
47525 pipe_wait(pipe);
47526- pipe->waiting_writers--;
47527+ atomic_dec(&pipe->waiting_writers);
47528 }
47529 out:
47530 mutex_unlock(&inode->i_mutex);
c6e2a6c8 47531@@ -713,7 +713,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47532 mask = 0;
47533 if (filp->f_mode & FMODE_READ) {
47534 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
47535- if (!pipe->writers && filp->f_version != pipe->w_counter)
47536+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
47537 mask |= POLLHUP;
47538 }
47539
c6e2a6c8 47540@@ -723,7 +723,7 @@ pipe_poll(struct file *filp, poll_table *wait)
47541 * Most Unices do not set POLLERR for FIFOs but on Linux they
47542 * behave exactly like pipes for poll().
47543 */
47544- if (!pipe->readers)
47545+ if (!atomic_read(&pipe->readers))
47546 mask |= POLLERR;
47547 }
47548
c6e2a6c8 47549@@ -737,10 +737,10 @@ pipe_release(struct inode *inode, int decr, int decw)
47550
47551 mutex_lock(&inode->i_mutex);
47552 pipe = inode->i_pipe;
47553- pipe->readers -= decr;
47554- pipe->writers -= decw;
47555+ atomic_sub(decr, &pipe->readers);
47556+ atomic_sub(decw, &pipe->writers);
47557
47558- if (!pipe->readers && !pipe->writers) {
47559+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
47560 free_pipe_info(inode);
47561 } else {
16454cff 47562 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
c6e2a6c8 47563@@ -830,7 +830,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
47564
47565 if (inode->i_pipe) {
47566 ret = 0;
47567- inode->i_pipe->readers++;
47568+ atomic_inc(&inode->i_pipe->readers);
47569 }
47570
47571 mutex_unlock(&inode->i_mutex);
c6e2a6c8 47572@@ -847,7 +847,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
47573
47574 if (inode->i_pipe) {
47575 ret = 0;
47576- inode->i_pipe->writers++;
47577+ atomic_inc(&inode->i_pipe->writers);
47578 }
47579
47580 mutex_unlock(&inode->i_mutex);
c6e2a6c8 47581@@ -865,9 +865,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
47582 if (inode->i_pipe) {
47583 ret = 0;
47584 if (filp->f_mode & FMODE_READ)
47585- inode->i_pipe->readers++;
47586+ atomic_inc(&inode->i_pipe->readers);
47587 if (filp->f_mode & FMODE_WRITE)
47588- inode->i_pipe->writers++;
47589+ atomic_inc(&inode->i_pipe->writers);
47590 }
47591
47592 mutex_unlock(&inode->i_mutex);
c6e2a6c8 47593@@ -959,7 +959,7 @@ void free_pipe_info(struct inode *inode)
47594 inode->i_pipe = NULL;
47595 }
47596
47597-static struct vfsmount *pipe_mnt __read_mostly;
47598+struct vfsmount *pipe_mnt __read_mostly;
47599
47600 /*
47601 * pipefs_dname() is called from d_path().
c6e2a6c8 47602@@ -989,7 +989,8 @@ static struct inode * get_pipe_inode(void)
47603 goto fail_iput;
47604 inode->i_pipe = pipe;
47605
47606- pipe->readers = pipe->writers = 1;
47607+ atomic_set(&pipe->readers, 1);
47608+ atomic_set(&pipe->writers, 1);
47609 inode->i_fop = &rdwr_pipefifo_fops;
47610
58c5fc13 47611 /*
fe2de317
MT
47612diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
47613index 15af622..0e9f4467 100644
47614--- a/fs/proc/Kconfig
47615+++ b/fs/proc/Kconfig
47616@@ -30,12 +30,12 @@ config PROC_FS
47617
47618 config PROC_KCORE
47619 bool "/proc/kcore support" if !ARM
47620- depends on PROC_FS && MMU
47621+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
47622
47623 config PROC_VMCORE
47624 bool "/proc/vmcore support"
47625- depends on PROC_FS && CRASH_DUMP
47626- default y
47627+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
47628+ default n
47629 help
47630 Exports the dump image of crashed kernel in ELF format.
47631
47632@@ -59,8 +59,8 @@ config PROC_SYSCTL
47633 limited in memory.
47634
47635 config PROC_PAGE_MONITOR
47636- default y
47637- depends on PROC_FS && MMU
47638+ default n
47639+ depends on PROC_FS && MMU && !GRKERNSEC
47640 bool "Enable /proc page monitoring" if EXPERT
47641 help
47642 Various /proc files exist to monitor process memory utilization:
47643diff --git a/fs/proc/array.c b/fs/proc/array.c
c6e2a6c8 47644index f9bd395..acb7847 100644
fe2de317
MT
47645--- a/fs/proc/array.c
47646+++ b/fs/proc/array.c
6892158b
MT
47647@@ -60,6 +60,7 @@
47648 #include <linux/tty.h>
47649 #include <linux/string.h>
47650 #include <linux/mman.h>
47651+#include <linux/grsecurity.h>
47652 #include <linux/proc_fs.h>
47653 #include <linux/ioport.h>
47654 #include <linux/uaccess.h>
fe2de317 47655@@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
16454cff 47656 seq_putc(m, '\n');
57199397
MT
47657 }
47658
47659+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47660+static inline void task_pax(struct seq_file *m, struct task_struct *p)
47661+{
47662+ if (p->mm)
47663+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
47664+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
47665+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
47666+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
47667+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
47668+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
47669+ else
47670+ seq_printf(m, "PaX:\t-----\n");
47671+}
47672+#endif
47673+
47674 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
47675 struct pid *pid, struct task_struct *task)
47676 {
fe2de317 47677@@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
c52201e0
MT
47678 task_cpus_allowed(m, task);
47679 cpuset_task_status_allowed(m, task);
57199397
MT
47680 task_context_switch_counts(m, task);
47681+
47682+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47683+ task_pax(m, task);
47684+#endif
6892158b
MT
47685+
47686+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
47687+ task_grsec_rbac(m, task);
47688+#endif
57199397
MT
47689+
47690 return 0;
47691 }
47692
47693+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47694+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47695+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47696+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47697+#endif
47698+
47699 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47700 struct pid *pid, struct task_struct *task, int whole)
47701 {
4c928ab7 47702@@ -378,6 +409,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
6e9df6a3 47703 char tcomm[sizeof(task->comm)];
66a7e928
MT
47704 unsigned long flags;
47705
4c928ab7
MT
47706+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47707+ if (current->exec_id != m->exec_id) {
47708+ gr_log_badprocpid("stat");
47709+ return 0;
47710+ }
47711+#endif
66a7e928
MT
47712+
47713 state = *get_task_state(task);
47714 vsize = eip = esp = 0;
5e856224 47715 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
4c928ab7 47716@@ -449,6 +487,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
47717 gtime = task->gtime;
47718 }
47719
47720+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47721+ if (PAX_RAND_FLAGS(mm)) {
47722+ eip = 0;
47723+ esp = 0;
47724+ wchan = 0;
47725+ }
47726+#endif
47727+#ifdef CONFIG_GRKERNSEC_HIDESYM
47728+ wchan = 0;
47729+ eip =0;
47730+ esp =0;
47731+#endif
47732+
47733 /* scale priority and nice values from timeslices to -20..20 */
47734 /* to make it look like a "normal" Unix priority/nice value */
47735 priority = task_prio(task);
c6e2a6c8
MT
47736@@ -485,9 +536,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47737 seq_put_decimal_ull(m, ' ', vsize);
47738 seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0);
47739 seq_put_decimal_ull(m, ' ', rsslim);
47740+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47741+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
47742+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
47743+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
47744+#else
47745 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
47746 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
47747 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
47748+#endif
47749 seq_put_decimal_ull(m, ' ', esp);
47750 seq_put_decimal_ull(m, ' ', eip);
47751 /* The signal information here is obsolete.
47752@@ -508,9 +565,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
47753 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
47754 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
47755 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
57199397 47756+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
c6e2a6c8
MT
47757+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_data : 0));
47758+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->end_data : 0));
47759+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((mm && permitted) ? mm->start_brk : 0));
57199397 47760+#else
c6e2a6c8
MT
47761 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0);
47762 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0);
47763 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0);
47764+#endif
47765 seq_putc(m, '\n');
47766 if (mm)
47767 mmput(mm);
47768@@ -533,8 +596,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
4c928ab7
MT
47769 struct pid *pid, struct task_struct *task)
47770 {
47771 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
47772- struct mm_struct *mm = get_task_mm(task);
47773+ struct mm_struct *mm;
47774
47775+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47776+ if (current->exec_id != m->exec_id) {
47777+ gr_log_badprocpid("statm");
47778+ return 0;
47779+ }
47780+#endif
47781+ mm = get_task_mm(task);
47782 if (mm) {
47783 size = task_statm(mm, &shared, &text, &data, &resident);
47784 mmput(mm);
c6e2a6c8 47785@@ -556,3 +626,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
47786
47787 return 0;
47788 }
47789+
47790+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
47791+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
47792+{
71d190be
MT
47793+ u32 curr_ip = 0;
47794+ unsigned long flags;
47795+
47796+ if (lock_task_sighand(task, &flags)) {
47797+ curr_ip = task->signal->curr_ip;
47798+ unlock_task_sighand(task, &flags);
47799+ }
47800+
47801+ return sprintf(buffer, "%pI4\n", &curr_ip);
57199397
MT
47802+}
47803+#endif
fe2de317 47804diff --git a/fs/proc/base.c b/fs/proc/base.c
572b4308 47805index 9fc77b4..4877d08 100644
fe2de317
MT
47806--- a/fs/proc/base.c
47807+++ b/fs/proc/base.c
5e856224 47808@@ -109,6 +109,14 @@ struct pid_entry {
57199397
MT
47809 union proc_op op;
47810 };
47811
47812+struct getdents_callback {
47813+ struct linux_dirent __user * current_dir;
47814+ struct linux_dirent __user * previous;
47815+ struct file * file;
47816+ int count;
47817+ int error;
47818+};
57199397
MT
47819+
47820 #define NOD(NAME, MODE, IOP, FOP, OP) { \
47821 .name = (NAME), \
47822 .len = sizeof(NAME) - 1, \
572b4308
MT
47823@@ -198,11 +206,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
47824 return result;
47825 }
47826
47827-struct mm_struct *mm_for_maps(struct task_struct *task)
47828-{
47829- return mm_access(task, PTRACE_MODE_READ);
47830-}
47831-
47832 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
47833 {
47834 int res = 0;
47835@@ -213,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
57199397
MT
47836 if (!mm->arg_end)
47837 goto out_mm; /* Shh! No looking before we're done */
47838
47839+ if (gr_acl_handle_procpidmem(task))
47840+ goto out_mm;
47841+
47842 len = mm->arg_end - mm->arg_start;
47843
47844 if (len > PAGE_SIZE)
572b4308 47845@@ -240,12 +246,28 @@ out:
57199397
MT
47846 return res;
47847 }
47848
47849+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
47850+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
47851+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
47852+ _mm->pax_flags & MF_PAX_SEGMEXEC))
47853+#endif
47854+
47855 static int proc_pid_auxv(struct task_struct *task, char *buffer)
47856 {
572b4308
MT
47857- struct mm_struct *mm = mm_for_maps(task);
47858+ struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
66a7e928
MT
47859 int res = PTR_ERR(mm);
47860 if (mm && !IS_ERR(mm)) {
57199397
MT
47861 unsigned int nwords = 0;
47862+
47863+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
6892158b
MT
47864+ /* allow if we're currently ptracing this task */
47865+ if (PAX_RAND_FLAGS(mm) &&
47866+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
57199397 47867+ mmput(mm);
15a11c5b 47868+ return 0;
57199397
MT
47869+ }
47870+#endif
47871+
47872 do {
47873 nwords += 2;
47874 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
572b4308 47875@@ -259,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
6892158b
MT
47876 }
47877
47878
47879-#ifdef CONFIG_KALLSYMS
47880+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47881 /*
47882 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
47883 * Returns the resolved symbol. If that fails, simply return the address.
572b4308 47884@@ -298,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
66a7e928 47885 mutex_unlock(&task->signal->cred_guard_mutex);
57199397 47886 }
57199397
MT
47887
47888-#ifdef CONFIG_STACKTRACE
47889+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
47890
47891 #define MAX_STACK_TRACE_DEPTH 64
47892
572b4308 47893@@ -489,7 +511,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
57199397
MT
47894 return count;
47895 }
47896
47897-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
47898+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
47899 static int proc_pid_syscall(struct task_struct *task, char *buffer)
47900 {
47901 long nr;
572b4308 47902@@ -518,7 +540,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
16454cff
MT
47903 /************************************************************************/
47904
47905 /* permission checks */
47906-static int proc_fd_access_allowed(struct inode *inode)
47907+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
47908 {
47909 struct task_struct *task;
47910 int allowed = 0;
572b4308 47911@@ -528,7 +550,10 @@ static int proc_fd_access_allowed(struct inode *inode)
16454cff
MT
47912 */
47913 task = get_proc_task(inode);
47914 if (task) {
47915- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
47916+ if (log)
16454cff 47917+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
5e856224
MT
47918+ else
47919+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
16454cff
MT
47920 put_task_struct(task);
47921 }
47922 return allowed;
572b4308 47923@@ -566,10 +591,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
5e856224
MT
47924 struct task_struct *task,
47925 int hide_pid_min)
47926 {
47927+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
47928+ return false;
47929+
47930+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47931+ rcu_read_lock();
47932+ {
47933+ const struct cred *tmpcred = current_cred();
47934+ const struct cred *cred = __task_cred(task);
47935+
47936+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
47937+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
47938+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
47939+#endif
47940+ ) {
47941+ rcu_read_unlock();
47942+ return true;
47943+ }
47944+ }
47945+ rcu_read_unlock();
47946+
47947+ if (!pid->hide_pid)
47948+ return false;
47949+#endif
47950+
47951 if (pid->hide_pid < hide_pid_min)
47952 return true;
47953 if (in_group_p(pid->pid_gid))
47954 return true;
47955+
47956 return ptrace_may_access(task, PTRACE_MODE_READ);
47957 }
47958
572b4308 47959@@ -587,7 +637,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
5e856224
MT
47960 put_task_struct(task);
47961
47962 if (!has_perms) {
47963+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47964+ {
47965+#else
47966 if (pid->hide_pid == 2) {
47967+#endif
47968 /*
47969 * Let's make getdents(), stat(), and open()
47970 * consistent with each other. If a process
572b4308
MT
47971@@ -677,7 +731,7 @@ static const struct file_operations proc_single_file_operations = {
47972 .release = single_release,
47973 };
47974
47975-static int mem_open(struct inode* inode, struct file* file)
47976+static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
47977 {
47978 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
47979 struct mm_struct *mm;
47980@@ -685,7 +739,12 @@ static int mem_open(struct inode* inode, struct file* file)
47981 if (!task)
47982 return -ESRCH;
47983
47984- mm = mm_access(task, PTRACE_MODE_ATTACH);
47985+ if (gr_acl_handle_procpidmem(task)) {
47986+ put_task_struct(task);
47987+ return -EPERM;
47988+ }
47989+
47990+ mm = mm_access(task, mode);
47991 put_task_struct(task);
47992
47993 if (IS_ERR(mm))
47994@@ -698,11 +757,24 @@ static int mem_open(struct inode* inode, struct file* file)
47995 mmput(mm);
47996 }
4c928ab7 47997
572b4308
MT
47998+ file->private_data = mm;
47999+
4c928ab7
MT
48000+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48001+ file->f_version = current->exec_id;
48002+#endif
48003+
572b4308
MT
48004+ return 0;
48005+}
48006+
48007+static int mem_open(struct inode *inode, struct file *file)
48008+{
48009+ int ret;
48010+ ret = __mem_open(inode, file, PTRACE_MODE_ATTACH);
48011+
48012 /* OK to pass negative loff_t, we can catch out-of-range */
48013 file->f_mode |= FMODE_UNSIGNED_OFFSET;
48014- file->private_data = mm;
48015
48016- return 0;
48017+ return ret;
4c928ab7
MT
48018 }
48019
572b4308
MT
48020 static ssize_t mem_rw(struct file *file, char __user *buf,
48021@@ -713,6 +785,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
4c928ab7
MT
48022 ssize_t copied;
48023 char *page;
48024
48025+#ifdef CONFIG_GRKERNSEC
48026+ if (write)
48027+ return -EPERM;
48028+#endif
48029+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48030+ if (file->f_version != current->exec_id) {
48031+ gr_log_badprocpid("mem");
48032+ return 0;
48033+ }
48034+#endif
48035+
48036 if (!mm)
48037 return 0;
48038
572b4308
MT
48039@@ -801,42 +884,49 @@ static const struct file_operations proc_mem_operations = {
48040 .release = mem_release,
48041 };
57199397 48042
572b4308
MT
48043+static int environ_open(struct inode *inode, struct file *file)
48044+{
48045+ return __mem_open(inode, file, PTRACE_MODE_READ);
48046+}
57199397 48047+
572b4308
MT
48048 static ssize_t environ_read(struct file *file, char __user *buf,
48049 size_t count, loff_t *ppos)
48050 {
48051- struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
48052 char *page;
48053 unsigned long src = *ppos;
48054- int ret = -ESRCH;
48055- struct mm_struct *mm;
48056+ int ret = 0;
48057+ struct mm_struct *mm = file->private_data;
48058
48059- if (!task)
48060- goto out_no_task;
48061+ if (!mm)
48062+ return 0;
48063+
48064+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48065+ if (file->f_version != current->exec_id) {
48066+ gr_log_badprocpid("environ");
48067+ return 0;
48068+ }
48069+#endif
48070
48071- ret = -ENOMEM;
66a7e928
MT
48072 page = (char *)__get_free_page(GFP_TEMPORARY);
48073 if (!page)
572b4308
MT
48074- goto out;
48075-
48076-
48077- mm = mm_for_maps(task);
48078- ret = PTR_ERR(mm);
48079- if (!mm || IS_ERR(mm))
48080- goto out_free;
48081+ return -ENOMEM;
48082
48083 ret = 0;
48084+ if (!atomic_inc_not_zero(&mm->mm_users))
48085+ goto free;
48086 while (count > 0) {
48087- int this_len, retval, max_len;
48088+ size_t this_len, max_len;
48089+ int retval;
48090+
48091+ if (src >= (mm->env_end - mm->env_start))
48092+ break;
48093
48094 this_len = mm->env_end - (mm->env_start + src);
48095
48096- if (this_len <= 0)
48097- break;
48098+ max_len = min_t(size_t, PAGE_SIZE, count);
48099+ this_len = min(max_len, this_len);
48100
48101- max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
48102- this_len = (this_len > max_len) ? max_len : this_len;
48103-
48104- retval = access_process_vm(task, (mm->env_start + src),
48105+ retval = access_remote_vm(mm, (mm->env_start + src),
48106 page, this_len, 0);
48107
48108 if (retval <= 0) {
48109@@ -855,19 +945,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
48110 count -= retval;
48111 }
48112 *ppos = src;
48113-
48114 mmput(mm);
48115-out_free:
48116+
48117+free:
48118 free_page((unsigned long) page);
48119-out:
48120- put_task_struct(task);
48121-out_no_task:
48122 return ret;
48123 }
48124
48125 static const struct file_operations proc_environ_operations = {
48126+ .open = environ_open,
48127 .read = environ_read,
48128 .llseek = generic_file_llseek,
48129+ .release = mem_release,
48130 };
48131
48132 static ssize_t oom_adjust_read(struct file *file, char __user *buf,
48133@@ -1433,7 +1522,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
16454cff
MT
48134 path_put(&nd->path);
48135
48136 /* Are we allowed to snoop on the tasks file descriptors? */
48137- if (!proc_fd_access_allowed(inode))
5e856224 48138+ if (!proc_fd_access_allowed(inode, 0))
16454cff
MT
48139 goto out;
48140
5e856224 48141 error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
572b4308 48142@@ -1472,8 +1561,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
16454cff
MT
48143 struct path path;
48144
48145 /* Are we allowed to snoop on the tasks file descriptors? */
48146- if (!proc_fd_access_allowed(inode))
48147- goto out;
48148+ /* logging this is needed for learning on chromium to work properly,
48149+ but we don't want to flood the logs from 'ps' which does a readlink
48150+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
48151+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
48152+ */
48153+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
48154+ if (!proc_fd_access_allowed(inode,0))
48155+ goto out;
48156+ } else {
48157+ if (!proc_fd_access_allowed(inode,1))
48158+ goto out;
48159+ }
48160
5e856224 48161 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
16454cff 48162 if (error)
572b4308 48163@@ -1538,7 +1637,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
57199397
MT
48164 rcu_read_lock();
48165 cred = __task_cred(task);
48166 inode->i_uid = cred->euid;
48167+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48168+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48169+#else
48170 inode->i_gid = cred->egid;
48171+#endif
48172 rcu_read_unlock();
48173 }
48174 security_task_to_inode(task, inode);
572b4308 48175@@ -1574,10 +1677,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
5e856224
MT
48176 return -ENOENT;
48177 }
57199397
MT
48178 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48179+#ifdef CONFIG_GRKERNSEC_PROC_USER
48180+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48181+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48182+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48183+#endif
48184 task_dumpable(task)) {
5e856224 48185 cred = __task_cred(task);
57199397
MT
48186 stat->uid = cred->euid;
48187+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48188+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
48189+#else
48190 stat->gid = cred->egid;
48191+#endif
48192 }
48193 }
48194 rcu_read_unlock();
572b4308 48195@@ -1615,11 +1727,20 @@ int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
57199397
MT
48196
48197 if (task) {
48198 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
48199+#ifdef CONFIG_GRKERNSEC_PROC_USER
48200+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
48201+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48202+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
48203+#endif
48204 task_dumpable(task)) {
48205 rcu_read_lock();
48206 cred = __task_cred(task);
48207 inode->i_uid = cred->euid;
48208+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48209+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48210+#else
48211 inode->i_gid = cred->egid;
48212+#endif
48213 rcu_read_unlock();
48214 } else {
48215 inode->i_uid = 0;
572b4308 48216@@ -1737,7 +1858,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
57199397
MT
48217 int fd = proc_fd(inode);
48218
48219 if (task) {
48220- files = get_files_struct(task);
48221+ if (!gr_acl_handle_procpidmem(task))
48222+ files = get_files_struct(task);
48223 put_task_struct(task);
48224 }
48225 if (files) {
572b4308
MT
48226@@ -2025,11 +2147,8 @@ static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
48227 if (!task)
48228 goto out_notask;
48229
48230- if (!ptrace_may_access(task, PTRACE_MODE_READ))
48231- goto out;
48232-
48233- mm = get_task_mm(task);
48234- if (!mm)
48235+ mm = mm_access(task, PTRACE_MODE_READ);
48236+ if (IS_ERR_OR_NULL(mm))
48237 goto out;
48238
48239 if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
48240@@ -2338,11 +2457,21 @@ static const struct file_operations proc_map_files_operations = {
16454cff 48241 */
6e9df6a3 48242 static int proc_fd_permission(struct inode *inode, int mask)
57199397 48243 {
57199397 48244+ struct task_struct *task;
6e9df6a3 48245 int rv = generic_permission(inode, mask);
57199397
MT
48246- if (rv == 0)
48247- return 0;
48248+
48249 if (task_pid(current) == proc_pid(inode))
48250 rv = 0;
48251+
48252+ task = get_proc_task(inode);
48253+ if (task == NULL)
48254+ return rv;
48255+
48256+ if (gr_acl_handle_procpidmem(task))
48257+ rv = -EACCES;
48258+
48259+ put_task_struct(task);
48260+
48261 return rv;
48262 }
48263
572b4308 48264@@ -2452,6 +2581,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
57199397
MT
48265 if (!task)
48266 goto out_no_task;
48267
48268+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48269+ goto out;
48270+
48271 /*
48272 * Yes, it does not scale. And it should not. Don't add
48273 * new entries into /proc/<tgid>/ without very good reasons.
572b4308 48274@@ -2496,6 +2628,9 @@ static int proc_pident_readdir(struct file *filp,
57199397
MT
48275 if (!task)
48276 goto out_no_task;
48277
48278+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48279+ goto out;
48280+
48281 ret = 0;
48282 i = filp->f_pos;
48283 switch (i) {
572b4308 48284@@ -2766,7 +2901,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
57199397
MT
48285 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
48286 void *cookie)
48287 {
48288- char *s = nd_get_link(nd);
48289+ const char *s = nd_get_link(nd);
48290 if (!IS_ERR(s))
48291 __putname(s);
48292 }
572b4308 48293@@ -2967,7 +3102,7 @@ static const struct pid_entry tgid_base_stuff[] = {
16454cff 48294 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
57199397
MT
48295 #endif
48296 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48297-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48298+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66a7e928 48299 INF("syscall", S_IRUGO, proc_pid_syscall),
57199397
MT
48300 #endif
48301 INF("cmdline", S_IRUGO, proc_pid_cmdline),
572b4308 48302@@ -2992,10 +3127,10 @@ static const struct pid_entry tgid_base_stuff[] = {
6892158b
MT
48303 #ifdef CONFIG_SECURITY
48304 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48305 #endif
48306-#ifdef CONFIG_KALLSYMS
48307+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57199397
MT
48308 INF("wchan", S_IRUGO, proc_pid_wchan),
48309 #endif
48310-#ifdef CONFIG_STACKTRACE
48311+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66a7e928 48312 ONE("stack", S_IRUGO, proc_pid_stack),
57199397
MT
48313 #endif
48314 #ifdef CONFIG_SCHEDSTATS
572b4308 48315@@ -3029,6 +3164,9 @@ static const struct pid_entry tgid_base_stuff[] = {
15a11c5b
MT
48316 #ifdef CONFIG_HARDWALL
48317 INF("hardwall", S_IRUGO, proc_pid_hardwall),
57199397
MT
48318 #endif
48319+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48320+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
48321+#endif
48322 };
48323
48324 static int proc_tgid_base_readdir(struct file * filp,
572b4308 48325@@ -3155,7 +3293,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
57199397
MT
48326 if (!inode)
48327 goto out;
48328
48329+#ifdef CONFIG_GRKERNSEC_PROC_USER
48330+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
48331+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48332+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48333+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
48334+#else
48335 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
48336+#endif
48337 inode->i_op = &proc_tgid_base_inode_operations;
48338 inode->i_fop = &proc_tgid_base_operations;
48339 inode->i_flags|=S_IMMUTABLE;
572b4308 48340@@ -3197,7 +3342,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
57199397
MT
48341 if (!task)
48342 goto out;
48343
48344+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
48345+ goto out_put_task;
48346+
48347 result = proc_pid_instantiate(dir, dentry, task, NULL);
48348+out_put_task:
48349 put_task_struct(task);
48350 out:
48351 return result;
572b4308 48352@@ -3260,6 +3409,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
5e856224
MT
48353 static int fake_filldir(void *buf, const char *name, int namelen,
48354 loff_t offset, u64 ino, unsigned d_type)
57199397 48355 {
5e856224
MT
48356+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
48357+ __buf->error = -EINVAL;
48358 return 0;
48359 }
57199397 48360
572b4308 48361@@ -3326,7 +3477,7 @@ static const struct pid_entry tid_base_stuff[] = {
57199397
MT
48362 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
48363 #endif
48364 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
48365-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
48366+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
66a7e928 48367 INF("syscall", S_IRUGO, proc_pid_syscall),
57199397
MT
48368 #endif
48369 INF("cmdline", S_IRUGO, proc_pid_cmdline),
572b4308 48370@@ -3350,10 +3501,10 @@ static const struct pid_entry tid_base_stuff[] = {
6892158b
MT
48371 #ifdef CONFIG_SECURITY
48372 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
48373 #endif
48374-#ifdef CONFIG_KALLSYMS
48375+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57199397
MT
48376 INF("wchan", S_IRUGO, proc_pid_wchan),
48377 #endif
48378-#ifdef CONFIG_STACKTRACE
48379+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66a7e928 48380 ONE("stack", S_IRUGO, proc_pid_stack),
57199397
MT
48381 #endif
48382 #ifdef CONFIG_SCHEDSTATS
fe2de317
MT
48383diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
48384index 82676e3..5f8518a 100644
48385--- a/fs/proc/cmdline.c
48386+++ b/fs/proc/cmdline.c
48387@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
57199397
MT
48388
48389 static int __init proc_cmdline_init(void)
48390 {
48391+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48392+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
48393+#else
48394 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
48395+#endif
48396 return 0;
48397 }
48398 module_init(proc_cmdline_init);
fe2de317
MT
48399diff --git a/fs/proc/devices.c b/fs/proc/devices.c
48400index b143471..bb105e5 100644
48401--- a/fs/proc/devices.c
48402+++ b/fs/proc/devices.c
48403@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
57199397
MT
48404
48405 static int __init proc_devices_init(void)
48406 {
48407+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48408+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
48409+#else
48410 proc_create("devices", 0, NULL, &proc_devinfo_operations);
48411+#endif
48412 return 0;
48413 }
48414 module_init(proc_devices_init);
fe2de317 48415diff --git a/fs/proc/inode.c b/fs/proc/inode.c
c6e2a6c8 48416index 205c922..2ee4c57 100644
fe2de317
MT
48417--- a/fs/proc/inode.c
48418+++ b/fs/proc/inode.c
c6e2a6c8 48419@@ -21,11 +21,17 @@
5e856224 48420 #include <linux/seq_file.h>
6e9df6a3 48421 #include <linux/slab.h>
5e856224 48422 #include <linux/mount.h>
6e9df6a3
MT
48423+#include <linux/grsecurity.h>
48424
6e9df6a3
MT
48425 #include <asm/uaccess.h>
48426
48427 #include "internal.h"
48428
48429+#ifdef CONFIG_PROC_SYSCTL
48430+extern const struct inode_operations proc_sys_inode_operations;
48431+extern const struct inode_operations proc_sys_dir_operations;
48432+#endif
48433+
48434 static void proc_evict_inode(struct inode *inode)
48435 {
48436 struct proc_dir_entry *de;
c6e2a6c8 48437@@ -51,6 +57,13 @@ static void proc_evict_inode(struct inode *inode)
6e9df6a3
MT
48438 ns_ops = PROC_I(inode)->ns_ops;
48439 if (ns_ops && ns_ops->put)
48440 ns_ops->put(PROC_I(inode)->ns);
48441+
48442+#ifdef CONFIG_PROC_SYSCTL
48443+ if (inode->i_op == &proc_sys_inode_operations ||
48444+ inode->i_op == &proc_sys_dir_operations)
48445+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
48446+#endif
48447+
48448 }
48449
48450 static struct kmem_cache * proc_inode_cachep;
c6e2a6c8 48451@@ -456,7 +469,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
57199397
MT
48452 if (de->mode) {
48453 inode->i_mode = de->mode;
48454 inode->i_uid = de->uid;
48455+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
48456+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
48457+#else
48458 inode->i_gid = de->gid;
48459+#endif
48460 }
48461 if (de->size)
48462 inode->i_size = de->size;
fe2de317 48463diff --git a/fs/proc/internal.h b/fs/proc/internal.h
572b4308 48464index 5f79bb8..e9ab85d 100644
fe2de317
MT
48465--- a/fs/proc/internal.h
48466+++ b/fs/proc/internal.h
572b4308
MT
48467@@ -31,8 +31,6 @@ struct vmalloc_info {
48468 unsigned long largest_chunk;
48469 };
48470
48471-extern struct mm_struct *mm_for_maps(struct task_struct *);
48472-
48473 #ifdef CONFIG_MMU
48474 #define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
48475 extern void get_vmalloc_info(struct vmalloc_info *vmi);
48476@@ -54,6 +52,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
57199397
MT
48477 struct pid *pid, struct task_struct *task);
48478 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
48479 struct pid *pid, struct task_struct *task);
48480+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
48481+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
48482+#endif
48483 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
48484
c6e2a6c8 48485 extern const struct file_operations proc_pid_maps_operations;
fe2de317 48486diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
c6e2a6c8 48487index 86c67ee..cdca321 100644
fe2de317
MT
48488--- a/fs/proc/kcore.c
48489+++ b/fs/proc/kcore.c
c6e2a6c8 48490@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
57199397
MT
48491 * the addresses in the elf_phdr on our list.
48492 */
48493 start = kc_offset_to_vaddr(*fpos - elf_buflen);
48494- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
48495+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
48496+ if (tsz > buflen)
48497 tsz = buflen;
48498-
58c5fc13 48499+
57199397
MT
48500 while (buflen) {
48501 struct kcore_list *m;
58c5fc13 48502
c6e2a6c8 48503@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
57199397 48504 kfree(elf_buf);
58c5fc13 48505 } else {
57199397
MT
48506 if (kern_addr_valid(start)) {
48507- unsigned long n;
48508+ char *elf_buf;
bc901d79 48509+ mm_segment_t oldfs;
57199397
MT
48510
48511- n = copy_to_user(buffer, (char *)start, tsz);
48512- /*
c6e2a6c8 48513- * We cannot distinguish between fault on source
57199397
MT
48514- * and fault on destination. When this happens
48515- * we clear too and hope it will trigger the
48516- * EFAULT again.
48517- */
48518- if (n) {
48519- if (clear_user(buffer + tsz - n,
48520- n))
48521+ elf_buf = kmalloc(tsz, GFP_KERNEL);
48522+ if (!elf_buf)
48523+ return -ENOMEM;
bc901d79
MT
48524+ oldfs = get_fs();
48525+ set_fs(KERNEL_DS);
57199397 48526+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
bc901d79 48527+ set_fs(oldfs);
57199397
MT
48528+ if (copy_to_user(buffer, elf_buf, tsz)) {
48529+ kfree(elf_buf);
48530 return -EFAULT;
48531+ }
48532 }
bc901d79 48533+ set_fs(oldfs);
57199397
MT
48534+ kfree(elf_buf);
48535 } else {
48536 if (clear_user(buffer, tsz))
48537 return -EFAULT;
c6e2a6c8 48538@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
58c5fc13 48539
ae4e228f 48540 static int open_kcore(struct inode *inode, struct file *filp)
58c5fc13 48541 {
ae4e228f
MT
48542+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
48543+ return -EPERM;
58c5fc13 48544+#endif
ae4e228f
MT
48545 if (!capable(CAP_SYS_RAWIO))
48546 return -EPERM;
48547 if (kcore_need_update)
fe2de317 48548diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
4c928ab7 48549index 80e4645..53e5fcf 100644
fe2de317
MT
48550--- a/fs/proc/meminfo.c
48551+++ b/fs/proc/meminfo.c
4c928ab7 48552@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
ae4e228f
MT
48553 vmi.used >> 10,
48554 vmi.largest_chunk >> 10
48555 #ifdef CONFIG_MEMORY_FAILURE
48556- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
48557+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
48558 #endif
16454cff
MT
48559 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
48560 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
fe2de317
MT
48561diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
48562index b1822dd..df622cb 100644
48563--- a/fs/proc/nommu.c
48564+++ b/fs/proc/nommu.c
48565@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
58c5fc13
MT
48566 if (len < 1)
48567 len = 1;
48568 seq_printf(m, "%*c", len, ' ');
48569- seq_path(m, &file->f_path, "");
48570+ seq_path(m, &file->f_path, "\n\\");
48571 }
48572
48573 seq_putc(m, '\n');
fe2de317 48574diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
5e856224 48575index 06e1cc1..177cd98 100644
fe2de317
MT
48576--- a/fs/proc/proc_net.c
48577+++ b/fs/proc/proc_net.c
48578@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(struct inode *dir)
58c5fc13
MT
48579 struct task_struct *task;
48580 struct nsproxy *ns;
48581 struct net *net = NULL;
48582+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48583+ const struct cred *cred = current_cred();
48584+#endif
48585+
48586+#ifdef CONFIG_GRKERNSEC_PROC_USER
48587+ if (cred->fsuid)
48588+ return net;
48589+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48590+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
48591+ return net;
48592+#endif
48593
48594 rcu_read_lock();
48595 task = pid_task(proc_pid(dir), PIDTYPE_PID);
fe2de317 48596diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
c6e2a6c8 48597index 21d836f..bebf3ee 100644
fe2de317
MT
48598--- a/fs/proc/proc_sysctl.c
48599+++ b/fs/proc/proc_sysctl.c
c6e2a6c8
MT
48600@@ -12,11 +12,15 @@
48601 #include <linux/module.h>
58c5fc13
MT
48602 #include "internal.h"
48603
c6e2a6c8
MT
48604+extern int gr_handle_chroot_sysctl(const int op);
48605+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
48606+ const int op);
58c5fc13
MT
48607+
48608 static const struct dentry_operations proc_sys_dentry_operations;
48609 static const struct file_operations proc_sys_file_operations;
6e9df6a3
MT
48610-static const struct inode_operations proc_sys_inode_operations;
48611+const struct inode_operations proc_sys_inode_operations;
48612 static const struct file_operations proc_sys_dir_file_operations;
48613-static const struct inode_operations proc_sys_dir_operations;
48614+const struct inode_operations proc_sys_dir_operations;
48615
4c928ab7
MT
48616 void proc_sys_poll_notify(struct ctl_table_poll *poll)
48617 {
c6e2a6c8 48618@@ -470,8 +474,14 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
6e9df6a3
MT
48619
48620 err = NULL;
48621 d_set_d_op(dentry, &proc_sys_dentry_operations);
48622+
48623+ gr_handle_proc_create(dentry, inode);
48624+
48625 d_add(dentry, inode);
58c5fc13 48626
c6e2a6c8 48627+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt))
6e9df6a3
MT
48628+ err = ERR_PTR(-ENOENT);
48629+
48630 out:
48631 sysctl_head_finish(head);
48632 return err;
c6e2a6c8
MT
48633@@ -483,18 +493,20 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
48634 struct inode *inode = filp->f_path.dentry->d_inode;
48635 struct ctl_table_header *head = grab_header(inode);
48636 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
48637+ int op = write ? MAY_WRITE : MAY_READ;
48638 ssize_t error;
48639 size_t res;
48640
48641 if (IS_ERR(head))
48642 return PTR_ERR(head);
48643
48644+
48645 /*
48646 * At this point we know that the sysctl was not unregistered
48647 * and won't be until we finish.
48648 */
48649 error = -EPERM;
48650- if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
48651+ if (sysctl_perm(head->root, table, op))
48652 goto out;
48653
48654 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
48655@@ -502,6 +514,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
4c928ab7
MT
48656 if (!table->proc_handler)
48657 goto out;
48658
48659+#ifdef CONFIG_GRKERNSEC
48660+ error = -EPERM;
c6e2a6c8
MT
48661+ if (gr_handle_chroot_sysctl(op))
48662+ goto out;
48663+ dget(filp->f_path.dentry);
48664+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
48665+ dput(filp->f_path.dentry);
48666+ goto out;
48667+ }
48668+ dput(filp->f_path.dentry);
48669+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
48670+ goto out;
4c928ab7
MT
48671+ if (write && !capable(CAP_SYS_ADMIN))
48672+ goto out;
48673+#endif
48674+
48675 /* careful: calling conventions are nasty here */
48676 res = count;
48677 error = table->proc_handler(table, write, buf, &res, ppos);
c6e2a6c8 48678@@ -599,6 +627,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
6e9df6a3
MT
48679 return -ENOMEM;
48680 } else {
48681 d_set_d_op(child, &proc_sys_dentry_operations);
48682+
48683+ gr_handle_proc_create(child, inode);
58c5fc13 48684+
6e9df6a3
MT
48685 d_add(child, inode);
48686 }
48687 } else {
c6e2a6c8
MT
48688@@ -642,6 +673,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
48689 if ((*pos)++ < file->f_pos)
48690 return 0;
58c5fc13 48691
c6e2a6c8
MT
48692+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
48693+ return 0;
58c5fc13 48694+
c6e2a6c8
MT
48695 if (unlikely(S_ISLNK(table->mode)))
48696 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
48697 else
48698@@ -759,6 +793,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
58c5fc13
MT
48699 if (IS_ERR(head))
48700 return PTR_ERR(head);
48701
c6e2a6c8 48702+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
58c5fc13
MT
48703+ return -ENOENT;
48704+
48705 generic_fillattr(inode, stat);
48706 if (table)
48707 stat->mode = (stat->mode & S_IFMT) | table->mode;
c6e2a6c8 48708@@ -781,13 +818,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
6e9df6a3
MT
48709 .llseek = generic_file_llseek,
48710 };
48711
48712-static const struct inode_operations proc_sys_inode_operations = {
48713+const struct inode_operations proc_sys_inode_operations = {
48714 .permission = proc_sys_permission,
48715 .setattr = proc_sys_setattr,
48716 .getattr = proc_sys_getattr,
48717 };
48718
48719-static const struct inode_operations proc_sys_dir_operations = {
48720+const struct inode_operations proc_sys_dir_operations = {
48721 .lookup = proc_sys_lookup,
48722 .permission = proc_sys_permission,
48723 .setattr = proc_sys_setattr,
fe2de317 48724diff --git a/fs/proc/root.c b/fs/proc/root.c
c6e2a6c8 48725index eed44bf..abeb499 100644
fe2de317
MT
48726--- a/fs/proc/root.c
48727+++ b/fs/proc/root.c
c6e2a6c8 48728@@ -188,7 +188,15 @@ void __init proc_root_init(void)
58c5fc13
MT
48729 #ifdef CONFIG_PROC_DEVICETREE
48730 proc_device_tree_init();
48731 #endif
48732+#ifdef CONFIG_GRKERNSEC_PROC_ADD
48733+#ifdef CONFIG_GRKERNSEC_PROC_USER
48734+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
48735+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48736+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
48737+#endif
48738+#else
48739 proc_mkdir("bus", NULL);
48740+#endif
48741 proc_sys_init();
48742 }
48743
fe2de317 48744diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
572b4308 48745index 7faaf2a..7793015 100644
fe2de317
MT
48746--- a/fs/proc/task_mmu.c
48747+++ b/fs/proc/task_mmu.c
c6e2a6c8 48748@@ -11,12 +11,19 @@
4c928ab7
MT
48749 #include <linux/rmap.h>
48750 #include <linux/swap.h>
48751 #include <linux/swapops.h>
48752+#include <linux/grsecurity.h>
48753
48754 #include <asm/elf.h>
48755 #include <asm/uaccess.h>
c6e2a6c8
MT
48756 #include <asm/tlbflush.h>
48757 #include "internal.h"
48758
48759+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48760+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
48761+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
48762+ _mm->pax_flags & MF_PAX_SEGMEXEC))
48763+#endif
48764+
48765 void task_mem(struct seq_file *m, struct mm_struct *mm)
48766 {
48767 unsigned long data, text, lib, swap;
48768@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58c5fc13
MT
48769 "VmExe:\t%8lu kB\n"
48770 "VmLib:\t%8lu kB\n"
df50ba0c
MT
48771 "VmPTE:\t%8lu kB\n"
48772- "VmSwap:\t%8lu kB\n",
58c5fc13 48773- hiwater_vm << (PAGE_SHIFT-10),
df50ba0c 48774+ "VmSwap:\t%8lu kB\n"
58c5fc13
MT
48775+
48776+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
48777+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
48778+#endif
48779+
48780+ ,hiwater_vm << (PAGE_SHIFT-10),
48781 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
48782 mm->locked_vm << (PAGE_SHIFT-10),
4c928ab7 48783 mm->pinned_vm << (PAGE_SHIFT-10),
c6e2a6c8 48784@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58c5fc13
MT
48785 data << (PAGE_SHIFT-10),
48786 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
df50ba0c
MT
48787 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
48788- swap << (PAGE_SHIFT-10));
48789+ swap << (PAGE_SHIFT-10)
58c5fc13
MT
48790+
48791+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
c6e2a6c8
MT
48792+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48793+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
48794+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
48795+#else
48796+ , mm->context.user_cs_base
48797+ , mm->context.user_cs_limit
48798+#endif
58c5fc13
MT
48799+#endif
48800+
48801+ );
48802 }
48803
48804 unsigned long task_vsize(struct mm_struct *mm)
572b4308
MT
48805@@ -125,7 +149,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
48806 if (!priv->task)
48807 return ERR_PTR(-ESRCH);
48808
48809- mm = mm_for_maps(priv->task);
48810+ mm = mm_access(priv->task, PTRACE_MODE_READ);
48811 if (!mm || IS_ERR(mm))
48812 return mm;
48813 down_read(&mm->mmap_sem);
c6e2a6c8 48814@@ -231,13 +255,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
57199397 48815 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
58c5fc13
MT
48816 }
48817
57199397 48818- /* We don't show the stack guard page in /proc/maps */
58c5fc13 48819+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
66a7e928
MT
48820+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
48821+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
58c5fc13 48822+#else
66a7e928
MT
48823 start = vma->vm_start;
48824- if (stack_guard_page_start(vma, start))
48825- start += PAGE_SIZE;
48826 end = vma->vm_end;
48827- if (stack_guard_page_end(vma, end))
48828- end -= PAGE_SIZE;
58c5fc13 48829+#endif
66a7e928
MT
48830
48831 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
48832 start,
c6e2a6c8 48833@@ -246,7 +270,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
58c5fc13
MT
48834 flags & VM_WRITE ? 'w' : '-',
48835 flags & VM_EXEC ? 'x' : '-',
48836 flags & VM_MAYSHARE ? 's' : 'p',
48837+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48838+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
48839+#else
48840 pgoff,
48841+#endif
48842 MAJOR(dev), MINOR(dev), ino, &len);
48843
48844 /*
c6e2a6c8 48845@@ -255,7 +283,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
58c5fc13
MT
48846 */
48847 if (file) {
48848 pad_len_spaces(m, len);
48849- seq_path(m, &file->f_path, "\n");
48850+ seq_path(m, &file->f_path, "\n\\");
c6e2a6c8
MT
48851 goto done;
48852 }
48853
48854@@ -281,8 +309,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
48855 * Thread stack in /proc/PID/task/TID/maps or
48856 * the main process stack.
48857 */
48858- if (!is_pid || (vma->vm_start <= mm->start_stack &&
48859- vma->vm_end >= mm->start_stack)) {
48860+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
48861+ (vma->vm_start <= mm->start_stack &&
48862+ vma->vm_end >= mm->start_stack)) {
48863 name = "[stack]";
df50ba0c 48864 } else {
c6e2a6c8
MT
48865 /* Thread stack in /proc/PID/maps */
48866@@ -306,6 +335,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
4c928ab7
MT
48867 struct proc_maps_private *priv = m->private;
48868 struct task_struct *task = priv->task;
48869
48870+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48871+ if (current->exec_id != m->exec_id) {
48872+ gr_log_badprocpid("maps");
48873+ return 0;
48874+ }
48875+#endif
48876+
c6e2a6c8 48877 show_map_vma(m, vma, is_pid);
4c928ab7
MT
48878
48879 if (m->count < m->size) /* vma is copied successfully */
c6e2a6c8 48880@@ -482,12 +518,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
4c928ab7 48881 .private = &mss,
58c5fc13
MT
48882 };
48883
4c928ab7
MT
48884+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48885+ if (current->exec_id != m->exec_id) {
48886+ gr_log_badprocpid("smaps");
48887+ return 0;
48888+ }
48889+#endif
58c5fc13
MT
48890 memset(&mss, 0, sizeof mss);
48891- mss.vma = vma;
df50ba0c 48892- /* mmap_sem is held in m_start */
58c5fc13
MT
48893- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48894- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
df50ba0c 48895-
58c5fc13
MT
48896+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48897+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
48898+#endif
48899+ mss.vma = vma;
df50ba0c 48900+ /* mmap_sem is held in m_start */
58c5fc13
MT
48901+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
48902+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
48903+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48904+ }
48905+#endif
c6e2a6c8 48906 show_map_vma(m, vma, is_pid);
58c5fc13 48907
df50ba0c 48908 seq_printf(m,
c6e2a6c8 48909@@ -505,7 +552,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
58c5fc13 48910 "KernelPageSize: %8lu kB\n"
16454cff
MT
48911 "MMUPageSize: %8lu kB\n"
48912 "Locked: %8lu kB\n",
58c5fc13
MT
48913+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48914+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
48915+#else
48916 (vma->vm_end - vma->vm_start) >> 10,
48917+#endif
48918 mss.resident >> 10,
48919 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
48920 mss.shared_clean >> 10,
572b4308
MT
48921@@ -919,7 +970,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
48922 if (!pm.buffer)
48923 goto out_task;
48924
48925- mm = mm_for_maps(task);
48926+ mm = mm_access(task, PTRACE_MODE_READ);
48927 ret = PTR_ERR(mm);
48928 if (!mm || IS_ERR(mm))
48929 goto out_free;
c6e2a6c8 48930@@ -1138,6 +1189,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
4c928ab7
MT
48931 int n;
48932 char buffer[50];
48933
48934+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48935+ if (current->exec_id != m->exec_id) {
48936+ gr_log_badprocpid("numa_maps");
48937+ return 0;
48938+ }
48939+#endif
48940+
48941 if (!mm)
48942 return 0;
48943
c6e2a6c8 48944@@ -1155,11 +1213,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
4c928ab7
MT
48945 mpol_to_str(buffer, sizeof(buffer), pol, 0);
48946 mpol_cond_put(pol);
48947
48948+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48949+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
48950+#else
48951 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
48952+#endif
15a11c5b
MT
48953
48954 if (file) {
48955 seq_printf(m, " file=");
48956- seq_path(m, &file->f_path, "\n\t= ");
48957+ seq_path(m, &file->f_path, "\n\t\\= ");
48958 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
48959 seq_printf(m, " heap");
c6e2a6c8 48960 } else {
fe2de317 48961diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
572b4308 48962index 74fe164..0848f95 100644
fe2de317
MT
48963--- a/fs/proc/task_nommu.c
48964+++ b/fs/proc/task_nommu.c
48965@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
58c5fc13
MT
48966 else
48967 bytes += kobjsize(mm);
48968
48969- if (current->fs && current->fs->users > 1)
48970+ if (current->fs && atomic_read(&current->fs->users) > 1)
48971 sbytes += kobjsize(current->fs);
48972 else
48973 bytes += kobjsize(current->fs);
c6e2a6c8 48974@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
57199397
MT
48975
48976 if (file) {
48977 pad_len_spaces(m, len);
58c5fc13
MT
48978- seq_path(m, &file->f_path, "");
48979+ seq_path(m, &file->f_path, "\n\\");
57199397 48980 } else if (mm) {
c6e2a6c8
MT
48981 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
48982
572b4308
MT
48983@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
48984 if (!priv->task)
48985 return ERR_PTR(-ESRCH);
48986
48987- mm = mm_for_maps(priv->task);
48988+ mm = mm_access(priv->task, PTRACE_MODE_READ);
48989 if (!mm || IS_ERR(mm)) {
48990 put_task_struct(priv->task);
48991 priv->task = NULL;
fe2de317
MT
48992diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
48993index d67908b..d13f6a6 100644
48994--- a/fs/quota/netlink.c
48995+++ b/fs/quota/netlink.c
48996@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
8308f9c9
MT
48997 void quota_send_warning(short type, unsigned int id, dev_t dev,
48998 const char warntype)
48999 {
49000- static atomic_t seq;
49001+ static atomic_unchecked_t seq;
49002 struct sk_buff *skb;
49003 void *msg_head;
49004 int ret;
fe2de317 49005@@ -49,7 +49,7 @@ void quota_send_warning(short type, unsigned int id, dev_t dev,
8308f9c9
MT
49006 "VFS: Not enough memory to send quota warning.\n");
49007 return;
49008 }
49009- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
49010+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
49011 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
49012 if (!msg_head) {
49013 printk(KERN_ERR
fe2de317 49014diff --git a/fs/readdir.c b/fs/readdir.c
c6e2a6c8 49015index cc0a822..43cb195 100644
fe2de317
MT
49016--- a/fs/readdir.c
49017+++ b/fs/readdir.c
6892158b 49018@@ -17,6 +17,7 @@
58c5fc13
MT
49019 #include <linux/security.h>
49020 #include <linux/syscalls.h>
49021 #include <linux/unistd.h>
49022+#include <linux/namei.h>
49023
49024 #include <asm/uaccess.h>
49025
49026@@ -67,6 +68,7 @@ struct old_linux_dirent {
49027
49028 struct readdir_callback {
49029 struct old_linux_dirent __user * dirent;
49030+ struct file * file;
49031 int result;
49032 };
49033
fe2de317 49034@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
58c5fc13
MT
49035 buf->result = -EOVERFLOW;
49036 return -EOVERFLOW;
49037 }
49038+
49039+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49040+ return 0;
49041+
49042 buf->result++;
49043 dirent = buf->dirent;
49044 if (!access_ok(VERIFY_WRITE, dirent,
fe2de317 49045@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
58c5fc13
MT
49046
49047 buf.result = 0;
49048 buf.dirent = dirent;
49049+ buf.file = file;
49050
49051 error = vfs_readdir(file, fillonedir, &buf);
49052 if (buf.result)
49053@@ -142,6 +149,7 @@ struct linux_dirent {
49054 struct getdents_callback {
49055 struct linux_dirent __user * current_dir;
49056 struct linux_dirent __user * previous;
49057+ struct file * file;
49058 int count;
49059 int error;
49060 };
fe2de317 49061@@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
58c5fc13
MT
49062 buf->error = -EOVERFLOW;
49063 return -EOVERFLOW;
49064 }
49065+
49066+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49067+ return 0;
49068+
49069 dirent = buf->previous;
49070 if (dirent) {
49071 if (__put_user(offset, &dirent->d_off))
fe2de317 49072@@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
58c5fc13
MT
49073 buf.previous = NULL;
49074 buf.count = count;
49075 buf.error = 0;
49076+ buf.file = file;
49077
49078 error = vfs_readdir(file, filldir, &buf);
49079 if (error >= 0)
6892158b 49080@@ -229,6 +242,7 @@ out:
58c5fc13
MT
49081 struct getdents_callback64 {
49082 struct linux_dirent64 __user * current_dir;
49083 struct linux_dirent64 __user * previous;
49084+ struct file *file;
49085 int count;
49086 int error;
49087 };
fe2de317 49088@@ -244,6 +258,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
58c5fc13
MT
49089 buf->error = -EINVAL; /* only used if we fail.. */
49090 if (reclen > buf->count)
49091 return -EINVAL;
49092+
49093+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49094+ return 0;
49095+
49096 dirent = buf->previous;
49097 if (dirent) {
49098 if (__put_user(offset, &dirent->d_off))
fe2de317 49099@@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
58c5fc13
MT
49100
49101 buf.current_dir = dirent;
49102 buf.previous = NULL;
49103+ buf.file = file;
49104 buf.count = count;
49105 buf.error = 0;
49106
fe2de317 49107@@ -299,7 +318,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
6e9df6a3
MT
49108 error = buf.error;
49109 lastdirent = buf.previous;
49110 if (lastdirent) {
49111- typeof(lastdirent->d_off) d_off = file->f_pos;
49112+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
49113 if (__put_user(d_off, &lastdirent->d_off))
49114 error = -EFAULT;
49115 else
fe2de317 49116diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
c6e2a6c8 49117index 2b7882b..1c5ef48 100644
fe2de317
MT
49118--- a/fs/reiserfs/do_balan.c
49119+++ b/fs/reiserfs/do_balan.c
49120@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
58c5fc13
MT
49121 return;
49122 }
49123
49124- atomic_inc(&(fs_generation(tb->tb_sb)));
49125+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
49126 do_balance_starts(tb);
49127
49128 /* balance leaf returns 0 except if combining L R and S into
fe2de317 49129diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
c6e2a6c8 49130index 2c1ade6..8c59d8d 100644
fe2de317
MT
49131--- a/fs/reiserfs/procfs.c
49132+++ b/fs/reiserfs/procfs.c
c6e2a6c8 49133@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
58c5fc13
MT
49134 "SMALL_TAILS " : "NO_TAILS ",
49135 replay_only(sb) ? "REPLAY_ONLY " : "",
49136 convert_reiserfs(sb) ? "CONV " : "",
49137- atomic_read(&r->s_generation_counter),
49138+ atomic_read_unchecked(&r->s_generation_counter),
49139 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
49140 SF(s_do_balance), SF(s_unneeded_left_neighbor),
49141 SF(s_good_search_by_key_reada), SF(s_bmaps),
c6e2a6c8
MT
49142diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
49143index a59d271..e12d1cf 100644
49144--- a/fs/reiserfs/reiserfs.h
49145+++ b/fs/reiserfs/reiserfs.h
49146@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
49147 /* Comment? -Hans */
49148 wait_queue_head_t s_wait;
49149 /* To be obsoleted soon by per buffer seals.. -Hans */
49150- atomic_t s_generation_counter; // increased by one every time the
49151+ atomic_unchecked_t s_generation_counter; // increased by one every time the
49152 // tree gets re-balanced
49153 unsigned long s_properties; /* File system properties. Currently holds
49154 on-disk FS format */
49155@@ -1973,7 +1973,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
49156 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
49157
49158 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
49159-#define get_generation(s) atomic_read (&fs_generation(s))
49160+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
49161 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
49162 #define __fs_changed(gen,s) (gen != get_generation (s))
49163 #define fs_changed(gen,s) \
fe2de317 49164diff --git a/fs/select.c b/fs/select.c
c6e2a6c8 49165index 17d33d0..da0bf5c 100644
fe2de317
MT
49166--- a/fs/select.c
49167+++ b/fs/select.c
ae4e228f 49168@@ -20,6 +20,7 @@
c6e2a6c8 49169 #include <linux/export.h>
58c5fc13
MT
49170 #include <linux/slab.h>
49171 #include <linux/poll.h>
49172+#include <linux/security.h>
49173 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
49174 #include <linux/file.h>
49175 #include <linux/fdtable.h>
c6e2a6c8 49176@@ -833,6 +834,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
58c5fc13
MT
49177 struct poll_list *walk = head;
49178 unsigned long todo = nfds;
49179
49180+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
df50ba0c 49181 if (nfds > rlimit(RLIMIT_NOFILE))
58c5fc13
MT
49182 return -EINVAL;
49183
fe2de317 49184diff --git a/fs/seq_file.c b/fs/seq_file.c
572b4308 49185index 0cbd049..64e705c 100644
fe2de317
MT
49186--- a/fs/seq_file.c
49187+++ b/fs/seq_file.c
4c928ab7 49188@@ -9,6 +9,7 @@
c6e2a6c8 49189 #include <linux/export.h>
4c928ab7
MT
49190 #include <linux/seq_file.h>
49191 #include <linux/slab.h>
49192+#include <linux/sched.h>
49193
49194 #include <asm/uaccess.h>
49195 #include <asm/page.h>
c6e2a6c8 49196@@ -56,6 +57,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
4c928ab7
MT
49197 memset(p, 0, sizeof(*p));
49198 mutex_init(&p->lock);
49199 p->op = op;
49200+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49201+ p->exec_id = current->exec_id;
49202+#endif
49203
49204 /*
49205 * Wrappers around seq_open(e.g. swaps_open) need to be
572b4308
MT
49206@@ -92,7 +96,7 @@ static int traverse(struct seq_file *m, loff_t offset)
49207 return 0;
49208 }
49209 if (!m->buf) {
49210- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49211+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
49212 if (!m->buf)
49213 return -ENOMEM;
49214 }
49215@@ -132,7 +136,7 @@ static int traverse(struct seq_file *m, loff_t offset)
49216 Eoverflow:
49217 m->op->stop(m, p);
49218 kfree(m->buf);
49219- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49220+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
49221 return !m->buf ? -ENOMEM : -EAGAIN;
49222 }
49223
49224@@ -187,7 +191,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49225
49226 /* grab buffer if we didn't have one */
49227 if (!m->buf) {
49228- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
49229+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
49230 if (!m->buf)
49231 goto Enomem;
49232 }
49233@@ -228,7 +232,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
49234 goto Fill;
49235 m->op->stop(m, p);
49236 kfree(m->buf);
49237- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
49238+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
49239 if (!m->buf)
49240 goto Enomem;
49241 m->count = 0;
c6e2a6c8 49242@@ -567,7 +571,7 @@ static void single_stop(struct seq_file *p, void *v)
15a11c5b
MT
49243 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
49244 void *data)
49245 {
49246- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
49247+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
49248 int res = -ENOMEM;
49249
49250 if (op) {
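
The exec_id stored in seq_open() above supports the GRKERNSEC_PROC_MEMMAP behaviour described later in this patch's Kconfig help: the opener's exec_id is recorded so that reads performed after a different execve() can be detected. The comparison site is not in the hunks shown here; the sketch below only illustrates the intended shape, and the helper name is made up:

    /*
     * Hypothetical helper showing what the stored exec_id enables: the read
     * path can compare the opener's exec_id with the reader's.  The real
     * check lives elsewhere in the patch; this is a sketch only.
     */
    static int seq_opened_by_current_exec(const struct seq_file *m)
    {
    #ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
            return m->exec_id == current->exec_id;
    #else
            return 1;
    #endif
    }
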
fe2de317 49251diff --git a/fs/splice.c b/fs/splice.c
572b4308 49252index 5cac690..f833a99 100644
fe2de317
MT
49253--- a/fs/splice.c
49254+++ b/fs/splice.c
49255@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
ae4e228f
MT
49256 pipe_lock(pipe);
49257
49258 for (;;) {
49259- if (!pipe->readers) {
49260+ if (!atomic_read(&pipe->readers)) {
49261 send_sig(SIGPIPE, current, 0);
49262 if (!ret)
49263 ret = -EPIPE;
fe2de317 49264@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
ae4e228f
MT
49265 do_wakeup = 0;
49266 }
49267
49268- pipe->waiting_writers++;
49269+ atomic_inc(&pipe->waiting_writers);
49270 pipe_wait(pipe);
49271- pipe->waiting_writers--;
49272+ atomic_dec(&pipe->waiting_writers);
49273 }
49274
49275 pipe_unlock(pipe);
572b4308 49276@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
ae4e228f
MT
49277 old_fs = get_fs();
49278 set_fs(get_ds());
49279 /* The cast to a user pointer is valid due to the set_fs() */
49280- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
6e9df6a3 49281+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
ae4e228f
MT
49282 set_fs(old_fs);
49283
49284 return res;
572b4308 49285@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
ae4e228f
MT
49286 old_fs = get_fs();
49287 set_fs(get_ds());
49288 /* The cast to a user pointer is valid due to the set_fs() */
49289- res = vfs_write(file, (const char __user *)buf, count, &pos);
6e9df6a3 49290+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
ae4e228f
MT
49291 set_fs(old_fs);
49292
49293 return res;
572b4308 49294@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
ae4e228f
MT
49295 goto err;
49296
49297 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
49298- vec[i].iov_base = (void __user *) page_address(page);
6e9df6a3 49299+ vec[i].iov_base = (void __force_user *) page_address(page);
ae4e228f 49300 vec[i].iov_len = this_len;
57199397 49301 spd.pages[i] = page;
ae4e228f 49302 spd.nr_pages++;
572b4308 49303@@ -849,10 +849,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
ae4e228f
MT
49304 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
49305 {
49306 while (!pipe->nrbufs) {
49307- if (!pipe->writers)
49308+ if (!atomic_read(&pipe->writers))
49309 return 0;
49310
49311- if (!pipe->waiting_writers && sd->num_spliced)
49312+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
49313 return 0;
49314
49315 if (sd->flags & SPLICE_F_NONBLOCK)
572b4308 49316@@ -1185,7 +1185,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
ae4e228f
MT
49317 * out of the pipe right after the splice_to_pipe(). So set
49318 * PIPE_READERS appropriately.
49319 */
49320- pipe->readers = 1;
49321+ atomic_set(&pipe->readers, 1);
49322
49323 current->splice_pipe = pipe;
49324 }
572b4308 49325@@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ae4e228f
MT
49326 ret = -ERESTARTSYS;
49327 break;
49328 }
49329- if (!pipe->writers)
49330+ if (!atomic_read(&pipe->writers))
49331 break;
49332- if (!pipe->waiting_writers) {
49333+ if (!atomic_read(&pipe->waiting_writers)) {
49334 if (flags & SPLICE_F_NONBLOCK) {
49335 ret = -EAGAIN;
49336 break;
572b4308 49337@@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ae4e228f
MT
49338 pipe_lock(pipe);
49339
57199397 49340 while (pipe->nrbufs >= pipe->buffers) {
ae4e228f
MT
49341- if (!pipe->readers) {
49342+ if (!atomic_read(&pipe->readers)) {
49343 send_sig(SIGPIPE, current, 0);
49344 ret = -EPIPE;
49345 break;
572b4308 49346@@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
ae4e228f
MT
49347 ret = -ERESTARTSYS;
49348 break;
49349 }
49350- pipe->waiting_writers++;
49351+ atomic_inc(&pipe->waiting_writers);
49352 pipe_wait(pipe);
49353- pipe->waiting_writers--;
49354+ atomic_dec(&pipe->waiting_writers);
49355 }
58c5fc13 49356
ae4e228f 49357 pipe_unlock(pipe);
572b4308 49358@@ -1823,14 +1823,14 @@ retry:
ae4e228f
MT
49359 pipe_double_lock(ipipe, opipe);
49360
49361 do {
49362- if (!opipe->readers) {
49363+ if (!atomic_read(&opipe->readers)) {
49364 send_sig(SIGPIPE, current, 0);
49365 if (!ret)
49366 ret = -EPIPE;
49367 break;
49368 }
49369
49370- if (!ipipe->nrbufs && !ipipe->writers)
49371+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
49372 break;
49373
49374 /*
572b4308 49375@@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
ae4e228f
MT
49376 pipe_double_lock(ipipe, opipe);
49377
49378 do {
49379- if (!opipe->readers) {
49380+ if (!atomic_read(&opipe->readers)) {
49381 send_sig(SIGPIPE, current, 0);
49382 if (!ret)
49383 ret = -EPIPE;
572b4308 49384@@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
ae4e228f
MT
49385 * return EAGAIN if we have the potential of some data in the
49386 * future, otherwise just return 0
49387 */
49388- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
49389+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
49390 ret = -EAGAIN;
49391
49392 pipe_unlock(ipipe);
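
The fs/splice.c changes above are mechanical: every bare read or increment of pipe->readers, pipe->writers and pipe->waiting_writers becomes an atomic_read()/atomic_inc()/atomic_dec() on an atomic_t, matching the field type change made elsewhere in the patch. A rough userspace analogue of the same conversion, using C11 atomics (the names below are illustrative, not kernel API):

    #include <stdatomic.h>

    /* Before: plain ints mutated under the pipe lock.  After: atomic
     * counters read and updated with explicit atomic operations. */
    struct pipe_counters {
            atomic_int readers;
            atomic_int waiting_writers;
    };

    static int pipe_has_readers(struct pipe_counters *p)
    {
            return atomic_load(&p->readers) != 0;        /* was: p->readers */
    }

    static void writer_wait(struct pipe_counters *p)
    {
            atomic_fetch_add(&p->waiting_writers, 1);    /* was: ...++ */
            /* pipe_wait(pipe) would go here */
            atomic_fetch_sub(&p->waiting_writers, 1);    /* was: ...-- */
    }
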
4c928ab7 49393diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
c6e2a6c8 49394index 35a36d3..23424b2 100644
4c928ab7
MT
49395--- a/fs/sysfs/dir.c
49396+++ b/fs/sysfs/dir.c
c6e2a6c8 49397@@ -657,6 +657,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
4c928ab7
MT
49398 struct sysfs_dirent *sd;
49399 int rc;
49400
49401+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49402+ const char *parent_name = parent_sd->s_name;
49403+
49404+ mode = S_IFDIR | S_IRWXU;
49405+
49406+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
49407+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
49408+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
49409+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
49410+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
49411+#endif
49412+
49413 /* allocate */
49414 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
49415 if (!sd)
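
Under GRKERNSEC_SYSFS_RESTRICT the hunk above defaults every new sysfs directory to mode 0700 and then re-opens a handful of paths that userspace commonly needs. Restated as a predicate purely for readability (the patch itself open-codes the same strcmp() chain directly in create_dir()):

    #include <string.h>

    /* Directories kept world-readable/searchable: /sys/devices, /sys/fs,
     * /sys/devices/system, /sys/fs/selinux, /sys/fs/fuse and
     * /sys/devices/system/cpu -- exactly the pairs tested above. */
    static int sysfs_dir_stays_open(const char *parent, const char *name)
    {
            return (!strcmp(parent, "") && (!strcmp(name, "devices") ||
                                            !strcmp(name, "fs"))) ||
                   (!strcmp(parent, "devices") && !strcmp(name, "system")) ||
                   (!strcmp(parent, "fs") && (!strcmp(name, "selinux") ||
                                              !strcmp(name, "fuse"))) ||
                   (!strcmp(parent, "system") && !strcmp(name, "cpu"));
    }
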
fe2de317 49416diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
5e856224 49417index 00012e3..8392349 100644
fe2de317
MT
49418--- a/fs/sysfs/file.c
49419+++ b/fs/sysfs/file.c
49420@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
8308f9c9
MT
49421
49422 struct sysfs_open_dirent {
49423 atomic_t refcnt;
49424- atomic_t event;
49425+ atomic_unchecked_t event;
49426 wait_queue_head_t poll;
49427 struct list_head buffers; /* goes through sysfs_buffer.list */
49428 };
fe2de317 49429@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
8308f9c9
MT
49430 if (!sysfs_get_active(attr_sd))
49431 return -ENODEV;
49432
49433- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
49434+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
49435 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
49436
49437 sysfs_put_active(attr_sd);
fe2de317 49438@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
8308f9c9
MT
49439 return -ENOMEM;
49440
49441 atomic_set(&new_od->refcnt, 0);
49442- atomic_set(&new_od->event, 1);
49443+ atomic_set_unchecked(&new_od->event, 1);
49444 init_waitqueue_head(&new_od->poll);
49445 INIT_LIST_HEAD(&new_od->buffers);
49446 goto retry;
fe2de317 49447@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
8308f9c9
MT
49448
49449 sysfs_put_active(attr_sd);
49450
49451- if (buffer->event != atomic_read(&od->event))
49452+ if (buffer->event != atomic_read_unchecked(&od->event))
49453 goto trigger;
49454
49455 return DEFAULT_POLLMASK;
fe2de317 49456@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
8308f9c9
MT
49457
49458 od = sd->s_attr.open;
49459 if (od) {
49460- atomic_inc(&od->event);
49461+ atomic_inc_unchecked(&od->event);
49462 wake_up_interruptible(&od->poll);
49463 }
49464
fe2de317
MT
49465diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
49466index a7ac78f..02158e1 100644
49467--- a/fs/sysfs/symlink.c
49468+++ b/fs/sysfs/symlink.c
49469@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58c5fc13
MT
49470
49471 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
49472 {
49473- char *page = nd_get_link(nd);
49474+ const char *page = nd_get_link(nd);
49475 if (!IS_ERR(page))
49476 free_page((unsigned long)page);
49477 }
fe2de317 49478diff --git a/fs/udf/misc.c b/fs/udf/misc.c
4c928ab7 49479index c175b4d..8f36a16 100644
fe2de317
MT
49480--- a/fs/udf/misc.c
49481+++ b/fs/udf/misc.c
4c928ab7 49482@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
bc901d79
MT
49483
49484 u8 udf_tag_checksum(const struct tag *t)
49485 {
49486- u8 *data = (u8 *)t;
49487+ const u8 *data = (const u8 *)t;
49488 u8 checksum = 0;
49489 int i;
49490 for (i = 0; i < sizeof(struct tag); ++i)
fe2de317
MT
49491diff --git a/fs/utimes.c b/fs/utimes.c
49492index ba653f3..06ea4b1 100644
49493--- a/fs/utimes.c
49494+++ b/fs/utimes.c
58c5fc13
MT
49495@@ -1,6 +1,7 @@
49496 #include <linux/compiler.h>
49497 #include <linux/file.h>
49498 #include <linux/fs.h>
49499+#include <linux/security.h>
49500 #include <linux/linkage.h>
49501 #include <linux/mount.h>
49502 #include <linux/namei.h>
fe2de317 49503@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
58c5fc13
MT
49504 goto mnt_drop_write_and_out;
49505 }
49506 }
49507+
49508+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
49509+ error = -EACCES;
49510+ goto mnt_drop_write_and_out;
49511+ }
49512+
49513 mutex_lock(&inode->i_mutex);
49514 error = notify_change(path->dentry, &newattrs);
49515 mutex_unlock(&inode->i_mutex);
fe2de317 49516diff --git a/fs/xattr.c b/fs/xattr.c
c6e2a6c8 49517index 3c8c1cc..a83c398 100644
fe2de317
MT
49518--- a/fs/xattr.c
49519+++ b/fs/xattr.c
c6e2a6c8 49520@@ -316,7 +316,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
bc901d79
MT
49521 * Extended attribute SET operations
49522 */
49523 static long
49524-setxattr(struct dentry *d, const char __user *name, const void __user *value,
49525+setxattr(struct path *path, const char __user *name, const void __user *value,
49526 size_t size, int flags)
49527 {
49528 int error;
c6e2a6c8
MT
49529@@ -349,7 +349,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
49530 }
bc901d79
MT
49531 }
49532
49533- error = vfs_setxattr(d, kname, kvalue, size, flags);
49534+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
49535+ error = -EACCES;
49536+ goto out;
49537+ }
49538+
49539+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
c6e2a6c8
MT
49540 out:
49541 if (vvalue)
49542 vfree(vvalue);
49543@@ -370,7 +375,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
bc901d79
MT
49544 return error;
49545 error = mnt_want_write(path.mnt);
49546 if (!error) {
49547- error = setxattr(path.dentry, name, value, size, flags);
49548+ error = setxattr(&path, name, value, size, flags);
49549 mnt_drop_write(path.mnt);
49550 }
49551 path_put(&path);
c6e2a6c8 49552@@ -389,7 +394,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
bc901d79
MT
49553 return error;
49554 error = mnt_want_write(path.mnt);
49555 if (!error) {
49556- error = setxattr(path.dentry, name, value, size, flags);
49557+ error = setxattr(&path, name, value, size, flags);
49558 mnt_drop_write(path.mnt);
49559 }
49560 path_put(&path);
c6e2a6c8 49561@@ -400,17 +405,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
bc901d79
MT
49562 const void __user *,value, size_t, size, int, flags)
49563 {
49564 struct file *f;
49565- struct dentry *dentry;
49566 int error = -EBADF;
49567
49568 f = fget(fd);
49569 if (!f)
49570 return error;
49571- dentry = f->f_path.dentry;
49572- audit_inode(NULL, dentry);
49573+ audit_inode(NULL, f->f_path.dentry);
49574 error = mnt_want_write_file(f);
49575 if (!error) {
49576- error = setxattr(dentry, name, value, size, flags);
49577+ error = setxattr(&f->f_path, name, value, size, flags);
5e856224 49578 mnt_drop_write_file(f);
bc901d79
MT
49579 }
49580 fput(f);
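
The fs/xattr.c changes widen the internal setxattr() helper from a dentry to a full struct path so the grsecurity hook can see both the dentry and the vfsmount before the VFS operation runs. Reduced to its essentials (only gr_acl_handle_setxattr() and vfs_setxattr() are taken from the hunks, the rest is a sketch):

    static long setxattr_checked(struct path *path, const char *kname,
                                 const void *kvalue, size_t size, int flags)
    {
            /* Deny before touching the filesystem if RBAC policy says no. */
            if (!gr_acl_handle_setxattr(path->dentry, path->mnt))
                    return -EACCES;

            return vfs_setxattr(path->dentry, kname, kvalue, size, flags);
    }
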
fe2de317 49581diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
c6e2a6c8 49582index 69d06b0..c0996e5 100644
fe2de317
MT
49583--- a/fs/xattr_acl.c
49584+++ b/fs/xattr_acl.c
49585@@ -17,8 +17,8 @@
49586 struct posix_acl *
49587 posix_acl_from_xattr(const void *value, size_t size)
49588 {
49589- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
49590- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
49591+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
49592+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
49593 int count;
49594 struct posix_acl *acl;
49595 struct posix_acl_entry *acl_e;
49596diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
c6e2a6c8 49597index 85e7e32..5344e52 100644
fe2de317
MT
49598--- a/fs/xfs/xfs_bmap.c
49599+++ b/fs/xfs/xfs_bmap.c
4c928ab7 49600@@ -190,7 +190,7 @@ xfs_bmap_validate_ret(
58c5fc13
MT
49601 int nmap,
49602 int ret_nmap);
49603 #else
49604-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
49605+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
49606 #endif /* DEBUG */
49607
ae4e228f 49608 STATIC int
fe2de317
MT
49609diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
49610index 79d05e8..e3e5861 100644
49611--- a/fs/xfs/xfs_dir2_sf.c
49612+++ b/fs/xfs/xfs_dir2_sf.c
6e9df6a3 49613@@ -852,7 +852,15 @@ xfs_dir2_sf_getdents(
71d190be
MT
49614 }
49615
6e9df6a3 49616 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
71d190be
MT
49617- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49618+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
49619+ char name[sfep->namelen];
49620+ memcpy(name, sfep->name, sfep->namelen);
49621+ if (filldir(dirent, name, sfep->namelen,
49622+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
49623+ *offset = off & 0x7fffffff;
49624+ return 0;
49625+ }
49626+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
49627 off & 0x7fffffff, ino, DT_UNKNOWN)) {
49628 *offset = off & 0x7fffffff;
49629 return 0;
fe2de317 49630diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
c6e2a6c8 49631index 91f8ff5..0ce68f9 100644
fe2de317
MT
49632--- a/fs/xfs/xfs_ioctl.c
49633+++ b/fs/xfs/xfs_ioctl.c
6e9df6a3
MT
49634@@ -128,7 +128,7 @@ xfs_find_handle(
49635 }
49636
49637 error = -EFAULT;
49638- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
49639+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
49640 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
49641 goto out_put;
49642
fe2de317 49643diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
c6e2a6c8 49644index 3011b87..1ab03e9 100644
fe2de317
MT
49645--- a/fs/xfs/xfs_iops.c
49646+++ b/fs/xfs/xfs_iops.c
c6e2a6c8 49647@@ -397,7 +397,7 @@ xfs_vn_put_link(
6e9df6a3
MT
49648 struct nameidata *nd,
49649 void *p)
49650 {
49651- char *s = nd_get_link(nd);
49652+ const char *s = nd_get_link(nd);
49653
49654 if (!IS_ERR(s))
49655 kfree(s);
fe2de317
MT
49656diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
49657new file mode 100644
572b4308 49658index 0000000..4d533f1
fe2de317
MT
49659--- /dev/null
49660+++ b/grsecurity/Kconfig
572b4308 49661@@ -0,0 +1,941 @@
fe2de317
MT
49662+#
49663+# grsecurity configuration
49664+#
4c928ab7 49665+menu "Memory Protections"
fe2de317 49666+depends on GRKERNSEC
58c5fc13 49667+
fe2de317
MT
49668+config GRKERNSEC_KMEM
49669+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
c1e3898a 49670+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
49671+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
49672+ help
49673+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
49674+ be written to or read from to modify or leak the contents of the running
49675+ kernel. /dev/port will also not be allowed to be opened. If you have module
49676+ support disabled, enabling this will close up four ways that are
49677+ currently used to insert malicious code into the running kernel.
49678+ Even with all these features enabled, we still highly recommend that
49679+ you use the RBAC system, as it is still possible for an attacker to
49680+ modify the running kernel through privileged I/O granted by ioperm/iopl.
49681+ If you are not using XFree86, you may be able to stop this additional
49682+ case by enabling the 'Disable privileged I/O' option. Though nothing
49683+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
49684+ but only to video memory, which is the only writing we allow in this
49685+ case. If /dev/kmem or /dev/mem are mmapped without PROT_WRITE, the
49686+ mapping will not be allowed to be mprotected with PROT_WRITE later.
49687+ It is highly recommended that you say Y here if you meet all the
49688+ conditions above.
58c5fc13 49689+
fe2de317
MT
49690+config GRKERNSEC_VM86
49691+ bool "Restrict VM86 mode"
c1e3898a 49692+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
fe2de317 49693+ depends on X86_32
58c5fc13 49694+
fe2de317
MT
49695+ help
49696+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
49697+ make use of a special execution mode on 32bit x86 processors called
49698+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
49699+ video cards and will still work with this option enabled. The purpose
49700+ of the option is to prevent exploitation of emulation errors in
49701+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
49702+ Nearly all users should be able to enable this option.
58c5fc13 49703+
fe2de317
MT
49704+config GRKERNSEC_IO
49705+ bool "Disable privileged I/O"
c1e3898a 49706+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
fe2de317
MT
49707+ depends on X86
49708+ select RTC_CLASS
49709+ select RTC_INTF_DEV
49710+ select RTC_DRV_CMOS
58c5fc13 49711+
fe2de317
MT
49712+ help
49713+ If you say Y here, all ioperm and iopl calls will return an error.
49714+ Ioperm and iopl can be used to modify the running kernel.
49715+ Unfortunately, some programs need this access to operate properly,
49716+ the most notable of which are XFree86 and hwclock. hwclock can be
49717+ remedied by having RTC support in the kernel, so real-time
49718+ clock support is enabled if this option is enabled, to ensure
49719+ that hwclock operates correctly. XFree86 still will not
49720+ operate correctly with this option enabled, so DO NOT CHOOSE Y
49721+ IF YOU USE XFree86. If you use XFree86 and you still want to
49722+ protect your kernel against modification, use the RBAC system.
58c5fc13 49723+
fe2de317 49724+config GRKERNSEC_PROC_MEMMAP
4c928ab7 49725+ bool "Harden ASLR against information leaks and entropy reduction"
c1e3898a 49726+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
fe2de317
MT
49727+ depends on PAX_NOEXEC || PAX_ASLR
49728+ help
49729+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
49730+ give no information about the addresses of its mappings if
49731+ PaX features that rely on random addresses are enabled on the task.
4c928ab7
MT
49732+ In addition to sanitizing this information and disabling other
49733+ dangerous sources of information, this option denies reads of sensitive
49734+ /proc/<pid> entries where the file descriptor was opened in a different
49735+ task than the one performing the read. Such attempts are logged.
49736+ This option also limits argv/env strings for suid/sgid binaries
49737+ to 512KB to prevent a complete exhaustion of the stack entropy provided
49738+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
49739+ binaries to prevent alternative mmap layouts from being abused.
49740+
49741+ If you use PaX it is essential that you say Y here as it closes up
49742+ several holes that make full ASLR useless locally.
58c5fc13 49743+
fe2de317
MT
49744+config GRKERNSEC_BRUTE
49745+ bool "Deter exploit bruteforcing"
c1e3898a 49746+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
49747+ help
49748+ If you say Y here, attempts to bruteforce exploits against forking
49749+ daemons such as apache or sshd, as well as against suid/sgid binaries
49750+ will be deterred. When a child of a forking daemon is killed by PaX
49751+ or crashes due to an illegal instruction or other suspicious signal,
49752+ the parent process will be delayed 30 seconds upon every subsequent
49753+ fork until the administrator is able to assess the situation and
49754+ restart the daemon.
49755+ In the suid/sgid case, the attempt is logged, the user has all their
49756+ processes terminated, and they are prevented from executing any further
49757+ processes for 15 minutes.
49758+ It is recommended that you also enable signal logging in the auditing
49759+ section so that logs are generated when a process triggers a suspicious
49760+ signal.
49761+ If the sysctl option is enabled, a sysctl option with name
49762+ "deter_bruteforce" is created.
58c5fc13 49763+
58c5fc13 49764+
fe2de317
MT
49765+config GRKERNSEC_MODHARDEN
49766+ bool "Harden module auto-loading"
c1e3898a 49767+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
49768+ depends on MODULES
49769+ help
49770+ If you say Y here, module auto-loading in response to use of some
49771+ feature implemented by an unloaded module will be restricted to
49772+ root users. Enabling this option helps defend against attacks
49773+ by unprivileged users who abuse the auto-loading behavior to
49774+ cause a vulnerable module to load that is then exploited.
58c5fc13 49775+
fe2de317
MT
49776+ If this option prevents a legitimate use of auto-loading for a
49777+ non-root user, the administrator can execute modprobe manually
49778+ with the exact name of the module mentioned in the alert log.
49779+ Alternatively, the administrator can add the module to the list
49780+ of modules loaded at boot by modifying init scripts.
58c5fc13 49781+
fe2de317
MT
49782+ Modification of init scripts will most likely be needed on
49783+ Ubuntu servers with encrypted home directory support enabled,
49784+ as the first non-root user logging in will cause the ecb(aes),
49785+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
58c5fc13 49786+
fe2de317
MT
49787+config GRKERNSEC_HIDESYM
49788+ bool "Hide kernel symbols"
c1e3898a 49789+ default y if GRKERNSEC_CONFIG_AUTO
572b4308 49790+ select PAX_USERCOPY_SLABS
fe2de317
MT
49791+ help
49792+ If you say Y here, getting information on loaded modules, and
49793+ displaying all kernel symbols through a syscall will be restricted
49794+ to users with CAP_SYS_MODULE. For software compatibility reasons,
49795+ /proc/kallsyms will be restricted to the root user. The RBAC
49796+ system can hide that entry even from root.
58c5fc13 49797+
fe2de317
MT
49798+ This option also prevents leaking of kernel addresses through
49799+ several /proc entries.
58c5fc13 49800+
fe2de317
MT
49801+ Note that this option is only effective provided the following
49802+ conditions are met:
49803+ 1) The kernel using grsecurity is not precompiled by some distribution
49804+ 2) You have also enabled GRKERNSEC_DMESG
49805+ 3) You are using the RBAC system and hiding other files such as your
49806+ kernel image and System.map. Alternatively, enabling this option
49807+ causes the permissions on /boot, /lib/modules, and the kernel
49808+ source directory to change at compile time to prevent
49809+ reading by non-root users.
49810+ If the above conditions are met, this option will aid in providing a
49811+ useful protection against local kernel exploitation of overflows
49812+ and arbitrary read/write vulnerabilities.
58c5fc13 49813+
fe2de317
MT
49814+config GRKERNSEC_KERN_LOCKOUT
49815+ bool "Active kernel exploit response"
c1e3898a 49816+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
49817+ depends on X86 || ARM || PPC || SPARC
49818+ help
49819+ If you say Y here, when a PaX alert is triggered due to suspicious
49820+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
c1e3898a 49821+ or an OOPS occurs due to bad memory accesses, instead of just
fe2de317
MT
49822+ terminating the offending process (and potentially allowing
49823+ a subsequent exploit from the same user), we will take one of two
49824+ actions:
49825+ If the user was root, we will panic the system
49826+ If the user was non-root, we will log the attempt, terminate
49827+ all processes owned by the user, then prevent them from creating
49828+ any new processes until the system is restarted
49829+ This deters repeated kernel exploitation/bruteforcing attempts
49830+ and is useful for later forensics.
ae4e228f 49831+
fe2de317
MT
49832+endmenu
49833+menu "Role Based Access Control Options"
49834+depends on GRKERNSEC
58c5fc13 49835+
fe2de317
MT
49836+config GRKERNSEC_RBAC_DEBUG
49837+ bool
58c5fc13 49838+
fe2de317
MT
49839+config GRKERNSEC_NO_RBAC
49840+ bool "Disable RBAC system"
49841+ help
49842+ If you say Y here, the /dev/grsec device will be removed from the kernel,
49843+ preventing the RBAC system from being enabled. You should only say Y
49844+ here if you have no intention of using the RBAC system, so as to prevent
49845+ an attacker with root access from misusing the RBAC system to hide files
49846+ and processes when loadable module support and /dev/[k]mem have been
49847+ locked down.
58c5fc13 49848+
fe2de317
MT
49849+config GRKERNSEC_ACL_HIDEKERN
49850+ bool "Hide kernel processes"
49851+ help
49852+ If you say Y here, all kernel threads will be hidden to all
49853+ processes but those whose subject has the "view hidden processes"
49854+ flag.
58c5fc13 49855+
fe2de317
MT
49856+config GRKERNSEC_ACL_MAXTRIES
49857+ int "Maximum tries before password lockout"
49858+ default 3
49859+ help
49860+ This option enforces the maximum number of times a user can attempt
49861+ to authorize themselves with the grsecurity RBAC system before being
49862+ denied the ability to attempt authorization again for a specified time.
49863+ The lower the number, the harder it will be to brute-force a password.
58c5fc13 49864+
fe2de317
MT
49865+config GRKERNSEC_ACL_TIMEOUT
49866+ int "Time to wait after max password tries, in seconds"
49867+ default 30
49868+ help
49869+ This option specifies the time the user must wait after attempting to
49870+ authorize to the RBAC system with the maximum number of invalid
49871+ passwords. The higher the number, the harder it will be to brute-force
49872+ a password.
58c5fc13 49873+
fe2de317
MT
49874+endmenu
49875+menu "Filesystem Protections"
49876+depends on GRKERNSEC
58c5fc13 49877+
fe2de317
MT
49878+config GRKERNSEC_PROC
49879+ bool "Proc restrictions"
c1e3898a 49880+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
49881+ help
49882+ If you say Y here, the permissions of the /proc filesystem
49883+ will be altered to enhance system security and privacy. You MUST
49884+ choose either a user only restriction or a user and group restriction.
49885+ Depending upon the option you choose, you can either restrict users to
49886+ see only the processes they themselves run, or choose a group that can
49887+ view all processes and files normally restricted to root if you choose
4c928ab7
MT
49888+ the "restrict to user only" option. NOTE: If you're running identd or
49889+ ntpd as a non-root user, you will have to run it as the group you
49890+ specify here.
58c5fc13 49891+
fe2de317
MT
49892+config GRKERNSEC_PROC_USER
49893+ bool "Restrict /proc to user only"
49894+ depends on GRKERNSEC_PROC
49895+ help
49896+ If you say Y here, non-root users will only be able to view their own
49897+ processes, and restricts them from viewing network-related information,
49898+ and viewing kernel symbol and module information.
58c5fc13 49899+
fe2de317
MT
49900+config GRKERNSEC_PROC_USERGROUP
49901+ bool "Allow special group"
c1e3898a 49902+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
49903+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
49904+ help
49905+ If you say Y here, you will be able to select a group that will be
49906+ able to view all processes and network-related information. If you've
49907+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
49908+ remain hidden. This option is useful if you want to run identd as
49909+ a non-root user.
58c5fc13 49910+
fe2de317
MT
49911+config GRKERNSEC_PROC_GID
49912+ int "GID for special group"
49913+ depends on GRKERNSEC_PROC_USERGROUP
49914+ default 1001
df50ba0c 49915+
fe2de317
MT
49916+config GRKERNSEC_PROC_ADD
49917+ bool "Additional restrictions"
c1e3898a 49918+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
49919+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
49920+ help
49921+ If you say Y here, additional restrictions will be placed on
49922+ /proc that keep normal users from viewing device information and
49923+ slabinfo information that could be useful for exploits.
58c5fc13 49924+
fe2de317
MT
49925+config GRKERNSEC_LINK
49926+ bool "Linking restrictions"
c1e3898a 49927+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
49928+ help
49929+ If you say Y here, /tmp race exploits will be prevented, since users
49930+ will no longer be able to follow symlinks owned by other users in
49931+ world-writable +t directories (e.g. /tmp), unless the owner of the
49932+ symlink is the owner of the directory. Users will also not be
49933+ able to hardlink to files they do not own. If the sysctl option is
49934+ enabled, a sysctl option with name "linking_restrictions" is created.
15a11c5b 49935+
572b4308
MT
49936+config GRKERNSEC_SYMLINKOWN
49937+ bool "Kernel-enforced SymlinksIfOwnerMatch"
49938+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
49939+ help
49940+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
49941+ that prevents it from being used as a security feature. As Apache
49942+ verifies the symlink by performing a stat() against the target of
49943+ the symlink before it is followed, an attacker can setup a symlink
49944+ to point to a same-owned file, then replace the symlink with one
49945+ that targets another user's file just after Apache "validates" the
49946+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
49947+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
49948+ will be in place for the group you specify. If the sysctl option
49949+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
49950+ created.
49951+
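
The race the GRKERNSEC_SYMLINKOWN help text describes is an ordinary check-then-use problem: Apache stat()s the symlink target, decides it is same-owned, and only then follows it, leaving a window in which the symlink can be repointed. A small userspace illustration of that window and of the open-then-fstat() ordering that closes it for the opened object (the paths and ownership policy here are made up for the example):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Racy: the target can be swapped between the stat() and the open(). */
    int open_if_owned_by_racy(const char *link, uid_t owner)
    {
            struct stat st;
            if (stat(link, &st) != 0 || st.st_uid != owner)
                    return -1;
            return open(link, O_RDONLY);
    }

    /* Narrower: open first, then check the object actually opened.  The
     * kernel-enforced option above avoids the problem entirely because the
     * ownership check happens during path lookup itself. */
    int open_if_owned_by(const char *link, uid_t owner)
    {
            struct stat st;
            int fd = open(link, O_RDONLY);
            if (fd < 0)
                    return -1;
            if (fstat(fd, &st) != 0 || st.st_uid != owner) {
                    close(fd);
                    return -1;
            }
            return fd;
    }
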
49952+config GRKERNSEC_SYMLINKOWN_GID
49953+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
49954+ depends on GRKERNSEC_SYMLINKOWN
49955+ default 1006
49956+ help
49957+ Setting this GID determines what group kernel-enforced
49958+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
49959+ is enabled, a sysctl option with name "symlinkown_gid" is created.
49960+
fe2de317
MT
49961+config GRKERNSEC_FIFO
49962+ bool "FIFO restrictions"
c1e3898a 49963+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
49964+ help
49965+ If you say Y here, users will not be able to write to FIFOs they don't
49966+ own in world-writable +t directories (e.g. /tmp), unless the owner of
49967+ the FIFO is also the owner of the directory it's held in. If the sysctl
49968+ option is enabled, a sysctl option with name "fifo_restrictions" is
49969+ created.
58c5fc13 49970+
fe2de317
MT
49971+config GRKERNSEC_SYSFS_RESTRICT
49972+ bool "Sysfs/debugfs restriction"
c1e3898a 49973+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
fe2de317
MT
49974+ depends on SYSFS
49975+ help
49976+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
4c928ab7
MT
49977+ any filesystem normally mounted under it (e.g. debugfs) will be
49978+ mostly accessible only by root. These filesystems generally provide access
fe2de317
MT
49979+ to hardware and debug information that isn't appropriate for unprivileged
49980+ users of the system. Sysfs and debugfs have also become a large source
49981+ of new vulnerabilities, ranging from infoleaks to local compromise.
49982+ There has been very little oversight with an eye toward security involved
49983+ in adding new exporters of information to these filesystems, so their
49984+ use is discouraged.
4c928ab7
MT
49985+ For reasons of compatibility, a few directories have been whitelisted
49986+ for access by non-root users:
49987+ /sys/fs/selinux
49988+ /sys/fs/fuse
49989+ /sys/devices/system/cpu
df50ba0c 49990+
fe2de317
MT
49991+config GRKERNSEC_ROFS
49992+ bool "Runtime read-only mount protection"
49993+ help
49994+ If you say Y here, a sysctl option with name "romount_protect" will
49995+ be created. By setting this option to 1 at runtime, filesystems
49996+ will be protected in the following ways:
49997+ * No new writable mounts will be allowed
49998+ * Existing read-only mounts won't be able to be remounted read/write
49999+ * Write operations will be denied on all block devices
50000+ This option acts independently of grsec_lock: once it is set to 1,
50001+ it cannot be turned off. Therefore, please be mindful of the resulting
50002+ behavior if this option is enabled in an init script on a read-only
50003+ filesystem. This feature is mainly intended for secure embedded systems.
58c5fc13 50004+
fe2de317
MT
50005+config GRKERNSEC_CHROOT
50006+ bool "Chroot jail restrictions"
c1e3898a 50007+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50008+ help
50009+ If you say Y here, you will be able to choose several options that will
50010+ make breaking out of a chrooted jail much more difficult. If you
50011+ encounter no software incompatibilities with the following options, it
50012+ is recommended that you enable each one.
58c5fc13 50013+
fe2de317
MT
50014+config GRKERNSEC_CHROOT_MOUNT
50015+ bool "Deny mounts"
c1e3898a 50016+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50017+ depends on GRKERNSEC_CHROOT
50018+ help
50019+ If you say Y here, processes inside a chroot will not be able to
50020+ mount or remount filesystems. If the sysctl option is enabled, a
50021+ sysctl option with name "chroot_deny_mount" is created.
58c5fc13 50022+
fe2de317
MT
50023+config GRKERNSEC_CHROOT_DOUBLE
50024+ bool "Deny double-chroots"
c1e3898a 50025+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50026+ depends on GRKERNSEC_CHROOT
50027+ help
50028+ If you say Y here, processes inside a chroot will not be able to chroot
50029+ again outside the chroot. This is a widely used method of breaking
50030+ out of a chroot jail and should not be allowed. If the sysctl
50031+ option is enabled, a sysctl option with name
50032+ "chroot_deny_chroot" is created.
16454cff 50033+
fe2de317
MT
50034+config GRKERNSEC_CHROOT_PIVOT
50035+ bool "Deny pivot_root in chroot"
c1e3898a 50036+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50037+ depends on GRKERNSEC_CHROOT
50038+ help
50039+ If you say Y here, processes inside a chroot will not be able to use
50040+ a function called pivot_root() that was introduced in Linux 2.3.41. It
50041+ works similarly to chroot in that it changes the root filesystem. This
50042+ function could be misused in a chrooted process to attempt to break out
50043+ of the chroot, and therefore should not be allowed. If the sysctl
50044+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
50045+ created.
16454cff 50046+
fe2de317
MT
50047+config GRKERNSEC_CHROOT_CHDIR
50048+ bool "Enforce chdir(\"/\") on all chroots"
c1e3898a 50049+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50050+ depends on GRKERNSEC_CHROOT
50051+ help
50052+ If you say Y here, the current working directory of all newly-chrooted
50053+ applications will be set to the root directory of the chroot.
50054+ The man page on chroot(2) states:
50055+ Note that this call does not change the current working
50056+ directory, so that `.' can be outside the tree rooted at
50057+ `/'. In particular, the super-user can escape from a
50058+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
16454cff 50059+
fe2de317
MT
50060+ It is recommended that you say Y here, since it's not known to break
50061+ any software. If the sysctl option is enabled, a sysctl option with
50062+ name "chroot_enforce_chdir" is created.
58c5fc13 50063+
fe2de317
MT
50064+config GRKERNSEC_CHROOT_CHMOD
50065+ bool "Deny (f)chmod +s"
c1e3898a 50066+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50067+ depends on GRKERNSEC_CHROOT
50068+ help
50069+ If you say Y here, processes inside a chroot will not be able to chmod
50070+ or fchmod files to make them have suid or sgid bits. This protects
50071+ against another published method of breaking a chroot. If the sysctl
50072+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
50073+ created.
58c5fc13 50074+
fe2de317
MT
50075+config GRKERNSEC_CHROOT_FCHDIR
50076+ bool "Deny fchdir out of chroot"
c1e3898a 50077+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50078+ depends on GRKERNSEC_CHROOT
50079+ help
50080+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
50081+ to a file descriptor of the chrooting process that points to a directory
50082+ outside the filesystem will be stopped. If the sysctl option
50083+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
6892158b 50084+
fe2de317
MT
50085+config GRKERNSEC_CHROOT_MKNOD
50086+ bool "Deny mknod"
c1e3898a 50087+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50088+ depends on GRKERNSEC_CHROOT
50089+ help
50090+ If you say Y here, processes inside a chroot will not be allowed to
50091+ mknod. The problem with using mknod inside a chroot is that it
50092+ would allow an attacker to create a device entry that is the same
50094+ as one on the physical root of your system, which could be anything
50095+ from the console device to a device for your hard drive (which
50095+ they could then use to wipe the drive or steal data). It is recommended
50096+ that you say Y here, unless you run into software incompatibilities.
50097+ If the sysctl option is enabled, a sysctl option with name
50098+ "chroot_deny_mknod" is created.
58c5fc13 50099+
fe2de317
MT
50100+config GRKERNSEC_CHROOT_SHMAT
50101+ bool "Deny shmat() out of chroot"
c1e3898a 50102+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50103+ depends on GRKERNSEC_CHROOT
50104+ help
50105+ If you say Y here, processes inside a chroot will not be able to attach
50106+ to shared memory segments that were created outside of the chroot jail.
50107+ It is recommended that you say Y here. If the sysctl option is enabled,
50108+ a sysctl option with name "chroot_deny_shmat" is created.
58c5fc13 50109+
fe2de317
MT
50110+config GRKERNSEC_CHROOT_UNIX
50111+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
c1e3898a 50112+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50113+ depends on GRKERNSEC_CHROOT
50114+ help
50115+ If you say Y here, processes inside a chroot will not be able to
50116+ connect to abstract (meaning not belonging to a filesystem) Unix
50117+ domain sockets that were bound outside of a chroot. It is recommended
50118+ that you say Y here. If the sysctl option is enabled, a sysctl option
50119+ with name "chroot_deny_unix" is created.
58c5fc13 50120+
fe2de317
MT
50121+config GRKERNSEC_CHROOT_FINDTASK
50122+ bool "Protect outside processes"
c1e3898a 50123+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50124+ depends on GRKERNSEC_CHROOT
50125+ help
50126+ If you say Y here, processes inside a chroot will not be able to
50127+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50128+ getsid, or view any process outside of the chroot. If the sysctl
50129+ option is enabled, a sysctl option with name "chroot_findtask" is
50130+ created.
58c5fc13 50131+
fe2de317
MT
50132+config GRKERNSEC_CHROOT_NICE
50133+ bool "Restrict priority changes"
c1e3898a 50134+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50135+ depends on GRKERNSEC_CHROOT
50136+ help
50137+ If you say Y here, processes inside a chroot will not be able to raise
50138+ the priority of processes in the chroot, or alter the priority of
50139+ processes outside the chroot. This provides more security than simply
50140+ removing CAP_SYS_NICE from the process' capability set. If the
50141+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50142+ is created.
bc901d79 50143+
fe2de317
MT
50144+config GRKERNSEC_CHROOT_SYSCTL
50145+ bool "Deny sysctl writes"
c1e3898a 50146+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50147+ depends on GRKERNSEC_CHROOT
50148+ help
50149+ If you say Y here, an attacker in a chroot will not be able to
50150+ write to sysctl entries, either by sysctl(2) or through a /proc
50151+ interface. It is strongly recommended that you say Y here. If the
50152+ sysctl option is enabled, a sysctl option with name
50153+ "chroot_deny_sysctl" is created.
bc901d79 50154+
fe2de317
MT
50155+config GRKERNSEC_CHROOT_CAPS
50156+ bool "Capability restrictions"
c1e3898a 50157+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50158+ depends on GRKERNSEC_CHROOT
50159+ help
50160+ If you say Y here, the capabilities on all processes within a
50161+ chroot jail will be lowered to stop module insertion, raw i/o,
50162+ system and net admin tasks, rebooting the system, modifying immutable
50163+ files, modifying IPC owned by another, and changing the system time.
50165+ This is left as an option because it can break some apps. Disable this
50165+ if your chrooted apps are having problems performing those kinds of
50166+ tasks. If the sysctl option is enabled, a sysctl option with
50167+ name "chroot_caps" is created.
bc901d79 50168+
fe2de317
MT
50169+endmenu
50170+menu "Kernel Auditing"
50171+depends on GRKERNSEC
bc901d79 50172+
fe2de317
MT
50173+config GRKERNSEC_AUDIT_GROUP
50174+ bool "Single group for auditing"
50175+ help
50176+ If you say Y here, the exec, chdir, and (un)mount logging features
50177+ will only operate on a group you specify. This option is recommended
50178+ if you only want to watch certain users instead of having a large
50179+ amount of logs from the entire system. If the sysctl option is enabled,
50180+ a sysctl option with name "audit_group" is created.
bc901d79 50181+
fe2de317
MT
50182+config GRKERNSEC_AUDIT_GID
50183+ int "GID for auditing"
50184+ depends on GRKERNSEC_AUDIT_GROUP
50185+ default 1007
bc901d79 50186+
fe2de317
MT
50187+config GRKERNSEC_EXECLOG
50188+ bool "Exec logging"
50189+ help
50190+ If you say Y here, all execve() calls will be logged (since the
50191+ other exec*() calls are frontends to execve(), all execution
50192+ will be logged). Useful for shell-servers that like to keep track
50193+ of their users. If the sysctl option is enabled, a sysctl option with
50194+ name "exec_logging" is created.
50195+ WARNING: This option when enabled will produce a LOT of logs, especially
50196+ on an active system.
bc901d79 50197+
fe2de317
MT
50198+config GRKERNSEC_RESLOG
50199+ bool "Resource logging"
c1e3898a 50200+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50201+ help
50202+ If you say Y here, all attempts to overstep resource limits will
50203+ be logged with the resource name, the requested size, and the current
50204+ limit. It is highly recommended that you say Y here. If the sysctl
50205+ option is enabled, a sysctl option with name "resource_logging" is
50206+ created. If the RBAC system is enabled, the sysctl value is ignored.
bc901d79 50207+
fe2de317
MT
50208+config GRKERNSEC_CHROOT_EXECLOG
50209+ bool "Log execs within chroot"
50210+ help
50211+ If you say Y here, all executions inside a chroot jail will be logged
50212+ to syslog. This can cause a large amount of logs if certain
50213+ applications (e.g. djb's daemontools) are installed on the system, and
50214+ is therefore left as an option. If the sysctl option is enabled, a
50215+ sysctl option with name "chroot_execlog" is created.
bc901d79 50216+
fe2de317
MT
50217+config GRKERNSEC_AUDIT_PTRACE
50218+ bool "Ptrace logging"
50219+ help
50220+ If you say Y here, all attempts to attach to a process via ptrace
50221+ will be logged. If the sysctl option is enabled, a sysctl option
50222+ with name "audit_ptrace" is created.
bc901d79 50223+
fe2de317
MT
50224+config GRKERNSEC_AUDIT_CHDIR
50225+ bool "Chdir logging"
50226+ help
50227+ If you say Y here, all chdir() calls will be logged. If the sysctl
50228+ option is enabled, a sysctl option with name "audit_chdir" is created.
bc901d79 50229+
fe2de317
MT
50230+config GRKERNSEC_AUDIT_MOUNT
50231+ bool "(Un)Mount logging"
50232+ help
50233+ If you say Y here, all mounts and unmounts will be logged. If the
50234+ sysctl option is enabled, a sysctl option with name "audit_mount" is
50235+ created.
bc901d79 50236+
fe2de317
MT
50237+config GRKERNSEC_SIGNAL
50238+ bool "Signal logging"
c1e3898a 50239+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50240+ help
50241+ If you say Y here, certain important signals will be logged, such as
50242+ SIGSEGV, which will as a result inform you when an error in a program
50243+ occurred, which in some cases could mean a possible exploit attempt.
50244+ If the sysctl option is enabled, a sysctl option with name
50245+ "signal_logging" is created.
58c5fc13 50246+
fe2de317
MT
50247+config GRKERNSEC_FORKFAIL
50248+ bool "Fork failure logging"
50249+ help
50250+ If you say Y here, all failed fork() attempts will be logged.
50251+ This could suggest a fork bomb, or someone attempting to overstep
50252+ their process limit. If the sysctl option is enabled, a sysctl option
50253+ with name "forkfail_logging" is created.
58c5fc13 50254+
fe2de317
MT
50255+config GRKERNSEC_TIME
50256+ bool "Time change logging"
c1e3898a 50257+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50258+ help
50259+ If you say Y here, any changes of the system clock will be logged.
50260+ If the sysctl option is enabled, a sysctl option with name
50261+ "timechange_logging" is created.
58c5fc13 50262+
fe2de317
MT
50263+config GRKERNSEC_PROC_IPADDR
50264+ bool "/proc/<pid>/ipaddr support"
c1e3898a 50265+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50266+ help
50267+ If you say Y here, a new entry will be added to each /proc/<pid>
50268+ directory that contains the IP address of the person using the task.
50269+ The IP is carried across local TCP and AF_UNIX stream sockets.
50270+ This information can be useful for IDS/IPSes to perform remote response
50271+ to a local attack. The entry is readable by only the owner of the
50272+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50273+ the RBAC system), and thus does not create privacy concerns.
58c5fc13 50274+
fe2de317
MT
50275+config GRKERNSEC_RWXMAP_LOG
50276+ bool 'Denied RWX mmap/mprotect logging'
c1e3898a 50277+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50278+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50279+ help
50280+ If you say Y here, calls to mmap() and mprotect() with explicit
50281+ usage of PROT_WRITE and PROT_EXEC together will be logged when
50282+ denied by the PAX_MPROTECT feature. If the sysctl option is
50283+ enabled, a sysctl option with name "rwxmap_logging" is created.
6892158b 50284+
fe2de317
MT
50285+config GRKERNSEC_AUDIT_TEXTREL
50286+ bool 'ELF text relocations logging (READ HELP)'
50287+ depends on PAX_MPROTECT
50288+ help
50289+ If you say Y here, text relocations will be logged with the filename
50290+ of the offending library or binary. The purpose of the feature is
50291+ to help Linux distribution developers get rid of libraries and
50292+ binaries that need text relocations which hinder the future progress
50293+ of PaX. Only Linux distribution developers should say Y here, and
50294+ never on a production machine, as this option creates an information
50295+ leak that could aid an attacker in defeating the randomization of
50296+ a single memory region. If the sysctl option is enabled, a sysctl
50297+ option with name "audit_textrel" is created.
58c5fc13 50298+
fe2de317 50299+endmenu
58c5fc13 50300+
fe2de317
MT
50301+menu "Executable Protections"
50302+depends on GRKERNSEC
58c5fc13 50303+
fe2de317
MT
50304+config GRKERNSEC_DMESG
50305+ bool "Dmesg(8) restriction"
c1e3898a 50306+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50307+ help
50308+ If you say Y here, non-root users will not be able to use dmesg(8)
50309+ to view up to the last 4kb of messages in the kernel's log buffer.
50310+ The kernel's log buffer often contains kernel addresses and other
50311+ identifying information useful to an attacker in fingerprinting a
50312+ system for a targeted exploit.
50313+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
50314+ created.
6892158b 50315+
fe2de317
MT
50316+config GRKERNSEC_HARDEN_PTRACE
50317+ bool "Deter ptrace-based process snooping"
c1e3898a 50318+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50319+ help
50320+ If you say Y here, TTY sniffers and other malicious monitoring
50321+ programs implemented through ptrace will be defeated. If you
50322+ have been using the RBAC system, this option has already been
50323+ enabled for several years for all users, with the ability to make
50324+ fine-grained exceptions.
58c5fc13 50325+
fe2de317
MT
50326+ This option only affects the ability of non-root users to ptrace
50327+ processes that are not a descendant of the ptracing process.
50328+ This means that strace ./binary and gdb ./binary will still work,
50329+ but attaching to arbitrary processes will not. If the sysctl
50330+ option is enabled, a sysctl option with name "harden_ptrace" is
50331+ created.
58c5fc13 50332+
4c928ab7
MT
50333+config GRKERNSEC_PTRACE_READEXEC
50334+ bool "Require read access to ptrace sensitive binaries"
c1e3898a 50335+ default y if GRKERNSEC_CONFIG_AUTO
4c928ab7
MT
50336+ help
50337+ If you say Y here, unprivileged users will not be able to ptrace unreadable
50338+ binaries. This option is useful in environments that
50339+ remove the read bits (e.g. file mode 4711) from suid binaries to
50340+ prevent infoleaking of their contents. This option adds
50341+ consistency to the use of that file mode, as the binary could normally
50342+ be read out when run without privileges while ptracing.
50343+
50344+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
50345+ is created.
50346+
50347+config GRKERNSEC_SETXID
50348+ bool "Enforce consistent multithreaded privileges"
c1e3898a 50349+ default y if GRKERNSEC_CONFIG_AUTO
5e856224 50350+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
4c928ab7
MT
50351+ help
50352+ If you say Y here, a change from a root uid to a non-root uid
50353+ in a multithreaded application will cause the resulting uids,
50354+ gids, supplementary groups, and capabilities in that thread
50355+ to be propagated to the other threads of the process. In most
50356+ cases this is unnecessary, as glibc will emulate this behavior
50357+ on behalf of the application. Other libcs do not act in the
50358+ same way, allowing the other threads of the process to continue
50359+ running with root privileges. If the sysctl option is enabled,
50360+ a sysctl option with name "consistent_setxid" is created.
50361+
fe2de317
MT
50362+config GRKERNSEC_TPE
50363+ bool "Trusted Path Execution (TPE)"
572b4308 50364+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
fe2de317
MT
50365+ help
50366+ If you say Y here, you will be able to choose a gid to add to the
50367+ supplementary groups of users you want to mark as "untrusted."
50368+ These users will not be able to execute any files that are not in
50369+ root-owned directories writable only by root. If the sysctl option
50370+ is enabled, a sysctl option with name "tpe" is created.
58c5fc13 50371+
fe2de317
MT
50372+config GRKERNSEC_TPE_ALL
50373+ bool "Partially restrict all non-root users"
50374+ depends on GRKERNSEC_TPE
50375+ help
50376+ If you say Y here, all non-root users will be covered under
50377+ a weaker TPE restriction. This is separate from, and in addition to,
50378+ the main TPE options that you have selected elsewhere. Thus, if a
50379+ "trusted" GID is chosen, this restriction applies to even that GID.
50380+ Under this restriction, all non-root users will only be allowed to
50381+ execute files in directories they own that are not group or
50382+ world-writable, or in directories owned by root and writable only by
50383+ root. If the sysctl option is enabled, a sysctl option with name
50384+ "tpe_restrict_all" is created.
58c5fc13 50385+
fe2de317
MT
50386+config GRKERNSEC_TPE_INVERT
50387+ bool "Invert GID option"
50388+ depends on GRKERNSEC_TPE
50389+ help
50390+ If you say Y here, the group you specify in the TPE configuration will
50391+ be the group for which TPE restrictions are *disabled*. This
50392+ option is useful if you want TPE restrictions to be applied to most
50393+ users on the system. If the sysctl option is enabled, a sysctl option
50394+ with name "tpe_invert" is created. Unlike other sysctl options, this
50395+ entry will default to on for backward compatibility.
6e9df6a3 50396+
fe2de317
MT
50397+config GRKERNSEC_TPE_GID
50398+ int "GID for untrusted users"
50399+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50400+ default 1005
50401+ help
50402+ Setting this GID determines the group for which TPE restrictions will
50403+ be *enabled*. If the sysctl option is enabled, a sysctl option
50404+ with name "tpe_gid" is created.
6e9df6a3 50405+
fe2de317
MT
50406+config GRKERNSEC_TPE_GID
50407+ int "GID for trusted users"
50408+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50409+ default 1005
50410+ help
50411+ Setting this GID determines the group for which TPE restrictions will
50412+ be *disabled*. If the sysctl option is enabled, a sysctl option
50413+ with name "tpe_gid" is created.
58c5fc13 50414+
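
A simplified userspace illustration of the trust test the TPE options describe (an approximation, not the kernel code): a directory is trusted when it is owned by root and writable only by root.

```c
#include <stdio.h>
#include <sys/stat.h>

/* simplified version of the in-kernel test: root-owned directory,
 * not group- or world-writable */
static int dir_is_trusted(const char *dir)
{
	struct stat st;

	if (stat(dir, &st) != 0)
		return 0;
	return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
}

int main(void)
{
	printf("/usr/bin trusted: %d\n", dir_is_trusted("/usr/bin"));
	printf("/tmp     trusted: %d\n", dir_is_trusted("/tmp"));
	return 0;
}
```
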
fe2de317
MT
50415+endmenu
50416+menu "Network Protections"
50417+depends on GRKERNSEC
58c5fc13 50418+
fe2de317
MT
50419+config GRKERNSEC_RANDNET
50420+ bool "Larger entropy pools"
c1e3898a 50421+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50422+ help
50423+ If you say Y here, the entropy pools used for many features of Linux
50424+ and grsecurity will be doubled in size. Since several grsecurity
50425+ features use additional randomness, it is recommended that you say Y
50426+ here. Saying Y here has a similar effect to modifying
50427+ /proc/sys/kernel/random/poolsize.
58c5fc13 50428+
fe2de317
MT
50429+config GRKERNSEC_BLACKHOLE
50430+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
c1e3898a 50431+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50432+ depends on NET
50433+ help
50434+ If you say Y here, neither TCP resets nor ICMP
50435+ destination-unreachable packets will be sent in response to packets
50436+ sent to ports for which no associated listening process exists.
50437+ This feature supports both IPv4 and IPv6 and exempts the
50438+ loopback interface from blackholing. Enabling this feature
50439+ makes a host more resilient to DoS attacks and reduces network
50440+ visibility against scanners.
58c5fc13 50441+
fe2de317
MT
50442+ The blackhole feature as-implemented is equivalent to the FreeBSD
50443+ blackhole feature, as it prevents RST responses to all packets, not
50444+ just SYNs. Under most application behavior this causes no
50445+ problems, but applications (like haproxy) may not close certain
50446+ connections in a way that cleanly terminates them on the remote
50447+ end, leaving the remote host in LAST_ACK state. Because of this
50448+ side-effect and to prevent intentional LAST_ACK DoSes, this
50449+ feature also adds automatic mitigation against such attacks.
50450+ The mitigation drastically reduces the amount of time a socket
50451+ can spend in LAST_ACK state. If you're using haproxy and not
50452+ all servers it connects to have this option enabled, consider
50453+ disabling this feature on the haproxy host.
58c5fc13 50454+
fe2de317
MT
50455+ If the sysctl option is enabled, two sysctl options with names
50456+ "ip_blackhole" and "lastack_retries" will be created.
50457+ While "ip_blackhole" takes the standard zero/non-zero on/off
50458+ toggle, "lastack_retries" uses the same kinds of values as
50459+ "tcp_retries1" and "tcp_retries2". The default value of 4
50460+ prevents a socket from lasting more than 45 seconds in LAST_ACK
50461+ state.
58c5fc13 50462+
fe2de317
MT
50463+config GRKERNSEC_SOCKET
50464+ bool "Socket restrictions"
50465+ depends on NET
50466+ help
50467+ If you say Y here, you will be able to choose from several options.
50468+ If you assign a GID on your system and add it to the supplementary
50469+ groups of users you want to restrict socket access to, this patch
50470+ will apply up to three restrictions, based on the option(s) you choose.
58c5fc13 50471+
fe2de317
MT
50472+config GRKERNSEC_SOCKET_ALL
50473+ bool "Deny any sockets to group"
50474+ depends on GRKERNSEC_SOCKET
50475+ help
50476+ If you say Y here, you will be able to choose a GID whose users will
50477+ be unable to connect to other hosts from your machine or run server
50478+ applications from your machine. If the sysctl option is enabled, a
50479+ sysctl option with name "socket_all" is created.
58c5fc13 50480+
fe2de317
MT
50481+config GRKERNSEC_SOCKET_ALL_GID
50482+ int "GID to deny all sockets for"
50483+ depends on GRKERNSEC_SOCKET_ALL
50484+ default 1004
50485+ help
50486+ Here you can choose the GID to disable socket access for. Remember to
50487+ add the users you want socket access disabled for to the GID
50488+ specified here. If the sysctl option is enabled, a sysctl option
50489+ with name "socket_all_gid" is created.
58c5fc13 50490+
fe2de317
MT
50491+config GRKERNSEC_SOCKET_CLIENT
50492+ bool "Deny client sockets to group"
50493+ depends on GRKERNSEC_SOCKET
50494+ help
50495+ If you say Y here, you will be able to choose a GID whose users will
50496+ be unable to connect to other hosts from your machine, but will be
50497+ able to run servers. If this option is enabled, all users in the group
50498+ you specify will have to use passive mode when initiating ftp transfers
50499+ from the shell on your machine. If the sysctl option is enabled, a
50500+ sysctl option with name "socket_client" is created.
58c5fc13 50501+
fe2de317
MT
50502+config GRKERNSEC_SOCKET_CLIENT_GID
50503+ int "GID to deny client sockets for"
50504+ depends on GRKERNSEC_SOCKET_CLIENT
50505+ default 1003
50506+ help
50507+ Here you can choose the GID to disable client socket access for.
50508+ Remember to add the users you want client socket access disabled for to
50509+ the GID specified here. If the sysctl option is enabled, a sysctl
50510+ option with name "socket_client_gid" is created.
58c5fc13 50511+
fe2de317
MT
50512+config GRKERNSEC_SOCKET_SERVER
50513+ bool "Deny server sockets to group"
50514+ depends on GRKERNSEC_SOCKET
50515+ help
50516+ If you say Y here, you will be able to choose a GID whose users will
50517+ be unable to run server applications from your machine. If the sysctl
50518+ option is enabled, a sysctl option with name "socket_server" is created.
58c5fc13 50519+
fe2de317
MT
50520+config GRKERNSEC_SOCKET_SERVER_GID
50521+ int "GID to deny server sockets for"
50522+ depends on GRKERNSEC_SOCKET_SERVER
50523+ default 1002
50524+ help
50525+ Here you can choose the GID to disable server socket access for.
50526+ Remember to add the users you want server socket access disabled for to
50527+ the GID specified here. If the sysctl option is enabled, a sysctl
50528+ option with name "socket_server_gid" is created.
58c5fc13 50529+
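
A rough userspace sketch of how a process would fall under these restrictions, assuming the default GID 1002 from GRKERNSEC_SOCKET_SERVER_GID: the restriction applies when the configured GID appears among the process's supplementary groups.

```c
/* sketch: 1002 is only the default socket_server_gid from the config above */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	gid_t groups[256];
	int i, n = getgroups(256, groups);

	for (i = 0; i < n; i++) {
		if (groups[i] == 1002) {
			puts("server sockets would be denied for this user");
			return 0;
		}
	}
	puts("not in the restricted group");
	return 0;
}
```
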
fe2de317 50530+endmenu
c1e3898a 50531+menu "Sysctl Support"
fe2de317 50532+depends on GRKERNSEC && SYSCTL
58c5fc13 50533+
fe2de317
MT
50534+config GRKERNSEC_SYSCTL
50535+ bool "Sysctl support"
c1e3898a 50536+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50537+ help
50538+ If you say Y here, you will be able to change the options that
50539+ grsecurity runs with at bootup, without having to recompile your
50540+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
50541+ to enable (1) or disable (0) various features. All the sysctl entries
50542+ are mutable until the "grsec_lock" entry is set to a non-zero value.
50543+ All features enabled in the kernel configuration are disabled at boot
50544+ if you do not say Y to the "Turn on features by default" option.
50545+ All options should be set at startup, and the grsec_lock entry should
50546+ be set to a non-zero value after all the options are set.
50547+ *THIS IS EXTREMELY IMPORTANT*
58c5fc13 50548+
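
A hypothetical snippet of the startup sequence the help text above recommends, assuming the entries appear under /proc/sys/kernel/grsecurity as described: set the tunables first, then set grsec_lock so they can no longer be changed at runtime.

```c
#include <stdio.h>

/* paths as described in the help text above; availability depends on
 * GRKERNSEC_SYSCTL being compiled in */
static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* set the tunables first ... */
	if (write_sysctl("/proc/sys/kernel/grsecurity/tpe", "1"))
		perror("tpe");
	/* ... then lock them so they can no longer be changed at runtime */
	if (write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1"))
		perror("grsec_lock");
	return 0;
}
```
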
fe2de317
MT
50549+config GRKERNSEC_SYSCTL_DISTRO
50550+ bool "Extra sysctl support for distro makers (READ HELP)"
50551+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
50552+ help
50553+ If you say Y here, additional sysctl options will be created
50554+ for features that affect processes running as root. Therefore,
50555+ it is critical when using this option that the grsec_lock entry be
50556+ enabled after boot. Only distros that ship prebuilt kernel packages
50557+ with this option enabled and that can ensure grsec_lock is enabled
50558+ after boot should use this option.
50559+ *Failure to set grsec_lock after boot makes all grsec features
50560+ this option covers useless*
bc901d79 50561+
fe2de317
MT
50562+ Currently this option creates the following sysctl entries:
50563+ "Disable Privileged I/O": "disable_priv_io"
58c5fc13 50564+
fe2de317
MT
50565+config GRKERNSEC_SYSCTL_ON
50566+ bool "Turn on features by default"
c1e3898a 50567+ default y if GRKERNSEC_CONFIG_AUTO
fe2de317
MT
50568+ depends on GRKERNSEC_SYSCTL
50569+ help
50570+ If you say Y here, the features enabled in the kernel
50571+ configuration will be turned on at boot time instead of starting
50572+ out disabled. It is recommended you say Y here unless
50573+ there is some reason you would want all sysctl-tunable features to
50574+ be disabled by default. As mentioned elsewhere, it is important
50575+ to enable the grsec_lock entry once you have finished modifying
50576+ the sysctl entries.
58c5fc13 50577+
fe2de317
MT
50578+endmenu
50579+menu "Logging Options"
50580+depends on GRKERNSEC
58c5fc13 50581+
fe2de317
MT
50582+config GRKERNSEC_FLOODTIME
50583+ int "Seconds in between log messages (minimum)"
50584+ default 10
50585+ help
50586+ This option allows you to enforce a minimum number of seconds between
50587+ grsecurity log messages. The default should be suitable for most
50588+ people; however, if you choose to change it, choose a value small enough
50589+ to allow informative logs to be produced, but large enough to
50590+ prevent flooding.
58c5fc13 50591+
fe2de317
MT
50592+config GRKERNSEC_FLOODBURST
50593+ int "Number of messages in a burst (maximum)"
50594+ default 6
50595+ help
50596+ This option allows you to choose the maximum number of messages allowed
50597+ within the flood time interval you chose in a separate option. The
50598+ default should be suitable for most people; however, if you find that
50599+ many of your logs are being interpreted as flooding, you may want to
50600+ raise this value.
58c5fc13 50601+
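
A simplified model of how these two settings interact (an illustration only, not the kernel's logging code): at most the configured burst of messages is emitted per flood-time window, and the rest are suppressed.

```c
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* seconds, default of GRKERNSEC_FLOODTIME  */
#define FLOODBURST 6	/* messages, default of GRKERNSEC_FLOODBURST */

/* simplified rate limiter mirroring the described behaviour */
static int grsec_log_ratelimited(void)
{
	static time_t window_start;
	static int count;
	time_t now = time(NULL);

	if (now - window_start >= FLOODTIME) {
		window_start = now;
		count = 0;
	}
	if (count >= FLOODBURST)
		return 0;	/* suppressed */
	count++;
	return 1;		/* may log */
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("msg %d: %s\n", i,
		       grsec_log_ratelimited() ? "logged" : "suppressed");
	return 0;
}
```
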
fe2de317 50602+endmenu
fe2de317
MT
50603diff --git a/grsecurity/Makefile b/grsecurity/Makefile
50604new file mode 100644
4c928ab7 50605index 0000000..1b9afa9
fe2de317
MT
50606--- /dev/null
50607+++ b/grsecurity/Makefile
4c928ab7 50608@@ -0,0 +1,38 @@
fe2de317
MT
50609+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
50610+# during 2001-2009 it has been completely redesigned by Brad Spengler
50611+# into an RBAC system
50612+#
50613+# All code in this directory and various hooks inserted throughout the kernel
50614+# are copyright Brad Spengler - Open Source Security, Inc., and released
50615+# under the GPL v2 or higher
58c5fc13 50616+
4c928ab7
MT
50617+KBUILD_CFLAGS += -Werror
50618+
fe2de317
MT
50619+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
50620+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
50621+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
58c5fc13 50622+
fe2de317
MT
50623+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
50624+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
50625+ gracl_learn.o grsec_log.o
50626+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
58c5fc13 50627+
fe2de317
MT
50628+ifdef CONFIG_NET
50629+obj-y += grsec_sock.o
50630+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
50631+endif
58c5fc13 50632+
fe2de317
MT
50633+ifndef CONFIG_GRKERNSEC
50634+obj-y += grsec_disabled.o
50635+endif
58c5fc13 50636+
fe2de317
MT
50637+ifdef CONFIG_GRKERNSEC_HIDESYM
50638+extra-y := grsec_hidesym.o
50639+$(obj)/grsec_hidesym.o:
50640+ @-chmod -f 500 /boot
50641+ @-chmod -f 500 /lib/modules
50642+ @-chmod -f 500 /lib64/modules
50643+ @-chmod -f 500 /lib32/modules
50644+ @-chmod -f 700 .
50645+ @echo ' grsec: protected kernel image paths'
50646+endif
50647diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
50648new file mode 100644
572b4308 50649index 0000000..7a5922f
fe2de317
MT
50650--- /dev/null
50651+++ b/grsecurity/gracl.c
572b4308 50652@@ -0,0 +1,4016 @@
fe2de317
MT
50653+#include <linux/kernel.h>
50654+#include <linux/module.h>
50655+#include <linux/sched.h>
50656+#include <linux/mm.h>
50657+#include <linux/file.h>
50658+#include <linux/fs.h>
50659+#include <linux/namei.h>
50660+#include <linux/mount.h>
50661+#include <linux/tty.h>
50662+#include <linux/proc_fs.h>
50663+#include <linux/lglock.h>
50664+#include <linux/slab.h>
50665+#include <linux/vmalloc.h>
50666+#include <linux/types.h>
50667+#include <linux/sysctl.h>
50668+#include <linux/netdevice.h>
50669+#include <linux/ptrace.h>
50670+#include <linux/gracl.h>
50671+#include <linux/gralloc.h>
4c928ab7 50672+#include <linux/security.h>
fe2de317
MT
50673+#include <linux/grinternal.h>
50674+#include <linux/pid_namespace.h>
572b4308 50675+#include <linux/stop_machine.h>
fe2de317
MT
50676+#include <linux/fdtable.h>
50677+#include <linux/percpu.h>
5e856224 50678+#include "../fs/mount.h"
58c5fc13 50679+
fe2de317
MT
50680+#include <asm/uaccess.h>
50681+#include <asm/errno.h>
50682+#include <asm/mman.h>
58c5fc13 50683+
fe2de317
MT
50684+static struct acl_role_db acl_role_set;
50685+static struct name_db name_set;
50686+static struct inodev_db inodev_set;
58c5fc13 50687+
fe2de317
MT
50688+/* for keeping track of userspace pointers used for subjects, so we
50689+ can share references in the kernel as well
50690+*/
58c5fc13 50691+
fe2de317 50692+static struct path real_root;
58c5fc13 50693+
fe2de317 50694+static struct acl_subj_map_db subj_map_set;
58c5fc13 50695+
fe2de317 50696+static struct acl_role_label *default_role;
58c5fc13 50697+
fe2de317 50698+static struct acl_role_label *role_list;
58c5fc13 50699+
fe2de317 50700+static u16 acl_sp_role_value;
58c5fc13 50701+
fe2de317
MT
50702+extern char *gr_shared_page[4];
50703+static DEFINE_MUTEX(gr_dev_mutex);
50704+DEFINE_RWLOCK(gr_inode_lock);
58c5fc13 50705+
fe2de317 50706+struct gr_arg *gr_usermode;
58c5fc13 50707+
fe2de317 50708+static unsigned int gr_status __read_only = GR_STATUS_INIT;
58c5fc13 50709+
fe2de317
MT
50710+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
50711+extern void gr_clear_learn_entries(void);
58c5fc13 50712+
fe2de317
MT
50713+#ifdef CONFIG_GRKERNSEC_RESLOG
50714+extern void gr_log_resource(const struct task_struct *task,
50715+ const int res, const unsigned long wanted, const int gt);
50716+#endif
58c5fc13 50717+
fe2de317
MT
50718+unsigned char *gr_system_salt;
50719+unsigned char *gr_system_sum;
58c5fc13 50720+
fe2de317
MT
50721+static struct sprole_pw **acl_special_roles = NULL;
50722+static __u16 num_sprole_pws = 0;
58c5fc13 50723+
fe2de317 50724+static struct acl_role_label *kernel_role = NULL;
58c5fc13 50725+
fe2de317
MT
50726+static unsigned int gr_auth_attempts = 0;
50727+static unsigned long gr_auth_expires = 0UL;
58c5fc13 50728+
fe2de317
MT
50729+#ifdef CONFIG_NET
50730+extern struct vfsmount *sock_mnt;
50731+#endif
58c5fc13 50732+
fe2de317
MT
50733+extern struct vfsmount *pipe_mnt;
50734+extern struct vfsmount *shm_mnt;
50735+#ifdef CONFIG_HUGETLBFS
50736+extern struct vfsmount *hugetlbfs_vfsmount;
50737+#endif
58c5fc13 50738+
fe2de317
MT
50739+static struct acl_object_label *fakefs_obj_rw;
50740+static struct acl_object_label *fakefs_obj_rwx;
58c5fc13 50741+
fe2de317
MT
50742+extern int gr_init_uidset(void);
50743+extern void gr_free_uidset(void);
50744+extern void gr_remove_uid(uid_t uid);
50745+extern int gr_find_uid(uid_t uid);
58c5fc13 50746+
fe2de317 50747+DECLARE_BRLOCK(vfsmount_lock);
58c5fc13 50748+
fe2de317
MT
50749+__inline__ int
50750+gr_acl_is_enabled(void)
57199397 50751+{
fe2de317
MT
50752+ return (gr_status & GR_READY);
50753+}
58c5fc13 50754+
fe2de317
MT
50755+#ifdef CONFIG_BTRFS_FS
50756+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
50757+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
50758+#endif
58c5fc13 50759+
fe2de317
MT
50760+static inline dev_t __get_dev(const struct dentry *dentry)
50761+{
50762+#ifdef CONFIG_BTRFS_FS
50763+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
50764+ return get_btrfs_dev_from_inode(dentry->d_inode);
50765+ else
50766+#endif
50767+ return dentry->d_inode->i_sb->s_dev;
58c5fc13
MT
50768+}
50769+
fe2de317 50770+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58c5fc13 50771+{
fe2de317
MT
50772+ return __get_dev(dentry);
50773+}
58c5fc13 50774+
fe2de317
MT
50775+static char gr_task_roletype_to_char(struct task_struct *task)
50776+{
50777+ switch (task->role->roletype &
50778+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
50779+ GR_ROLE_SPECIAL)) {
50780+ case GR_ROLE_DEFAULT:
50781+ return 'D';
50782+ case GR_ROLE_USER:
50783+ return 'U';
50784+ case GR_ROLE_GROUP:
50785+ return 'G';
50786+ case GR_ROLE_SPECIAL:
50787+ return 'S';
50788+ }
58c5fc13 50789+
fe2de317
MT
50790+ return 'X';
50791+}
ae4e228f 50792+
fe2de317
MT
50793+char gr_roletype_to_char(void)
50794+{
50795+ return gr_task_roletype_to_char(current);
58c5fc13 50796+}
efbe55a5 50797+
fe2de317
MT
50798+__inline__ int
50799+gr_acl_tpe_check(void)
efbe55a5 50800+{
fe2de317
MT
50801+ if (unlikely(!(gr_status & GR_READY)))
50802+ return 0;
50803+ if (current->role->roletype & GR_ROLE_TPE)
50804+ return 1;
50805+ else
50806+ return 0;
50807+}
efbe55a5 50808+
fe2de317
MT
50809+int
50810+gr_handle_rawio(const struct inode *inode)
50811+{
50812+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50813+ if (inode && S_ISBLK(inode->i_mode) &&
50814+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50815+ !capable(CAP_SYS_RAWIO))
50816+ return 1;
50817+#endif
50818+ return 0;
50819+}
efbe55a5 50820+
fe2de317
MT
50821+static int
50822+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
50823+{
50824+ if (likely(lena != lenb))
50825+ return 0;
efbe55a5 50826+
fe2de317 50827+ return !memcmp(a, b, lena);
efbe55a5
MT
50828+}
50829+
fe2de317 50830+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
efbe55a5 50831+{
fe2de317
MT
50832+ *buflen -= namelen;
50833+ if (*buflen < 0)
50834+ return -ENAMETOOLONG;
50835+ *buffer -= namelen;
50836+ memcpy(*buffer, str, namelen);
50837+ return 0;
50838+}
efbe55a5 50839+
fe2de317
MT
50840+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
50841+{
50842+ return prepend(buffer, buflen, name->name, name->len);
efbe55a5 50843+}
fe2de317
MT
50844+
50845+static int prepend_path(const struct path *path, struct path *root,
50846+ char **buffer, int *buflen)
efbe55a5 50847+{
fe2de317
MT
50848+ struct dentry *dentry = path->dentry;
50849+ struct vfsmount *vfsmnt = path->mnt;
5e856224 50850+ struct mount *mnt = real_mount(vfsmnt);
fe2de317
MT
50851+ bool slash = false;
50852+ int error = 0;
efbe55a5 50853+
fe2de317
MT
50854+ while (dentry != root->dentry || vfsmnt != root->mnt) {
50855+ struct dentry * parent;
efbe55a5 50856+
fe2de317
MT
50857+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
50858+ /* Global root? */
5e856224 50859+ if (!mnt_has_parent(mnt)) {
fe2de317
MT
50860+ goto out;
50861+ }
5e856224
MT
50862+ dentry = mnt->mnt_mountpoint;
50863+ mnt = mnt->mnt_parent;
50864+ vfsmnt = &mnt->mnt;
fe2de317
MT
50865+ continue;
50866+ }
50867+ parent = dentry->d_parent;
50868+ prefetch(parent);
50869+ spin_lock(&dentry->d_lock);
50870+ error = prepend_name(buffer, buflen, &dentry->d_name);
50871+ spin_unlock(&dentry->d_lock);
50872+ if (!error)
50873+ error = prepend(buffer, buflen, "/", 1);
50874+ if (error)
50875+ break;
efbe55a5 50876+
fe2de317
MT
50877+ slash = true;
50878+ dentry = parent;
50879+ }
efbe55a5 50880+
fe2de317
MT
50881+out:
50882+ if (!error && !slash)
50883+ error = prepend(buffer, buflen, "/", 1);
efbe55a5 50884+
fe2de317
MT
50885+ return error;
50886+}
efbe55a5 50887+
fe2de317 50888+/* this must be called with vfsmount_lock and rename_lock held */
efbe55a5 50889+
fe2de317
MT
50890+static char *__our_d_path(const struct path *path, struct path *root,
50891+ char *buf, int buflen)
50892+{
50893+ char *res = buf + buflen;
50894+ int error;
efbe55a5 50895+
fe2de317
MT
50896+ prepend(&res, &buflen, "\0", 1);
50897+ error = prepend_path(path, root, &res, &buflen);
50898+ if (error)
50899+ return ERR_PTR(error);
50900+
50901+ return res;
efbe55a5
MT
50902+}
50903+
fe2de317
MT
50904+static char *
50905+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
efbe55a5 50906+{
fe2de317 50907+ char *retval;
efbe55a5 50908+
fe2de317
MT
50909+ retval = __our_d_path(path, root, buf, buflen);
50910+ if (unlikely(IS_ERR(retval)))
50911+ retval = strcpy(buf, "<path too long>");
50912+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
50913+ retval[1] = '\0';
efbe55a5 50914+
fe2de317 50915+ return retval;
efbe55a5
MT
50916+}
50917+
fe2de317
MT
50918+static char *
50919+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50920+ char *buf, int buflen)
efbe55a5 50921+{
fe2de317
MT
50922+ struct path path;
50923+ char *res;
efbe55a5 50924+
fe2de317
MT
50925+ path.dentry = (struct dentry *)dentry;
50926+ path.mnt = (struct vfsmount *)vfsmnt;
efbe55a5 50927+
fe2de317
MT
50928+ /* we can use real_root.dentry, real_root.mnt, because this is only called
50929+ by the RBAC system */
50930+ res = gen_full_path(&path, &real_root, buf, buflen);
efbe55a5 50931+
fe2de317 50932+ return res;
efbe55a5
MT
50933+}
50934+
fe2de317
MT
50935+static char *
50936+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
50937+ char *buf, int buflen)
efbe55a5 50938+{
fe2de317
MT
50939+ char *res;
50940+ struct path path;
50941+ struct path root;
c6e2a6c8 50942+ struct task_struct *reaper = init_pid_ns.child_reaper;
fe2de317
MT
50943+
50944+ path.dentry = (struct dentry *)dentry;
50945+ path.mnt = (struct vfsmount *)vfsmnt;
50946+
50947+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
50948+ get_fs_root(reaper->fs, &root);
50949+
50950+ write_seqlock(&rename_lock);
50951+ br_read_lock(vfsmount_lock);
50952+ res = gen_full_path(&path, &root, buf, buflen);
50953+ br_read_unlock(vfsmount_lock);
50954+ write_sequnlock(&rename_lock);
50955+
50956+ path_put(&root);
50957+ return res;
50958+}
50959+
50960+static char *
50961+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50962+{
50963+ char *ret;
50964+ write_seqlock(&rename_lock);
50965+ br_read_lock(vfsmount_lock);
50966+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50967+ PAGE_SIZE);
50968+ br_read_unlock(vfsmount_lock);
50969+ write_sequnlock(&rename_lock);
50970+ return ret;
50971+}
50972+
50973+static char *
50974+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
50975+{
50976+ char *ret;
50977+ char *buf;
50978+ int buflen;
50979+
50980+ write_seqlock(&rename_lock);
50981+ br_read_lock(vfsmount_lock);
50982+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
50983+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
50984+ buflen = (int)(ret - buf);
50985+ if (buflen >= 5)
50986+ prepend(&ret, &buflen, "/proc", 5);
50987+ else
50988+ ret = strcpy(buf, "<path too long>");
50989+ br_read_unlock(vfsmount_lock);
50990+ write_sequnlock(&rename_lock);
50991+ return ret;
50992+}
50993+
50994+char *
50995+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
50996+{
50997+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
50998+ PAGE_SIZE);
50999+}
51000+
51001+char *
51002+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
51003+{
51004+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51005+ PAGE_SIZE);
51006+}
51007+
51008+char *
51009+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
51010+{
51011+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
51012+ PAGE_SIZE);
51013+}
51014+
51015+char *
51016+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
51017+{
51018+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
51019+ PAGE_SIZE);
51020+}
51021+
51022+char *
51023+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
51024+{
51025+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
51026+ PAGE_SIZE);
51027+}
51028+
51029+__inline__ __u32
51030+to_gr_audit(const __u32 reqmode)
51031+{
51032+ /* masks off auditable permission flags, then shifts them to create
51033+ auditing flags, and adds the special case of append auditing if
51034+ we're requesting write */
51035+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
51036+}
51037+
51038+struct acl_subject_label *
51039+lookup_subject_map(const struct acl_subject_label *userp)
51040+{
51041+ unsigned int index = shash(userp, subj_map_set.s_size);
51042+ struct subject_map *match;
51043+
51044+ match = subj_map_set.s_hash[index];
51045+
51046+ while (match && match->user != userp)
51047+ match = match->next;
51048+
51049+ if (match != NULL)
51050+ return match->kernel;
51051+ else
51052+ return NULL;
51053+}
51054+
51055+static void
51056+insert_subj_map_entry(struct subject_map *subjmap)
51057+{
51058+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
51059+ struct subject_map **curr;
51060+
51061+ subjmap->prev = NULL;
51062+
51063+ curr = &subj_map_set.s_hash[index];
51064+ if (*curr != NULL)
51065+ (*curr)->prev = subjmap;
51066+
51067+ subjmap->next = *curr;
51068+ *curr = subjmap;
51069+
51070+ return;
51071+}
51072+
51073+static struct acl_role_label *
51074+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
51075+ const gid_t gid)
51076+{
51077+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
51078+ struct acl_role_label *match;
51079+ struct role_allowed_ip *ipp;
51080+ unsigned int x;
51081+ u32 curr_ip = task->signal->curr_ip;
51082+
51083+ task->signal->saved_ip = curr_ip;
51084+
51085+ match = acl_role_set.r_hash[index];
51086+
51087+ while (match) {
51088+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
51089+ for (x = 0; x < match->domain_child_num; x++) {
51090+ if (match->domain_children[x] == uid)
51091+ goto found;
51092+ }
51093+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
51094+ break;
51095+ match = match->next;
51096+ }
51097+found:
51098+ if (match == NULL) {
51099+ try_group:
51100+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
51101+ match = acl_role_set.r_hash[index];
51102+
51103+ while (match) {
51104+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
51105+ for (x = 0; x < match->domain_child_num; x++) {
51106+ if (match->domain_children[x] == gid)
51107+ goto found2;
51108+ }
51109+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
51110+ break;
51111+ match = match->next;
51112+ }
51113+found2:
51114+ if (match == NULL)
51115+ match = default_role;
51116+ if (match->allowed_ips == NULL)
51117+ return match;
51118+ else {
51119+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51120+ if (likely
51121+ ((ntohl(curr_ip) & ipp->netmask) ==
51122+ (ntohl(ipp->addr) & ipp->netmask)))
51123+ return match;
51124+ }
51125+ match = default_role;
51126+ }
51127+ } else if (match->allowed_ips == NULL) {
51128+ return match;
51129+ } else {
51130+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
51131+ if (likely
51132+ ((ntohl(curr_ip) & ipp->netmask) ==
51133+ (ntohl(ipp->addr) & ipp->netmask)))
51134+ return match;
51135+ }
51136+ goto try_group;
51137+ }
51138+
51139+ return match;
51140+}
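
The allowed_ips walk above boils down to a masked address comparison; a standalone userspace version of that test (illustrative only, host byte order throughout) looks like this:

```c
/* same masked comparison as in lookup_acl_role_label() */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static int ip_allowed(const char *ip, const char *net, const char *mask)
{
	uint32_t curr = ntohl(inet_addr(ip));
	uint32_t addr = ntohl(inet_addr(net));
	uint32_t nm   = ntohl(inet_addr(mask));

	return (curr & nm) == (addr & nm);
}

int main(void)
{
	/* 1: inside 192.168.1.0/24, 0: outside it */
	printf("%d\n", ip_allowed("192.168.1.42", "192.168.1.0", "255.255.255.0"));
	printf("%d\n", ip_allowed("10.0.0.5", "192.168.1.0", "255.255.255.0"));
	return 0;
}
```
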
51141+
51142+struct acl_subject_label *
51143+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
51144+ const struct acl_role_label *role)
51145+{
51146+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51147+ struct acl_subject_label *match;
51148+
51149+ match = role->subj_hash[index];
51150+
51151+ while (match && (match->inode != ino || match->device != dev ||
51152+ (match->mode & GR_DELETED))) {
51153+ match = match->next;
51154+ }
51155+
51156+ if (match && !(match->mode & GR_DELETED))
51157+ return match;
51158+ else
51159+ return NULL;
51160+}
51161+
51162+struct acl_subject_label *
51163+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
51164+ const struct acl_role_label *role)
51165+{
51166+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
51167+ struct acl_subject_label *match;
51168+
51169+ match = role->subj_hash[index];
51170+
51171+ while (match && (match->inode != ino || match->device != dev ||
51172+ !(match->mode & GR_DELETED))) {
51173+ match = match->next;
51174+ }
51175+
51176+ if (match && (match->mode & GR_DELETED))
51177+ return match;
51178+ else
51179+ return NULL;
51180+}
51181+
51182+static struct acl_object_label *
51183+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
51184+ const struct acl_subject_label *subj)
51185+{
51186+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51187+ struct acl_object_label *match;
51188+
51189+ match = subj->obj_hash[index];
51190+
51191+ while (match && (match->inode != ino || match->device != dev ||
51192+ (match->mode & GR_DELETED))) {
51193+ match = match->next;
51194+ }
51195+
51196+ if (match && !(match->mode & GR_DELETED))
51197+ return match;
51198+ else
51199+ return NULL;
51200+}
51201+
51202+static struct acl_object_label *
51203+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
51204+ const struct acl_subject_label *subj)
51205+{
51206+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
51207+ struct acl_object_label *match;
51208+
51209+ match = subj->obj_hash[index];
51210+
51211+ while (match && (match->inode != ino || match->device != dev ||
51212+ !(match->mode & GR_DELETED))) {
51213+ match = match->next;
51214+ }
51215+
51216+ if (match && (match->mode & GR_DELETED))
51217+ return match;
51218+
51219+ match = subj->obj_hash[index];
51220+
51221+ while (match && (match->inode != ino || match->device != dev ||
51222+ (match->mode & GR_DELETED))) {
51223+ match = match->next;
51224+ }
51225+
51226+ if (match && !(match->mode & GR_DELETED))
51227+ return match;
51228+ else
51229+ return NULL;
51230+}
51231+
51232+static struct name_entry *
51233+lookup_name_entry(const char *name)
51234+{
51235+ unsigned int len = strlen(name);
51236+ unsigned int key = full_name_hash(name, len);
51237+ unsigned int index = key % name_set.n_size;
51238+ struct name_entry *match;
51239+
51240+ match = name_set.n_hash[index];
51241+
51242+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
51243+ match = match->next;
51244+
51245+ return match;
51246+}
51247+
51248+static struct name_entry *
51249+lookup_name_entry_create(const char *name)
51250+{
51251+ unsigned int len = strlen(name);
51252+ unsigned int key = full_name_hash(name, len);
51253+ unsigned int index = key % name_set.n_size;
51254+ struct name_entry *match;
51255+
51256+ match = name_set.n_hash[index];
51257+
51258+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51259+ !match->deleted))
51260+ match = match->next;
51261+
51262+ if (match && match->deleted)
51263+ return match;
51264+
51265+ match = name_set.n_hash[index];
51266+
51267+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
51268+ match->deleted))
51269+ match = match->next;
51270+
51271+ if (match && !match->deleted)
51272+ return match;
51273+ else
51274+ return NULL;
51275+}
51276+
51277+static struct inodev_entry *
51278+lookup_inodev_entry(const ino_t ino, const dev_t dev)
51279+{
51280+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
51281+ struct inodev_entry *match;
51282+
51283+ match = inodev_set.i_hash[index];
51284+
51285+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
51286+ match = match->next;
51287+
51288+ return match;
51289+}
51290+
51291+static void
51292+insert_inodev_entry(struct inodev_entry *entry)
51293+{
51294+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
51295+ inodev_set.i_size);
51296+ struct inodev_entry **curr;
51297+
51298+ entry->prev = NULL;
51299+
51300+ curr = &inodev_set.i_hash[index];
51301+ if (*curr != NULL)
51302+ (*curr)->prev = entry;
51303+
51304+ entry->next = *curr;
51305+ *curr = entry;
51306+
51307+ return;
51308+}
51309+
51310+static void
51311+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
51312+{
51313+ unsigned int index =
51314+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
51315+ struct acl_role_label **curr;
4c928ab7 51316+ struct acl_role_label *tmp, *tmp2;
fe2de317
MT
51317+
51318+ curr = &acl_role_set.r_hash[index];
51319+
4c928ab7
MT
51320+ /* simple case, slot is empty, just set it to our role */
51321+ if (*curr == NULL) {
51322+ *curr = role;
51323+ } else {
51324+ /* example:
51325+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
51326+ 2 -> 3
51327+ */
51328+ /* first check to see if we can already be reached via this slot */
51329+ tmp = *curr;
51330+ while (tmp && tmp != role)
fe2de317 51331+ tmp = tmp->next;
4c928ab7
MT
51332+ if (tmp == role) {
51333+ /* we don't need to add ourselves to this slot's chain */
51334+ return;
51335+ }
51336+ /* we need to add ourselves to this chain, two cases */
51337+ if (role->next == NULL) {
51338+ /* simple case, append the current chain to our role */
51339+ role->next = *curr;
51340+ *curr = role;
51341+ } else {
51342+ /* 1 -> 2 -> 3 -> 4
51343+ 2 -> 3 -> 4
51344+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
51345+ */
51346+ /* trickier case: walk our role's chain until we find
51347+ the role for the start of the current slot's chain */
51348+ tmp = role;
51349+ tmp2 = *curr;
51350+ while (tmp->next && tmp->next != tmp2)
51351+ tmp = tmp->next;
51352+ if (tmp->next == tmp2) {
51353+ /* from example above, we found 3, so just
51354+ replace this slot's chain with ours */
51355+ *curr = role;
51356+ } else {
51357+ /* we didn't find a subset of our role's chain
51358+ in the current slot's chain, so append their
51359+ chain to ours, and set us as the first role in
51360+ the slot's chain
51361+
51362+ we could fold this case with the case above,
51363+ but making it explicit for clarity
51364+ */
51365+ tmp->next = tmp2;
51366+ *curr = role;
51367+ }
51368+ }
51369+ }
fe2de317
MT
51370+
51371+ return;
51372+}
51373+
51374+static void
51375+insert_acl_role_label(struct acl_role_label *role)
51376+{
51377+ int i;
51378+
51379+ if (role_list == NULL) {
51380+ role_list = role;
51381+ role->prev = NULL;
51382+ } else {
51383+ role->prev = role_list;
51384+ role_list = role;
51385+ }
51386+
51387+ /* used for hash chains */
51388+ role->next = NULL;
51389+
51390+ if (role->roletype & GR_ROLE_DOMAIN) {
51391+ for (i = 0; i < role->domain_child_num; i++)
51392+ __insert_acl_role_label(role, role->domain_children[i]);
51393+ } else
51394+ __insert_acl_role_label(role, role->uidgid);
51395+}
51396+
51397+static int
51398+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
51399+{
51400+ struct name_entry **curr, *nentry;
51401+ struct inodev_entry *ientry;
51402+ unsigned int len = strlen(name);
51403+ unsigned int key = full_name_hash(name, len);
51404+ unsigned int index = key % name_set.n_size;
51405+
51406+ curr = &name_set.n_hash[index];
51407+
51408+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
51409+ curr = &((*curr)->next);
51410+
51411+ if (*curr != NULL)
51412+ return 1;
51413+
51414+ nentry = acl_alloc(sizeof (struct name_entry));
51415+ if (nentry == NULL)
51416+ return 0;
51417+ ientry = acl_alloc(sizeof (struct inodev_entry));
51418+ if (ientry == NULL)
51419+ return 0;
51420+ ientry->nentry = nentry;
51421+
51422+ nentry->key = key;
51423+ nentry->name = name;
51424+ nentry->inode = inode;
51425+ nentry->device = device;
51426+ nentry->len = len;
51427+ nentry->deleted = deleted;
51428+
51429+ nentry->prev = NULL;
51430+ curr = &name_set.n_hash[index];
51431+ if (*curr != NULL)
51432+ (*curr)->prev = nentry;
51433+ nentry->next = *curr;
51434+ *curr = nentry;
51435+
51436+ /* insert us into the table searchable by inode/dev */
51437+ insert_inodev_entry(ientry);
51438+
51439+ return 1;
51440+}
51441+
51442+static void
51443+insert_acl_obj_label(struct acl_object_label *obj,
51444+ struct acl_subject_label *subj)
51445+{
51446+ unsigned int index =
51447+ fhash(obj->inode, obj->device, subj->obj_hash_size);
51448+ struct acl_object_label **curr;
51449+
51450+
51451+ obj->prev = NULL;
51452+
51453+ curr = &subj->obj_hash[index];
51454+ if (*curr != NULL)
51455+ (*curr)->prev = obj;
51456+
51457+ obj->next = *curr;
51458+ *curr = obj;
51459+
51460+ return;
51461+}
51462+
51463+static void
51464+insert_acl_subj_label(struct acl_subject_label *obj,
51465+ struct acl_role_label *role)
51466+{
51467+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
51468+ struct acl_subject_label **curr;
51469+
51470+ obj->prev = NULL;
51471+
51472+ curr = &role->subj_hash[index];
51473+ if (*curr != NULL)
51474+ (*curr)->prev = obj;
51475+
51476+ obj->next = *curr;
51477+ *curr = obj;
51478+
51479+ return;
51480+}
51481+
51482+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
51483+
51484+static void *
51485+create_table(__u32 * len, int elementsize)
51486+{
51487+ unsigned int table_sizes[] = {
efbe55a5
MT
51488+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
51489+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
51490+ 4194301, 8388593, 16777213, 33554393, 67108859
51491+ };
51492+ void *newtable = NULL;
51493+ unsigned int pwr = 0;
51494+
51495+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
51496+ table_sizes[pwr] <= *len)
51497+ pwr++;
51498+
51499+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
51500+ return newtable;
51501+
51502+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
51503+ newtable =
51504+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
51505+ else
51506+ newtable = vmalloc(table_sizes[pwr] * elementsize);
51507+
51508+ *len = table_sizes[pwr];
51509+
51510+ return newtable;
51511+}
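
create_table() above walks a fixed list of primes and returns the first one strictly larger than the requested element count, which keeps the chained hash tables near the load factor of one mentioned in the comment. A userspace rendering of the sizing rule (allocation and overflow handling omitted):

```c
#include <stdio.h>

/* prime table copied from create_table() above */
static unsigned int pick_table_size(unsigned int len)
{
	static const unsigned int table_sizes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
		32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
		4194301, 8388593, 16777213, 33554393, 67108859
	};
	unsigned int pwr = 0;

	while (pwr < (sizeof(table_sizes) / sizeof(table_sizes[0])) - 1 &&
	       table_sizes[pwr] <= len)
		pwr++;
	return table_sizes[pwr];
}

int main(void)
{
	printf("%u\n", pick_table_size(1000));	/* 1021 */
	printf("%u\n", pick_table_size(5000));	/* 8191 */
	return 0;
}
```
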
51512+
51513+static int
51514+init_variables(const struct gr_arg *arg)
51515+{
c6e2a6c8 51516+ struct task_struct *reaper = init_pid_ns.child_reaper;
efbe55a5 51517+ unsigned int stacksize;
58c5fc13
MT
51518+
51519+ subj_map_set.s_size = arg->role_db.num_subjects;
51520+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
51521+ name_set.n_size = arg->role_db.num_objects;
51522+ inodev_set.i_size = arg->role_db.num_objects;
51523+
51524+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
51525+ !name_set.n_size || !inodev_set.i_size)
51526+ return 1;
51527+
51528+ if (!gr_init_uidset())
51529+ return 1;
51530+
51531+ /* set up the stack that holds allocation info */
51532+
51533+ stacksize = arg->role_db.num_pointers + 5;
51534+
51535+ if (!acl_alloc_stack_init(stacksize))
51536+ return 1;
51537+
51538+ /* grab reference for the real root dentry and vfsmount */
ea610fa8 51539+ get_fs_root(reaper->fs, &real_root);
58c5fc13 51540+
16454cff
MT
51541+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
51542+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
51543+#endif
51544+
15a11c5b
MT
51545+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
51546+ if (fakefs_obj_rw == NULL)
51547+ return 1;
51548+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
51549+
51550+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
51551+ if (fakefs_obj_rwx == NULL)
58c5fc13 51552+ return 1;
15a11c5b 51553+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
58c5fc13
MT
51554+
51555+ subj_map_set.s_hash =
51556+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
51557+ acl_role_set.r_hash =
51558+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
51559+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
51560+ inodev_set.i_hash =
51561+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
51562+
51563+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
51564+ !name_set.n_hash || !inodev_set.i_hash)
51565+ return 1;
51566+
51567+ memset(subj_map_set.s_hash, 0,
51568+ sizeof(struct subject_map *) * subj_map_set.s_size);
51569+ memset(acl_role_set.r_hash, 0,
51570+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
51571+ memset(name_set.n_hash, 0,
51572+ sizeof (struct name_entry *) * name_set.n_size);
51573+ memset(inodev_set.i_hash, 0,
51574+ sizeof (struct inodev_entry *) * inodev_set.i_size);
51575+
51576+ return 0;
51577+}
51578+
51579+/* free information not needed after startup
51580+ currently contains user->kernel pointer mappings for subjects
51581+*/
51582+
51583+static void
51584+free_init_variables(void)
51585+{
51586+ __u32 i;
51587+
51588+ if (subj_map_set.s_hash) {
51589+ for (i = 0; i < subj_map_set.s_size; i++) {
51590+ if (subj_map_set.s_hash[i]) {
51591+ kfree(subj_map_set.s_hash[i]);
51592+ subj_map_set.s_hash[i] = NULL;
51593+ }
51594+ }
51595+
51596+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
51597+ PAGE_SIZE)
51598+ kfree(subj_map_set.s_hash);
51599+ else
51600+ vfree(subj_map_set.s_hash);
51601+ }
51602+
51603+ return;
51604+}
51605+
51606+static void
51607+free_variables(void)
51608+{
51609+ struct acl_subject_label *s;
51610+ struct acl_role_label *r;
51611+ struct task_struct *task, *task2;
ae4e228f 51612+ unsigned int x;
58c5fc13
MT
51613+
51614+ gr_clear_learn_entries();
51615+
51616+ read_lock(&tasklist_lock);
51617+ do_each_thread(task2, task) {
51618+ task->acl_sp_role = 0;
51619+ task->acl_role_id = 0;
51620+ task->acl = NULL;
51621+ task->role = NULL;
51622+ } while_each_thread(task2, task);
51623+ read_unlock(&tasklist_lock);
51624+
51625+ /* release the reference to the real root dentry and vfsmount */
6892158b 51626+ path_put(&real_root);
4c928ab7 51627+ memset(&real_root, 0, sizeof(real_root));
58c5fc13
MT
51628+
51629+ /* free all object hash tables */
51630+
ae4e228f 51631+ FOR_EACH_ROLE_START(r)
58c5fc13 51632+ if (r->subj_hash == NULL)
ae4e228f 51633+ goto next_role;
58c5fc13
MT
51634+ FOR_EACH_SUBJECT_START(r, s, x)
51635+ if (s->obj_hash == NULL)
51636+ break;
51637+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51638+ kfree(s->obj_hash);
51639+ else
51640+ vfree(s->obj_hash);
51641+ FOR_EACH_SUBJECT_END(s, x)
51642+ FOR_EACH_NESTED_SUBJECT_START(r, s)
51643+ if (s->obj_hash == NULL)
51644+ break;
51645+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
51646+ kfree(s->obj_hash);
51647+ else
51648+ vfree(s->obj_hash);
51649+ FOR_EACH_NESTED_SUBJECT_END(s)
51650+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
51651+ kfree(r->subj_hash);
51652+ else
51653+ vfree(r->subj_hash);
51654+ r->subj_hash = NULL;
ae4e228f
MT
51655+next_role:
51656+ FOR_EACH_ROLE_END(r)
58c5fc13
MT
51657+
51658+ acl_free_all();
51659+
51660+ if (acl_role_set.r_hash) {
51661+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
51662+ PAGE_SIZE)
51663+ kfree(acl_role_set.r_hash);
51664+ else
51665+ vfree(acl_role_set.r_hash);
51666+ }
51667+ if (name_set.n_hash) {
51668+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
51669+ PAGE_SIZE)
51670+ kfree(name_set.n_hash);
51671+ else
51672+ vfree(name_set.n_hash);
51673+ }
51674+
51675+ if (inodev_set.i_hash) {
51676+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
51677+ PAGE_SIZE)
51678+ kfree(inodev_set.i_hash);
51679+ else
51680+ vfree(inodev_set.i_hash);
51681+ }
51682+
51683+ gr_free_uidset();
51684+
51685+ memset(&name_set, 0, sizeof (struct name_db));
51686+ memset(&inodev_set, 0, sizeof (struct inodev_db));
51687+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
51688+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
51689+
51690+ default_role = NULL;
4c928ab7 51691+ kernel_role = NULL;
ae4e228f 51692+ role_list = NULL;
58c5fc13
MT
51693+
51694+ return;
51695+}
51696+
51697+static __u32
51698+count_user_objs(struct acl_object_label *userp)
51699+{
51700+ struct acl_object_label o_tmp;
51701+ __u32 num = 0;
51702+
51703+ while (userp) {
51704+ if (copy_from_user(&o_tmp, userp,
51705+ sizeof (struct acl_object_label)))
51706+ break;
51707+
51708+ userp = o_tmp.prev;
51709+ num++;
51710+ }
51711+
51712+ return num;
51713+}
51714+
51715+static struct acl_subject_label *
51716+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
51717+
51718+static int
51719+copy_user_glob(struct acl_object_label *obj)
51720+{
51721+ struct acl_object_label *g_tmp, **guser;
51722+ unsigned int len;
51723+ char *tmp;
51724+
51725+ if (obj->globbed == NULL)
51726+ return 0;
51727+
51728+ guser = &obj->globbed;
51729+ while (*guser) {
51730+ g_tmp = (struct acl_object_label *)
51731+ acl_alloc(sizeof (struct acl_object_label));
51732+ if (g_tmp == NULL)
51733+ return -ENOMEM;
51734+
51735+ if (copy_from_user(g_tmp, *guser,
51736+ sizeof (struct acl_object_label)))
51737+ return -EFAULT;
51738+
51739+ len = strnlen_user(g_tmp->filename, PATH_MAX);
51740+
51741+ if (!len || len >= PATH_MAX)
51742+ return -EINVAL;
51743+
51744+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51745+ return -ENOMEM;
51746+
51747+ if (copy_from_user(tmp, g_tmp->filename, len))
51748+ return -EFAULT;
51749+ tmp[len-1] = '\0';
51750+ g_tmp->filename = tmp;
51751+
51752+ *guser = g_tmp;
51753+ guser = &(g_tmp->next);
51754+ }
51755+
51756+ return 0;
51757+}
51758+
51759+static int
51760+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
51761+ struct acl_role_label *role)
51762+{
51763+ struct acl_object_label *o_tmp;
51764+ unsigned int len;
51765+ int ret;
51766+ char *tmp;
51767+
51768+ while (userp) {
51769+ if ((o_tmp = (struct acl_object_label *)
51770+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
51771+ return -ENOMEM;
51772+
51773+ if (copy_from_user(o_tmp, userp,
51774+ sizeof (struct acl_object_label)))
51775+ return -EFAULT;
51776+
51777+ userp = o_tmp->prev;
51778+
51779+ len = strnlen_user(o_tmp->filename, PATH_MAX);
51780+
51781+ if (!len || len >= PATH_MAX)
51782+ return -EINVAL;
51783+
51784+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51785+ return -ENOMEM;
51786+
51787+ if (copy_from_user(tmp, o_tmp->filename, len))
51788+ return -EFAULT;
51789+ tmp[len-1] = '\0';
51790+ o_tmp->filename = tmp;
51791+
51792+ insert_acl_obj_label(o_tmp, subj);
51793+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
51794+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
51795+ return -ENOMEM;
51796+
51797+ ret = copy_user_glob(o_tmp);
51798+ if (ret)
51799+ return ret;
51800+
51801+ if (o_tmp->nested) {
51802+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
51803+ if (IS_ERR(o_tmp->nested))
51804+ return PTR_ERR(o_tmp->nested);
51805+
51806+ /* insert into nested subject list */
51807+ o_tmp->nested->next = role->hash->first;
51808+ role->hash->first = o_tmp->nested;
51809+ }
51810+ }
51811+
51812+ return 0;
51813+}
51814+
51815+static __u32
51816+count_user_subjs(struct acl_subject_label *userp)
51817+{
51818+ struct acl_subject_label s_tmp;
51819+ __u32 num = 0;
51820+
51821+ while (userp) {
51822+ if (copy_from_user(&s_tmp, userp,
51823+ sizeof (struct acl_subject_label)))
51824+ break;
51825+
51826+ userp = s_tmp.prev;
51827+ /* do not count nested subjects against this count, since
51828+ they are not included in the hash table, but are
51829+ attached to objects. We have already counted
51830+ the subjects in userspace for the allocation
51831+ stack
51832+ */
51833+ if (!(s_tmp.mode & GR_NESTED))
51834+ num++;
51835+ }
51836+
51837+ return num;
51838+}
51839+
51840+static int
51841+copy_user_allowedips(struct acl_role_label *rolep)
51842+{
51843+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
51844+
51845+ ruserip = rolep->allowed_ips;
51846+
51847+ while (ruserip) {
51848+ rlast = rtmp;
51849+
51850+ if ((rtmp = (struct role_allowed_ip *)
51851+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
51852+ return -ENOMEM;
51853+
51854+ if (copy_from_user(rtmp, ruserip,
51855+ sizeof (struct role_allowed_ip)))
51856+ return -EFAULT;
51857+
51858+ ruserip = rtmp->prev;
51859+
51860+ if (!rlast) {
51861+ rtmp->prev = NULL;
51862+ rolep->allowed_ips = rtmp;
51863+ } else {
51864+ rlast->next = rtmp;
51865+ rtmp->prev = rlast;
51866+ }
51867+
51868+ if (!ruserip)
51869+ rtmp->next = NULL;
51870+ }
51871+
51872+ return 0;
51873+}
51874+
51875+static int
51876+copy_user_transitions(struct acl_role_label *rolep)
51877+{
51878+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
51879+
51880+ unsigned int len;
51881+ char *tmp;
51882+
51883+ rusertp = rolep->transitions;
51884+
51885+ while (rusertp) {
51886+ rlast = rtmp;
51887+
51888+ if ((rtmp = (struct role_transition *)
51889+ acl_alloc(sizeof (struct role_transition))) == NULL)
51890+ return -ENOMEM;
51891+
51892+ if (copy_from_user(rtmp, rusertp,
51893+ sizeof (struct role_transition)))
51894+ return -EFAULT;
51895+
51896+ rusertp = rtmp->prev;
51897+
51898+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
51899+
51900+ if (!len || len >= GR_SPROLE_LEN)
51901+ return -EINVAL;
51902+
51903+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51904+ return -ENOMEM;
51905+
51906+ if (copy_from_user(tmp, rtmp->rolename, len))
51907+ return -EFAULT;
51908+ tmp[len-1] = '\0';
51909+ rtmp->rolename = tmp;
51910+
51911+ if (!rlast) {
51912+ rtmp->prev = NULL;
51913+ rolep->transitions = rtmp;
51914+ } else {
51915+ rlast->next = rtmp;
51916+ rtmp->prev = rlast;
51917+ }
51918+
51919+ if (!rusertp)
51920+ rtmp->next = NULL;
51921+ }
51922+
51923+ return 0;
51924+}
51925+
51926+static struct acl_subject_label *
51927+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
51928+{
51929+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
51930+ unsigned int len;
51931+ char *tmp;
51932+ __u32 num_objs;
51933+ struct acl_ip_label **i_tmp, *i_utmp2;
51934+ struct gr_hash_struct ghash;
51935+ struct subject_map *subjmap;
51936+ unsigned int i_num;
51937+ int err;
51938+
51939+ s_tmp = lookup_subject_map(userp);
51940+
51941+ /* we've already copied this subject into the kernel, just return
51942+ the reference to it, and don't copy it over again
51943+ */
51944+ if (s_tmp)
51945+ return(s_tmp);
51946+
51947+ if ((s_tmp = (struct acl_subject_label *)
51948+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
51949+ return ERR_PTR(-ENOMEM);
51950+
51951+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
51952+ if (subjmap == NULL)
51953+ return ERR_PTR(-ENOMEM);
51954+
51955+ subjmap->user = userp;
51956+ subjmap->kernel = s_tmp;
51957+ insert_subj_map_entry(subjmap);
51958+
51959+ if (copy_from_user(s_tmp, userp,
51960+ sizeof (struct acl_subject_label)))
51961+ return ERR_PTR(-EFAULT);
51962+
51963+ len = strnlen_user(s_tmp->filename, PATH_MAX);
51964+
51965+ if (!len || len >= PATH_MAX)
51966+ return ERR_PTR(-EINVAL);
51967+
51968+ if ((tmp = (char *) acl_alloc(len)) == NULL)
51969+ return ERR_PTR(-ENOMEM);
51970+
51971+ if (copy_from_user(tmp, s_tmp->filename, len))
51972+ return ERR_PTR(-EFAULT);
51973+ tmp[len-1] = '\0';
51974+ s_tmp->filename = tmp;
51975+
51976+ if (!strcmp(s_tmp->filename, "/"))
51977+ role->root_label = s_tmp;
51978+
51979+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
51980+ return ERR_PTR(-EFAULT);
51981+
51982+ /* copy user and group transition tables */
51983+
51984+ if (s_tmp->user_trans_num) {
51985+ uid_t *uidlist;
51986+
51987+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
51988+ if (uidlist == NULL)
51989+ return ERR_PTR(-ENOMEM);
51990+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
51991+ return ERR_PTR(-EFAULT);
51992+
51993+ s_tmp->user_transitions = uidlist;
51994+ }
51995+
51996+ if (s_tmp->group_trans_num) {
51997+ gid_t *gidlist;
51998+
51999+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
52000+ if (gidlist == NULL)
52001+ return ERR_PTR(-ENOMEM);
52002+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
52003+ return ERR_PTR(-EFAULT);
52004+
52005+ s_tmp->group_transitions = gidlist;
52006+ }
52007+
52008+ /* set up object hash table */
52009+ num_objs = count_user_objs(ghash.first);
52010+
52011+ s_tmp->obj_hash_size = num_objs;
52012+ s_tmp->obj_hash =
52013+ (struct acl_object_label **)
52014+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
52015+
52016+ if (!s_tmp->obj_hash)
52017+ return ERR_PTR(-ENOMEM);
52018+
52019+ memset(s_tmp->obj_hash, 0,
52020+ s_tmp->obj_hash_size *
52021+ sizeof (struct acl_object_label *));
52022+
52023+ /* add in objects */
52024+ err = copy_user_objs(ghash.first, s_tmp, role);
52025+
52026+ if (err)
52027+ return ERR_PTR(err);
52028+
52029+ /* set pointer for parent subject */
52030+ if (s_tmp->parent_subject) {
52031+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
52032+
52033+ if (IS_ERR(s_tmp2))
52034+ return s_tmp2;
52035+
52036+ s_tmp->parent_subject = s_tmp2;
52037+ }
52038+
52039+ /* add in ip acls */
52040+
52041+ if (!s_tmp->ip_num) {
52042+ s_tmp->ips = NULL;
52043+ goto insert;
52044+ }
52045+
52046+ i_tmp =
52047+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
52048+ sizeof (struct acl_ip_label *));
52049+
52050+ if (!i_tmp)
52051+ return ERR_PTR(-ENOMEM);
52052+
52053+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
52054+ *(i_tmp + i_num) =
52055+ (struct acl_ip_label *)
52056+ acl_alloc(sizeof (struct acl_ip_label));
52057+ if (!*(i_tmp + i_num))
52058+ return ERR_PTR(-ENOMEM);
52059+
52060+ if (copy_from_user
52061+ (&i_utmp2, s_tmp->ips + i_num,
52062+ sizeof (struct acl_ip_label *)))
52063+ return ERR_PTR(-EFAULT);
52064+
52065+ if (copy_from_user
52066+ (*(i_tmp + i_num), i_utmp2,
52067+ sizeof (struct acl_ip_label)))
52068+ return ERR_PTR(-EFAULT);
52069+
52070+ if ((*(i_tmp + i_num))->iface == NULL)
52071+ continue;
52072+
52073+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
52074+ if (!len || len >= IFNAMSIZ)
52075+ return ERR_PTR(-EINVAL);
52076+ tmp = acl_alloc(len);
52077+ if (tmp == NULL)
52078+ return ERR_PTR(-ENOMEM);
52079+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
52080+ return ERR_PTR(-EFAULT);
52081+ (*(i_tmp + i_num))->iface = tmp;
52082+ }
52083+
52084+ s_tmp->ips = i_tmp;
52085+
52086+insert:
52087+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
52088+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
52089+ return ERR_PTR(-ENOMEM);
52090+
52091+ return s_tmp;
52092+}
52093+
52094+static int
52095+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
52096+{
52097+ struct acl_subject_label s_pre;
52098+ struct acl_subject_label * ret;
52099+ int err;
52100+
52101+ while (userp) {
52102+ if (copy_from_user(&s_pre, userp,
52103+ sizeof (struct acl_subject_label)))
52104+ return -EFAULT;
52105+
52106+ /* do not add nested subjects here, add
52107+ while parsing objects
52108+ */
52109+
52110+ if (s_pre.mode & GR_NESTED) {
52111+ userp = s_pre.prev;
52112+ continue;
52113+ }
52114+
52115+ ret = do_copy_user_subj(userp, role);
52116+
52117+ err = PTR_ERR(ret);
52118+ if (IS_ERR(ret))
52119+ return err;
52120+
52121+ insert_acl_subj_label(ret, role);
52122+
52123+ userp = s_pre.prev;
52124+ }
52125+
52126+ return 0;
52127+}
52128+
52129+static int
52130+copy_user_acl(struct gr_arg *arg)
52131+{
52132+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
52133+ struct sprole_pw *sptmp;
52134+ struct gr_hash_struct *ghash;
52135+ uid_t *domainlist;
52136+ unsigned int r_num;
52137+ unsigned int len;
52138+ char *tmp;
52139+ int err = 0;
52140+ __u16 i;
52141+ __u32 num_subjs;
52142+
52143+ /* we need a default and kernel role */
52144+ if (arg->role_db.num_roles < 2)
52145+ return -EINVAL;
52146+
52147+ /* copy special role authentication info from userspace */
52148+
52149+ num_sprole_pws = arg->num_sprole_pws;
52150+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
52151+
52152+ if (!acl_special_roles && num_sprole_pws)
52153+ return -ENOMEM;
52154+
52155+ for (i = 0; i < num_sprole_pws; i++) {
52156+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
52157+ if (!sptmp)
52158+ return -ENOMEM;
58c5fc13 52159+ if (copy_from_user(sptmp, arg->sprole_pws + i,
52160+ sizeof (struct sprole_pw)))
52161+ return -EFAULT;
58c5fc13 52162+
4c928ab7 52163+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
58c5fc13 52164+
52165+ if (!len || len >= GR_SPROLE_LEN)
52166+ return -EINVAL;
58c5fc13 52167+
52168+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52169+ return -ENOMEM;
52170+
52171+ if (copy_from_user(tmp, sptmp->rolename, len))
52172+ return -EFAULT;
58c5fc13 52173+
58c5fc13 52174+ tmp[len-1] = '\0';
16454cff 52175+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
52176+ printk(KERN_ALERT "Copying special role %s\n", tmp);
52177+#endif
52178+ sptmp->rolename = tmp;
52179+ acl_special_roles[i] = sptmp;
52180+ }
52181+
52182+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
52183+
52184+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
52185+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
52186+
52187+ if (!r_tmp)
52188+ return -ENOMEM;
52189+
52190+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
52191+ sizeof (struct acl_role_label *)))
52192+ return -EFAULT;
52193+
52194+ if (copy_from_user(r_tmp, r_utmp2,
52195+ sizeof (struct acl_role_label)))
52196+ return -EFAULT;
52197+
52198+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
52199+
52200+ if (!len || len >= PATH_MAX)
52201+ return -EINVAL;
52202+
52203+ if ((tmp = (char *) acl_alloc(len)) == NULL)
52204+ return -ENOMEM;
52205+
52206+ if (copy_from_user(tmp, r_tmp->rolename, len))
52207+ return -EFAULT;
58c5fc13 52208+
52209+ tmp[len-1] = '\0';
52210+ r_tmp->rolename = tmp;
52211+
52212+ if (!strcmp(r_tmp->rolename, "default")
52213+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
52214+ default_role = r_tmp;
52215+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
52216+ kernel_role = r_tmp;
52217+ }
52218+
52219+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
52220+ return -ENOMEM;
52221+
52222+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
52223+ return -EFAULT;
52224+
52225+ r_tmp->hash = ghash;
52226+
52227+ num_subjs = count_user_subjs(r_tmp->hash->first);
52228+
52229+ r_tmp->subj_hash_size = num_subjs;
52230+ r_tmp->subj_hash =
52231+ (struct acl_subject_label **)
52232+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
52233+
52234+ if (!r_tmp->subj_hash)
52235+ return -ENOMEM;
52236+
52237+ err = copy_user_allowedips(r_tmp);
52238+ if (err)
4c928ab7 52239+ return err;
52240+
52241+ /* copy domain info */
52242+ if (r_tmp->domain_children != NULL) {
52243+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
52244+ if (domainlist == NULL)
52245+ return -ENOMEM;
52246+
52247+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
52248+ return -EFAULT;
52249+
52250+ r_tmp->domain_children = domainlist;
52251+ }
52252+
52253+ err = copy_user_transitions(r_tmp);
52254+ if (err)
4c928ab7 52255+ return err;
52256+
52257+ memset(r_tmp->subj_hash, 0,
52258+ r_tmp->subj_hash_size *
52259+ sizeof (struct acl_subject_label *));
52260+
52261+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
52262+
52263+ if (err)
4c928ab7 52264+ return err;
52265+
52266+ /* set nested subject list to null */
52267+ r_tmp->hash->first = NULL;
52268+
52269+ insert_acl_role_label(r_tmp);
52270+ }
52271+
52272+ if (default_role == NULL || kernel_role == NULL)
52273+ return -EINVAL;
58c5fc13 52274+
4c928ab7 52275+ return err;
52276+}
52277+
52278+static int
52279+gracl_init(struct gr_arg *args)
52280+{
52281+ int error = 0;
52282+
52283+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
52284+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
52285+
52286+ if (init_variables(args)) {
52287+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
52288+ error = -ENOMEM;
52289+ free_variables();
52290+ goto out;
52291+ }
52292+
52293+ error = copy_user_acl(args);
52294+ free_init_variables();
52295+ if (error) {
52296+ free_variables();
52297+ goto out;
52298+ }
52299+
52300+ if ((error = gr_set_acls(0))) {
52301+ free_variables();
52302+ goto out;
52303+ }
52304+
ae4e228f 52305+ pax_open_kernel();
58c5fc13 52306+ gr_status |= GR_READY;
ae4e228f 52307+ pax_close_kernel();
52308+
52309+ out:
52310+ return error;
52311+}
52312+
52313+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
52314+
52315+static int
52316+glob_match(const char *p, const char *n)
52317+{
52318+ char c;
52319+
52320+ while ((c = *p++) != '\0') {
52321+ switch (c) {
52322+ case '?':
52323+ if (*n == '\0')
52324+ return 1;
52325+ else if (*n == '/')
52326+ return 1;
52327+ break;
52328+ case '\\':
52329+ if (*n != c)
52330+ return 1;
52331+ break;
52332+ case '*':
52333+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
52334+ if (*n == '/')
52335+ return 1;
52336+ else if (c == '?') {
52337+ if (*n == '\0')
52338+ return 1;
52339+ else
52340+ ++n;
52341+ }
52342+ }
52343+ if (c == '\0') {
52344+ return 0;
52345+ } else {
52346+ const char *endp;
52347+
52348+ if ((endp = strchr(n, '/')) == NULL)
52349+ endp = n + strlen(n);
52350+
52351+ if (c == '[') {
52352+ for (--p; n < endp; ++n)
52353+ if (!glob_match(p, n))
52354+ return 0;
52355+ } else if (c == '/') {
52356+ while (*n != '\0' && *n != '/')
52357+ ++n;
52358+ if (*n == '/' && !glob_match(p, n + 1))
52359+ return 0;
52360+ } else {
52361+ for (--p; n < endp; ++n)
52362+ if (*n == c && !glob_match(p, n))
52363+ return 0;
52364+ }
52365+
52366+ return 1;
52367+ }
52368+ case '[':
52369+ {
52370+ int not;
52371+ char cold;
52372+
52373+ if (*n == '\0' || *n == '/')
52374+ return 1;
52375+
52376+ not = (*p == '!' || *p == '^');
52377+ if (not)
52378+ ++p;
52379+
52380+ c = *p++;
52381+ for (;;) {
52382+ unsigned char fn = (unsigned char)*n;
52383+
52384+ if (c == '\0')
52385+ return 1;
52386+ else {
52387+ if (c == fn)
52388+ goto matched;
52389+ cold = c;
52390+ c = *p++;
52391+
52392+ if (c == '-' && *p != ']') {
52393+ unsigned char cend = *p++;
52394+
52395+ if (cend == '\0')
52396+ return 1;
52397+
52398+ if (cold <= fn && fn <= cend)
52399+ goto matched;
52400+
52401+ c = *p++;
52402+ }
52403+ }
52404+
52405+ if (c == ']')
52406+ break;
52407+ }
52408+ if (!not)
52409+ return 1;
52410+ break;
52411+ matched:
52412+ while (c != ']') {
52413+ if (c == '\0')
52414+ return 1;
52415+
52416+ c = *p++;
52417+ }
52418+ if (not)
52419+ return 1;
52420+ }
52421+ break;
52422+ default:
52423+ if (c != *n)
52424+ return 1;
52425+ }
52426+
52427+ ++n;
52428+ }
52429+
52430+ if (*n == '\0')
52431+ return 0;
52432+
52433+ if (*n == '/')
52434+ return 0;
52435+
52436+ return 1;
52437+}
52438+
52439+static struct acl_object_label *
52440+chk_glob_label(struct acl_object_label *globbed,
4c928ab7 52441+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
52442+{
52443+ struct acl_object_label *tmp;
52444+
52445+ if (*path == NULL)
52446+ *path = gr_to_filename_nolock(dentry, mnt);
52447+
52448+ tmp = globbed;
52449+
52450+ while (tmp) {
52451+ if (!glob_match(tmp->filename, *path))
52452+ return tmp;
52453+ tmp = tmp->next;
52454+ }
52455+
52456+ return NULL;
52457+}
52458+
52459+static struct acl_object_label *
52460+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
52461+ const ino_t curr_ino, const dev_t curr_dev,
52462+ const struct acl_subject_label *subj, char **path, const int checkglob)
52463+{
52464+ struct acl_subject_label *tmpsubj;
52465+ struct acl_object_label *retval;
52466+ struct acl_object_label *retval2;
52467+
52468+ tmpsubj = (struct acl_subject_label *) subj;
52469+ read_lock(&gr_inode_lock);
52470+ do {
52471+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
52472+ if (retval) {
52473+ if (checkglob && retval->globbed) {
4c928ab7 52474+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
52475+ if (retval2)
52476+ retval = retval2;
52477+ }
52478+ break;
52479+ }
52480+ } while ((tmpsubj = tmpsubj->parent_subject));
52481+ read_unlock(&gr_inode_lock);
52482+
52483+ return retval;
52484+}
52485+
52486+static __inline__ struct acl_object_label *
52487+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
16454cff 52488+ struct dentry *curr_dentry,
52489+ const struct acl_subject_label *subj, char **path, const int checkglob)
52490+{
bc901d79 52491+ int newglob = checkglob;
52492+ ino_t inode;
52493+ dev_t device;
52494+
52495+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
52496+ as we don't want a / * rule to match instead of the / object
52497+ don't do this for create lookups that call this function though, since they're looking up
52498+ on the parent and thus need globbing checks on all paths
52499+ */
52500+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
52501+ newglob = GR_NO_GLOB;
52502+
52503+ spin_lock(&curr_dentry->d_lock);
52504+ inode = curr_dentry->d_inode->i_ino;
52505+ device = __get_dev(curr_dentry);
52506+ spin_unlock(&curr_dentry->d_lock);
52507+
52508+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
52509+}
52510+
52511+static struct acl_object_label *
52512+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52513+ const struct acl_subject_label *subj, char *path, const int checkglob)
52514+{
52515+ struct dentry *dentry = (struct dentry *) l_dentry;
52516+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
5e856224 52517+ struct mount *real_mnt = real_mount(mnt);
58c5fc13 52518+ struct acl_object_label *retval;
16454cff 52519+ struct dentry *parent;
58c5fc13 52520+
16454cff 52521+ write_seqlock(&rename_lock);
bc901d79 52522+ br_read_lock(vfsmount_lock);
58c5fc13 52523+
52524+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
52525+#ifdef CONFIG_NET
52526+ mnt == sock_mnt ||
52527+#endif
df50ba0c 52528+#ifdef CONFIG_HUGETLBFS
71d190be 52529+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
df50ba0c 52530+#endif
52531+ /* ignore Eric Biederman */
52532+ IS_PRIVATE(l_dentry->d_inode))) {
15a11c5b 52533+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
52534+ goto out;
52535+ }
52536+
52537+ for (;;) {
6892158b 52538+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52539+ break;
52540+
52541+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
5e856224 52542+ if (!mnt_has_parent(real_mnt))
52543+ break;
52544+
52545+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52546+ if (retval != NULL)
52547+ goto out;
52548+
52549+ dentry = real_mnt->mnt_mountpoint;
52550+ real_mnt = real_mnt->mnt_parent;
52551+ mnt = &real_mnt->mnt;
52552+ continue;
52553+ }
52554+
16454cff 52555+ parent = dentry->d_parent;
52556+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52557+ if (retval != NULL)
52558+ goto out;
52559+
16454cff 52560+ dentry = parent;
52561+ }
52562+
52563+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
52564+
16454cff 52565+ /* real_root is pinned so we don't have to hold a reference */
58c5fc13 52566+ if (retval == NULL)
6892158b 52567+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
58c5fc13 52568+out:
bc901d79 52569+ br_read_unlock(vfsmount_lock);
16454cff 52570+ write_sequnlock(&rename_lock);
52571+
52572+ BUG_ON(retval == NULL);
52573+
52574+ return retval;
52575+}
52576+
52577+static __inline__ struct acl_object_label *
52578+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52579+ const struct acl_subject_label *subj)
52580+{
52581+ char *path = NULL;
bc901d79 52582+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
52583+}
52584+
52585+static __inline__ struct acl_object_label *
52586+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52587+ const struct acl_subject_label *subj)
52588+{
52589+ char *path = NULL;
bc901d79 52590+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
52591+}
52592+
52593+static __inline__ struct acl_object_label *
52594+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52595+ const struct acl_subject_label *subj, char *path)
52596+{
bc901d79 52597+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
52598+}
52599+
52600+static struct acl_subject_label *
52601+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
52602+ const struct acl_role_label *role)
52603+{
52604+ struct dentry *dentry = (struct dentry *) l_dentry;
52605+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
5e856224 52606+ struct mount *real_mnt = real_mount(mnt);
58c5fc13 52607+ struct acl_subject_label *retval;
16454cff 52608+ struct dentry *parent;
58c5fc13 52609+
16454cff 52610+ write_seqlock(&rename_lock);
bc901d79 52611+ br_read_lock(vfsmount_lock);
52612+
52613+ for (;;) {
6892158b 52614+ if (dentry == real_root.dentry && mnt == real_root.mnt)
52615+ break;
52616+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
5e856224 52617+ if (!mnt_has_parent(real_mnt))
52618+ break;
52619+
16454cff 52620+ spin_lock(&dentry->d_lock);
52621+ read_lock(&gr_inode_lock);
52622+ retval =
52623+ lookup_acl_subj_label(dentry->d_inode->i_ino,
16454cff 52624+ __get_dev(dentry), role);
58c5fc13 52625+ read_unlock(&gr_inode_lock);
16454cff 52626+ spin_unlock(&dentry->d_lock);
52627+ if (retval != NULL)
52628+ goto out;
52629+
52630+ dentry = real_mnt->mnt_mountpoint;
52631+ real_mnt = real_mnt->mnt_parent;
52632+ mnt = &real_mnt->mnt;
52633+ continue;
52634+ }
52635+
16454cff 52636+ spin_lock(&dentry->d_lock);
52637+ read_lock(&gr_inode_lock);
52638+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
16454cff 52639+ __get_dev(dentry), role);
58c5fc13 52640+ read_unlock(&gr_inode_lock);
52641+ parent = dentry->d_parent;
52642+ spin_unlock(&dentry->d_lock);
52643+
52644+ if (retval != NULL)
52645+ goto out;
52646+
16454cff 52647+ dentry = parent;
52648+ }
52649+
16454cff 52650+ spin_lock(&dentry->d_lock);
52651+ read_lock(&gr_inode_lock);
52652+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
16454cff 52653+ __get_dev(dentry), role);
58c5fc13 52654+ read_unlock(&gr_inode_lock);
16454cff 52655+ spin_unlock(&dentry->d_lock);
52656+
52657+ if (unlikely(retval == NULL)) {
16454cff 52658+ /* real_root is pinned, we don't need to hold a reference */
58c5fc13 52659+ read_lock(&gr_inode_lock);
6892158b 52660+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
16454cff 52661+ __get_dev(real_root.dentry), role);
52662+ read_unlock(&gr_inode_lock);
52663+ }
52664+out:
bc901d79 52665+ br_read_unlock(vfsmount_lock);
16454cff 52666+ write_sequnlock(&rename_lock);
58c5fc13 52667+
52668+ BUG_ON(retval == NULL);
52669+
52670+ return retval;
52671+}
52672+
52673+static void
52674+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
52675+{
52676+ struct task_struct *task = current;
52677+ const struct cred *cred = current_cred();
52678+
52679+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
52680+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52681+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
bc901d79 52682+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
52683+
52684+ return;
52685+}
52686+
52687+static void
52688+gr_log_learn_id_change(const char type, const unsigned int real,
52689+ const unsigned int effective, const unsigned int fs)
52690+{
52691+ struct task_struct *task = current;
52692+ const struct cred *cred = current_cred();
52693+
52694+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
52695+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
52696+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
bc901d79 52697+ type, real, effective, fs, &task->signal->saved_ip);
52698+
52699+ return;
52700+}
52701+
52702+__u32
52703+gr_search_file(const struct dentry * dentry, const __u32 mode,
52704+ const struct vfsmount * mnt)
52705+{
52706+ __u32 retval = mode;
52707+ struct acl_subject_label *curracl;
52708+ struct acl_object_label *currobj;
52709+
52710+ if (unlikely(!(gr_status & GR_READY)))
52711+ return (mode & ~GR_AUDITS);
52712+
52713+ curracl = current->acl;
52714+
52715+ currobj = chk_obj_label(dentry, mnt, curracl);
52716+ retval = currobj->mode & mode;
52717+
52718+ /* if we're opening a specified transfer file for writing
52719+ (e.g. /dev/initctl), then transfer our role to init
52720+ */
52721+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
52722+ current->role->roletype & GR_ROLE_PERSIST)) {
52723+ struct task_struct *task = init_pid_ns.child_reaper;
52724+
52725+ if (task->role != current->role) {
52726+ task->acl_sp_role = 0;
52727+ task->acl_role_id = current->acl_role_id;
52728+ task->role = current->role;
52729+ rcu_read_lock();
52730+ read_lock(&grsec_exec_file_lock);
52731+ gr_apply_subject_to_task(task);
52732+ read_unlock(&grsec_exec_file_lock);
52733+ rcu_read_unlock();
52734+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
52735+ }
52736+ }
52737+
52738+ if (unlikely
52739+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
52740+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
52741+ __u32 new_mode = mode;
52742+
52743+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52744+
52745+ retval = new_mode;
52746+
52747+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
52748+ new_mode |= GR_INHERIT;
52749+
52750+ if (!(mode & GR_NOLEARN))
52751+ gr_log_learn(dentry, mnt, new_mode);
52752+ }
52753+
52754+ return retval;
52755+}
52756+
52757+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
52758+ const struct dentry *parent,
52759+ const struct vfsmount *mnt)
52760+{
52761+ struct name_entry *match;
52762+ struct acl_object_label *matchpo;
52763+ struct acl_subject_label *curracl;
52764+ char *path;
52765+
52766+ if (unlikely(!(gr_status & GR_READY)))
6e9df6a3 52767+ return NULL;
52768+
52769+ preempt_disable();
52770+ path = gr_to_filename_rbac(new_dentry, mnt);
52771+ match = lookup_name_entry_create(path);
52772+
52773+ curracl = current->acl;
52774+
52775+ if (match) {
52776+ read_lock(&gr_inode_lock);
52777+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
52778+ read_unlock(&gr_inode_lock);
58c5fc13 52779+
6e9df6a3 52780+ if (matchpo) {
58c5fc13 52781+ preempt_enable();
6e9df6a3 52782+ return matchpo;
58c5fc13 52783+ }
52784+ }
52785+
6e9df6a3 52786+ // lookup parent
52787+
52788+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
52789+
52790+ preempt_enable();
52791+ return matchpo;
52792+}
52793+
52794+__u32
52795+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
52796+ const struct vfsmount * mnt, const __u32 mode)
52797+{
52798+ struct acl_object_label *matchpo;
52799+ __u32 retval;
52800+
52801+ if (unlikely(!(gr_status & GR_READY)))
52802+ return (mode & ~GR_AUDITS);
52803+
52804+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
52805+
52806+ retval = matchpo->mode & mode;
52807+
52808+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
6e9df6a3 52809+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
52810+ __u32 new_mode = mode;
52811+
52812+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
52813+
52814+ gr_log_learn(new_dentry, mnt, new_mode);
52815+ return new_mode;
52816+ }
52817+
52818+ return retval;
52819+}
52820+
52821+__u32
52822+gr_check_link(const struct dentry * new_dentry,
52823+ const struct dentry * parent_dentry,
52824+ const struct vfsmount * parent_mnt,
52825+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
52826+{
52827+ struct acl_object_label *obj;
52828+ __u32 oldmode, newmode;
52829+ __u32 needmode;
52830+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
52831+ GR_DELETE | GR_INHERIT;
52832+
52833+ if (unlikely(!(gr_status & GR_READY)))
52834+ return (GR_CREATE | GR_LINK);
52835+
52836+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
52837+ oldmode = obj->mode;
52838+
52839+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
52840+ newmode = obj->mode;
52841+
52842+ needmode = newmode & checkmodes;
52843+
52844+ // old name for hardlink must have at least the permissions of the new name
52845+ if ((oldmode & needmode) != needmode)
52846+ goto bad;
52847+
52848+ // if old name had restrictions/auditing, make sure the new name does as well
52849+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
52850+
52851+ // don't allow hardlinking of suid/sgid files without permission
52852+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52853+ needmode |= GR_SETID;
52854+
52855+ if ((newmode & needmode) != needmode)
52856+ goto bad;
52857+
52858+ // enforce minimum permissions
52859+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
52860+ return newmode;
52861+bad:
52862+ needmode = oldmode;
52863+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
52864+ needmode |= GR_SETID;
52865+
52866+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
52867+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
52868+ return (GR_CREATE | GR_LINK);
52869+ } else if (newmode & GR_SUPPRESS)
52870+ return GR_SUPPRESS;
52871+ else
52872+ return 0;
52873+}
52874+
52875+int
52876+gr_check_hidden_task(const struct task_struct *task)
52877+{
52878+ if (unlikely(!(gr_status & GR_READY)))
52879+ return 0;
52880+
52881+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
52882+ return 1;
52883+
52884+ return 0;
52885+}
52886+
52887+int
52888+gr_check_protected_task(const struct task_struct *task)
52889+{
52890+ if (unlikely(!(gr_status & GR_READY) || !task))
52891+ return 0;
52892+
52893+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52894+ task->acl != current->acl)
52895+ return 1;
52896+
52897+ return 0;
52898+}
52899+
52900+int
52901+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
52902+{
52903+ struct task_struct *p;
52904+ int ret = 0;
52905+
52906+ if (unlikely(!(gr_status & GR_READY) || !pid))
52907+ return ret;
52908+
52909+ read_lock(&tasklist_lock);
52910+ do_each_pid_task(pid, type, p) {
52911+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
52912+ p->acl != current->acl) {
52913+ ret = 1;
52914+ goto out;
52915+ }
52916+ } while_each_pid_task(pid, type, p);
52917+out:
52918+ read_unlock(&tasklist_lock);
52919+
52920+ return ret;
52921+}
52922+
52923+void
52924+gr_copy_label(struct task_struct *tsk)
52925+{
52926+ tsk->signal->used_accept = 0;
52927+ tsk->acl_sp_role = 0;
52928+ tsk->acl_role_id = current->acl_role_id;
52929+ tsk->acl = current->acl;
52930+ tsk->role = current->role;
58c5fc13 52931+ tsk->signal->curr_ip = current->signal->curr_ip;
bc901d79 52932+ tsk->signal->saved_ip = current->signal->saved_ip;
52933+ if (current->exec_file)
52934+ get_file(current->exec_file);
52935+ tsk->exec_file = current->exec_file;
52936+ tsk->is_writable = current->is_writable;
bc901d79 52937+ if (unlikely(current->signal->used_accept)) {
58c5fc13 52938+ current->signal->curr_ip = 0;
52939+ current->signal->saved_ip = 0;
52940+ }
52941+
52942+ return;
52943+}
52944+
52945+static void
52946+gr_set_proc_res(struct task_struct *task)
52947+{
52948+ struct acl_subject_label *proc;
52949+ unsigned short i;
52950+
52951+ proc = task->acl;
52952+
52953+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
52954+ return;
52955+
52956+ for (i = 0; i < RLIM_NLIMITS; i++) {
52957+ if (!(proc->resmask & (1 << i)))
52958+ continue;
52959+
52960+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
52961+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
52962+ }
52963+
52964+ return;
52965+}
52966+
52967+extern int __gr_process_user_ban(struct user_struct *user);
52968+
52969+int
52970+gr_check_user_change(int real, int effective, int fs)
52971+{
52972+ unsigned int i;
52973+ __u16 num;
52974+ uid_t *uidlist;
52975+ int curuid;
52976+ int realok = 0;
52977+ int effectiveok = 0;
52978+ int fsok = 0;
52979+
52980+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52981+ struct user_struct *user;
52982+
52983+ if (real == -1)
52984+ goto skipit;
52985+
52986+ user = find_user(real);
52987+ if (user == NULL)
52988+ goto skipit;
52989+
52990+ if (__gr_process_user_ban(user)) {
52991+ /* for find_user */
52992+ free_uid(user);
52993+ return 1;
52994+ }
52995+
52996+ /* for find_user */
52997+ free_uid(user);
52998+
52999+skipit:
53000+#endif
53001+
53002+ if (unlikely(!(gr_status & GR_READY)))
53003+ return 0;
53004+
53005+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53006+ gr_log_learn_id_change('u', real, effective, fs);
53007+
53008+ num = current->acl->user_trans_num;
53009+ uidlist = current->acl->user_transitions;
53010+
53011+ if (uidlist == NULL)
53012+ return 0;
53013+
53014+ if (real == -1)
53015+ realok = 1;
53016+ if (effective == -1)
53017+ effectiveok = 1;
53018+ if (fs == -1)
53019+ fsok = 1;
53020+
53021+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
53022+ for (i = 0; i < num; i++) {
53023+ curuid = (int)uidlist[i];
53024+ if (real == curuid)
53025+ realok = 1;
53026+ if (effective == curuid)
53027+ effectiveok = 1;
53028+ if (fs == curuid)
53029+ fsok = 1;
53030+ }
53031+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
53032+ for (i = 0; i < num; i++) {
53033+ curuid = (int)uidlist[i];
53034+ if (real == curuid)
53035+ break;
53036+ if (effective == curuid)
53037+ break;
53038+ if (fs == curuid)
53039+ break;
53040+ }
53041+ /* not in deny list */
53042+ if (i == num) {
53043+ realok = 1;
53044+ effectiveok = 1;
53045+ fsok = 1;
53046+ }
53047+ }
53048+
53049+ if (realok && effectiveok && fsok)
53050+ return 0;
53051+ else {
53052+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53053+ return 1;
53054+ }
53055+}
53056+
53057+int
53058+gr_check_group_change(int real, int effective, int fs)
53059+{
53060+ unsigned int i;
53061+ __u16 num;
53062+ gid_t *gidlist;
53063+ int curgid;
53064+ int realok = 0;
53065+ int effectiveok = 0;
53066+ int fsok = 0;
53067+
53068+ if (unlikely(!(gr_status & GR_READY)))
53069+ return 0;
53070+
53071+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
53072+ gr_log_learn_id_change('g', real, effective, fs);
53073+
53074+ num = current->acl->group_trans_num;
53075+ gidlist = current->acl->group_transitions;
53076+
53077+ if (gidlist == NULL)
53078+ return 0;
53079+
53080+ if (real == -1)
53081+ realok = 1;
53082+ if (effective == -1)
53083+ effectiveok = 1;
53084+ if (fs == -1)
53085+ fsok = 1;
53086+
53087+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
53088+ for (i = 0; i < num; i++) {
53089+ curgid = (int)gidlist[i];
53090+ if (real == curgid)
53091+ realok = 1;
53092+ if (effective == curgid)
53093+ effectiveok = 1;
53094+ if (fs == curgid)
53095+ fsok = 1;
53096+ }
53097+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
53098+ for (i = 0; i < num; i++) {
53099+ curgid = (int)gidlist[i];
53100+ if (real == curgid)
53101+ break;
53102+ if (effective == curgid)
53103+ break;
53104+ if (fs == curgid)
53105+ break;
53106+ }
53107+ /* not in deny list */
53108+ if (i == num) {
53109+ realok = 1;
53110+ effectiveok = 1;
53111+ fsok = 1;
53112+ }
53113+ }
53114+
53115+ if (realok && effectiveok && fsok)
53116+ return 0;
53117+ else {
53118+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
53119+ return 1;
53120+ }
53121+}
53122+
53123+extern int gr_acl_is_capable(const int cap);
53124+
53125+void
53126+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
53127+{
53128+ struct acl_role_label *role = task->role;
53129+ struct acl_subject_label *subj = NULL;
53130+ struct acl_object_label *obj;
53131+ struct file *filp;
53132+
53133+ if (unlikely(!(gr_status & GR_READY)))
53134+ return;
53135+
53136+ filp = task->exec_file;
53137+
53138+ /* kernel process, we'll give them the kernel role */
53139+ if (unlikely(!filp)) {
53140+ task->role = kernel_role;
53141+ task->acl = kernel_role->root_label;
53142+ return;
53143+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
53144+ role = lookup_acl_role_label(task, uid, gid);
53145+
53146+ /* don't change the role if we're not a privileged process */
53147+ if (role && task->role != role &&
53148+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
53149+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
53150+ return;
53151+
53152+ /* perform subject lookup in possibly new role
53153+ we can use this result below in the case where role == task->role
53154+ */
53155+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
53156+
53157+ /* if we changed uid/gid, but result in the same role
53158+ and are using inheritance, don't lose the inherited subject
53159+ if current subject is other than what normal lookup
53160+ would result in, we arrived via inheritance, don't
53161+ lose subject
53162+ */
53163+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
53164+ (subj == task->acl)))
53165+ task->acl = subj;
53166+
53167+ task->role = role;
53168+
53169+ task->is_writable = 0;
53170+
53171+ /* ignore additional mmap checks for processes that are writable
53172+ by the default ACL */
53173+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53174+ if (unlikely(obj->mode & GR_WRITE))
53175+ task->is_writable = 1;
53176+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
53177+ if (unlikely(obj->mode & GR_WRITE))
53178+ task->is_writable = 1;
53179+
16454cff 53180+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53181+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53182+#endif
53183+
53184+ gr_set_proc_res(task);
53185+
53186+ return;
53187+}
53188+
53189+int
53190+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
4c928ab7 53191+ const int unsafe_flags)
53192+{
53193+ struct task_struct *task = current;
53194+ struct acl_subject_label *newacl;
53195+ struct acl_object_label *obj;
53196+ __u32 retmode;
53197+
53198+ if (unlikely(!(gr_status & GR_READY)))
53199+ return 0;
53200+
53201+ newacl = chk_subj_label(dentry, mnt, task->role);
53202+
53203+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
53204+ did an exec
53205+ */
53206+ rcu_read_lock();
53207+ read_lock(&tasklist_lock);
53208+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
53209+ (task->parent->acl->mode & GR_POVERRIDE))) {
53210+ read_unlock(&tasklist_lock);
53211+ rcu_read_unlock();
53212+ goto skip_check;
53213+ }
53214+ read_unlock(&tasklist_lock);
53215+ rcu_read_unlock();
53216+
4c928ab7 53217+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
53218+ !(task->role->roletype & GR_ROLE_GOD) &&
53219+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
4c928ab7 53220+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
4c928ab7 53221+ if (unsafe_flags & LSM_UNSAFE_SHARE)
53222+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
53223+ else
53224+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
53225+ return -EACCES;
53226+ }
53227+
53228+skip_check:
53229+
53230+ obj = chk_obj_label(dentry, mnt, task->acl);
53231+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
53232+
53233+ if (!(task->acl->mode & GR_INHERITLEARN) &&
53234+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
53235+ if (obj->nested)
53236+ task->acl = obj->nested;
53237+ else
53238+ task->acl = newacl;
53239+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
53240+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
53241+
53242+ task->is_writable = 0;
53243+
53244+ /* ignore additional mmap checks for processes that are writable
53245+ by the default ACL */
53246+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
53247+ if (unlikely(obj->mode & GR_WRITE))
53248+ task->is_writable = 1;
53249+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
53250+ if (unlikely(obj->mode & GR_WRITE))
53251+ task->is_writable = 1;
53252+
53253+ gr_set_proc_res(task);
53254+
16454cff 53255+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53256+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
53257+#endif
53258+ return 0;
53259+}
53260+
53261+/* always called with valid inodev ptr */
53262+static void
53263+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
53264+{
53265+ struct acl_object_label *matchpo;
53266+ struct acl_subject_label *matchps;
53267+ struct acl_subject_label *subj;
53268+ struct acl_role_label *role;
ae4e228f 53269+ unsigned int x;
58c5fc13 53270+
ae4e228f 53271+ FOR_EACH_ROLE_START(role)
53272+ FOR_EACH_SUBJECT_START(role, subj, x)
53273+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
53274+ matchpo->mode |= GR_DELETED;
53275+ FOR_EACH_SUBJECT_END(subj,x)
53276+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53277+ if (subj->inode == ino && subj->device == dev)
53278+ subj->mode |= GR_DELETED;
53279+ FOR_EACH_NESTED_SUBJECT_END(subj)
53280+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
53281+ matchps->mode |= GR_DELETED;
ae4e228f 53282+ FOR_EACH_ROLE_END(role)
53283+
53284+ inodev->nentry->deleted = 1;
53285+
53286+ return;
53287+}
53288+
53289+void
53290+gr_handle_delete(const ino_t ino, const dev_t dev)
53291+{
53292+ struct inodev_entry *inodev;
53293+
53294+ if (unlikely(!(gr_status & GR_READY)))
53295+ return;
53296+
53297+ write_lock(&gr_inode_lock);
53298+ inodev = lookup_inodev_entry(ino, dev);
53299+ if (inodev != NULL)
53300+ do_handle_delete(inodev, ino, dev);
53301+ write_unlock(&gr_inode_lock);
53302+
53303+ return;
53304+}
53305+
53306+static void
53307+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
53308+ const ino_t newinode, const dev_t newdevice,
53309+ struct acl_subject_label *subj)
53310+{
53311+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
53312+ struct acl_object_label *match;
53313+
53314+ match = subj->obj_hash[index];
53315+
53316+ while (match && (match->inode != oldinode ||
53317+ match->device != olddevice ||
53318+ !(match->mode & GR_DELETED)))
53319+ match = match->next;
53320+
53321+ if (match && (match->inode == oldinode)
53322+ && (match->device == olddevice)
53323+ && (match->mode & GR_DELETED)) {
53324+ if (match->prev == NULL) {
53325+ subj->obj_hash[index] = match->next;
53326+ if (match->next != NULL)
53327+ match->next->prev = NULL;
53328+ } else {
53329+ match->prev->next = match->next;
53330+ if (match->next != NULL)
53331+ match->next->prev = match->prev;
53332+ }
53333+ match->prev = NULL;
53334+ match->next = NULL;
53335+ match->inode = newinode;
53336+ match->device = newdevice;
53337+ match->mode &= ~GR_DELETED;
53338+
53339+ insert_acl_obj_label(match, subj);
53340+ }
53341+
53342+ return;
53343+}
53344+
53345+static void
53346+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
53347+ const ino_t newinode, const dev_t newdevice,
53348+ struct acl_role_label *role)
53349+{
53350+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
53351+ struct acl_subject_label *match;
53352+
53353+ match = role->subj_hash[index];
53354+
53355+ while (match && (match->inode != oldinode ||
53356+ match->device != olddevice ||
53357+ !(match->mode & GR_DELETED)))
53358+ match = match->next;
53359+
53360+ if (match && (match->inode == oldinode)
53361+ && (match->device == olddevice)
53362+ && (match->mode & GR_DELETED)) {
53363+ if (match->prev == NULL) {
53364+ role->subj_hash[index] = match->next;
53365+ if (match->next != NULL)
53366+ match->next->prev = NULL;
53367+ } else {
53368+ match->prev->next = match->next;
53369+ if (match->next != NULL)
53370+ match->next->prev = match->prev;
53371+ }
53372+ match->prev = NULL;
53373+ match->next = NULL;
53374+ match->inode = newinode;
53375+ match->device = newdevice;
53376+ match->mode &= ~GR_DELETED;
53377+
53378+ insert_acl_subj_label(match, role);
53379+ }
53380+
53381+ return;
53382+}
53383+
53384+static void
53385+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
53386+ const ino_t newinode, const dev_t newdevice)
53387+{
53388+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
53389+ struct inodev_entry *match;
53390+
53391+ match = inodev_set.i_hash[index];
53392+
53393+ while (match && (match->nentry->inode != oldinode ||
53394+ match->nentry->device != olddevice || !match->nentry->deleted))
53395+ match = match->next;
53396+
53397+ if (match && (match->nentry->inode == oldinode)
53398+ && (match->nentry->device == olddevice) &&
53399+ match->nentry->deleted) {
53400+ if (match->prev == NULL) {
53401+ inodev_set.i_hash[index] = match->next;
53402+ if (match->next != NULL)
53403+ match->next->prev = NULL;
53404+ } else {
53405+ match->prev->next = match->next;
53406+ if (match->next != NULL)
53407+ match->next->prev = match->prev;
53408+ }
53409+ match->prev = NULL;
53410+ match->next = NULL;
53411+ match->nentry->inode = newinode;
53412+ match->nentry->device = newdevice;
53413+ match->nentry->deleted = 0;
53414+
53415+ insert_inodev_entry(match);
53416+ }
53417+
53418+ return;
53419+}
53420+
53421+static void
6e9df6a3 53422+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
53423+{
53424+ struct acl_subject_label *subj;
53425+ struct acl_role_label *role;
ae4e228f 53426+ unsigned int x;
6e9df6a3 53427+
ae4e228f 53428+ FOR_EACH_ROLE_START(role)
16454cff 53429+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
53430+
53431+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
53432+ if ((subj->inode == ino) && (subj->device == dev)) {
53433+ subj->inode = ino;
53434+ subj->device = dev;
53435+ }
53436+ FOR_EACH_NESTED_SUBJECT_END(subj)
53437+ FOR_EACH_SUBJECT_START(role, subj, x)
53438+ update_acl_obj_label(matchn->inode, matchn->device,
16454cff 53439+ ino, dev, subj);
58c5fc13 53440+ FOR_EACH_SUBJECT_END(subj,x)
ae4e228f 53441+ FOR_EACH_ROLE_END(role)
58c5fc13 53442+
16454cff 53443+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
53444+
53445+ return;
53446+}
53447+
53448+static void
53449+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
53450+ const struct vfsmount *mnt)
53451+{
53452+ ino_t ino = dentry->d_inode->i_ino;
53453+ dev_t dev = __get_dev(dentry);
53454+
53455+ __do_handle_create(matchn, ino, dev);
53456+
53457+ return;
53458+}
53459+
53460+void
53461+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53462+{
53463+ struct name_entry *matchn;
53464+
53465+ if (unlikely(!(gr_status & GR_READY)))
53466+ return;
53467+
53468+ preempt_disable();
53469+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
53470+
53471+ if (unlikely((unsigned long)matchn)) {
53472+ write_lock(&gr_inode_lock);
53473+ do_handle_create(matchn, dentry, mnt);
53474+ write_unlock(&gr_inode_lock);
53475+ }
53476+ preempt_enable();
53477+
53478+ return;
53479+}
53480+
53481+void
53482+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
53483+{
53484+ struct name_entry *matchn;
53485+
53486+ if (unlikely(!(gr_status & GR_READY)))
53487+ return;
53488+
53489+ preempt_disable();
53490+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
53491+
53492+ if (unlikely((unsigned long)matchn)) {
53493+ write_lock(&gr_inode_lock);
53494+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
53495+ write_unlock(&gr_inode_lock);
53496+ }
53497+ preempt_enable();
53498+
53499+ return;
53500+}
53501+
53502+void
53503+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53504+ struct dentry *old_dentry,
53505+ struct dentry *new_dentry,
53506+ struct vfsmount *mnt, const __u8 replace)
53507+{
53508+ struct name_entry *matchn;
53509+ struct inodev_entry *inodev;
6e9df6a3 53510+ struct inode *inode = new_dentry->d_inode;
53511+ ino_t old_ino = old_dentry->d_inode->i_ino;
53512+ dev_t old_dev = __get_dev(old_dentry);
53513+
53514+ /* vfs_rename swaps the name and parent link for old_dentry and
53515+ new_dentry
53516+ at this point, old_dentry has the new name, parent link, and inode
53517+ for the renamed file
53518+ if a file is being replaced by a rename, new_dentry has the inode
53519+ and name for the replaced file
53520+ */
53521+
53522+ if (unlikely(!(gr_status & GR_READY)))
53523+ return;
53524+
53525+ preempt_disable();
53526+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
53527+
53528+ /* we wouldn't have to check d_inode if it weren't for
53529+ NFS silly-renaming
53530+ */
53531+
53532+ write_lock(&gr_inode_lock);
53533+ if (unlikely(replace && inode)) {
53534+ ino_t new_ino = inode->i_ino;
53535+ dev_t new_dev = __get_dev(new_dentry);
53536+
53537+ inodev = lookup_inodev_entry(new_ino, new_dev);
6e9df6a3 53538+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
16454cff 53539+ do_handle_delete(inodev, new_ino, new_dev);
53540+ }
53541+
16454cff 53542+ inodev = lookup_inodev_entry(old_ino, old_dev);
6e9df6a3 53543+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
16454cff 53544+ do_handle_delete(inodev, old_ino, old_dev);
53545+
53546+ if (unlikely((unsigned long)matchn))
53547+ do_handle_create(matchn, old_dentry, mnt);
53548+
53549+ write_unlock(&gr_inode_lock);
53550+ preempt_enable();
53551+
53552+ return;
53553+}
53554+
53555+static int
53556+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
53557+ unsigned char **sum)
53558+{
53559+ struct acl_role_label *r;
53560+ struct role_allowed_ip *ipp;
53561+ struct role_transition *trans;
53562+ unsigned int i;
53563+ int found = 0;
53564+ u32 curr_ip = current->signal->curr_ip;
53565+
53566+ current->signal->saved_ip = curr_ip;
53567+
53568+ /* check transition table */
53569+
53570+ for (trans = current->role->transitions; trans; trans = trans->next) {
53571+ if (!strcmp(rolename, trans->rolename)) {
53572+ found = 1;
53573+ break;
53574+ }
53575+ }
53576+
53577+ if (!found)
53578+ return 0;
53579+
53580+ /* handle special roles that do not require authentication
53581+ and check ip */
53582+
ae4e228f 53583+ FOR_EACH_ROLE_START(r)
53584+ if (!strcmp(rolename, r->rolename) &&
53585+ (r->roletype & GR_ROLE_SPECIAL)) {
53586+ found = 0;
53587+ if (r->allowed_ips != NULL) {
53588+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
bc901d79 53589+ if ((ntohl(curr_ip) & ipp->netmask) ==
53590+ (ntohl(ipp->addr) & ipp->netmask))
53591+ found = 1;
53592+ }
53593+ } else
53594+ found = 2;
53595+ if (!found)
53596+ return 0;
53597+
53598+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
53599+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
53600+ *salt = NULL;
53601+ *sum = NULL;
53602+ return 1;
53603+ }
53604+ }
ae4e228f 53605+ FOR_EACH_ROLE_END(r)
53606+
53607+ for (i = 0; i < num_sprole_pws; i++) {
53608+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
53609+ *salt = acl_special_roles[i]->salt;
53610+ *sum = acl_special_roles[i]->sum;
53611+ return 1;
53612+ }
53613+ }
53614+
53615+ return 0;
53616+}
53617+
53618+static void
53619+assign_special_role(char *rolename)
53620+{
53621+ struct acl_object_label *obj;
53622+ struct acl_role_label *r;
53623+ struct acl_role_label *assigned = NULL;
53624+ struct task_struct *tsk;
53625+ struct file *filp;
58c5fc13 53626+
ae4e228f 53627+ FOR_EACH_ROLE_START(r)
58c5fc13 53628+ if (!strcmp(rolename, r->rolename) &&
ae4e228f 53629+ (r->roletype & GR_ROLE_SPECIAL)) {
58c5fc13 53630+ assigned = r;
53631+ break;
53632+ }
53633+ FOR_EACH_ROLE_END(r)
53634+
53635+ if (!assigned)
53636+ return;
53637+
53638+ read_lock(&tasklist_lock);
53639+ read_lock(&grsec_exec_file_lock);
53640+
6892158b 53641+ tsk = current->real_parent;
53642+ if (tsk == NULL)
53643+ goto out_unlock;
53644+
53645+ filp = tsk->exec_file;
53646+ if (filp == NULL)
53647+ goto out_unlock;
53648+
53649+ tsk->is_writable = 0;
53650+
53651+ tsk->acl_sp_role = 1;
53652+ tsk->acl_role_id = ++acl_sp_role_value;
53653+ tsk->role = assigned;
53654+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
53655+
53656+ /* ignore additional mmap checks for processes that are writable
53657+ by the default ACL */
53658+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
53659+ if (unlikely(obj->mode & GR_WRITE))
53660+ tsk->is_writable = 1;
53661+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
53662+ if (unlikely(obj->mode & GR_WRITE))
53663+ tsk->is_writable = 1;
53664+
16454cff 53665+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
53666+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
53667+#endif
53668+
53669+out_unlock:
53670+ read_unlock(&grsec_exec_file_lock);
53671+ read_unlock(&tasklist_lock);
53672+ return;
53673+}
53674+
53675+int gr_check_secure_terminal(struct task_struct *task)
53676+{
53677+ struct task_struct *p, *p2, *p3;
53678+ struct files_struct *files;
53679+ struct fdtable *fdt;
53680+ struct file *our_file = NULL, *file;
53681+ int i;
53682+
53683+ if (task->signal->tty == NULL)
53684+ return 1;
53685+
53686+ files = get_files_struct(task);
53687+ if (files != NULL) {
53688+ rcu_read_lock();
53689+ fdt = files_fdtable(files);
53690+ for (i=0; i < fdt->max_fds; i++) {
53691+ file = fcheck_files(files, i);
53692+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
53693+ get_file(file);
53694+ our_file = file;
53695+ }
53696+ }
53697+ rcu_read_unlock();
53698+ put_files_struct(files);
53699+ }
53700+
53701+ if (our_file == NULL)
53702+ return 1;
53703+
53704+ read_lock(&tasklist_lock);
53705+ do_each_thread(p2, p) {
53706+ files = get_files_struct(p);
53707+ if (files == NULL ||
53708+ (p->signal && p->signal->tty == task->signal->tty)) {
53709+ if (files != NULL)
53710+ put_files_struct(files);
53711+ continue;
53712+ }
53713+ rcu_read_lock();
53714+ fdt = files_fdtable(files);
53715+ for (i=0; i < fdt->max_fds; i++) {
53716+ file = fcheck_files(files, i);
53717+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
53718+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
53719+ p3 = task;
53720+ while (p3->pid > 0) {
53721+ if (p3 == p)
53722+ break;
6892158b 53723+ p3 = p3->real_parent;
53724+ }
53725+ if (p3 == p)
53726+ break;
53727+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
53728+ gr_handle_alertkill(p);
53729+ rcu_read_unlock();
53730+ put_files_struct(files);
53731+ read_unlock(&tasklist_lock);
53732+ fput(our_file);
53733+ return 0;
53734+ }
53735+ }
53736+ rcu_read_unlock();
53737+ put_files_struct(files);
53738+ } while_each_thread(p2, p);
53739+ read_unlock(&tasklist_lock);
53740+
53741+ fput(our_file);
53742+ return 1;
53743+}
53744+
53745+static int gr_rbac_disable(void *unused)
53746+{
53747+ pax_open_kernel();
53748+ gr_status &= ~GR_READY;
53749+ pax_close_kernel();
53750+
53751+ return 0;
53752+}
53753+
53754+ssize_t
53755+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
53756+{
53757+ struct gr_arg_wrapper uwrap;
53758+ unsigned char *sprole_salt = NULL;
53759+ unsigned char *sprole_sum = NULL;
53760+ int error = sizeof (struct gr_arg_wrapper);
53761+ int error2 = 0;
53762+
bc901d79 53763+ mutex_lock(&gr_dev_mutex);
53764+
53765+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
53766+ error = -EPERM;
53767+ goto out;
53768+ }
53769+
53770+ if (count != sizeof (struct gr_arg_wrapper)) {
53771+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
53772+ error = -EINVAL;
53773+ goto out;
53774+ }
53775+
53776+
53777+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
53778+ gr_auth_expires = 0;
53779+ gr_auth_attempts = 0;
53780+ }
53781+
53782+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
53783+ error = -EFAULT;
53784+ goto out;
53785+ }
53786+
53787+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
53788+ error = -EINVAL;
53789+ goto out;
53790+ }
53791+
53792+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
53793+ error = -EFAULT;
53794+ goto out;
53795+ }
53796+
53797+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53798+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53799+ time_after(gr_auth_expires, get_seconds())) {
53800+ error = -EBUSY;
53801+ goto out;
53802+ }
53803+
53804+ /* if non-root trying to do anything other than use a special role,
53805+ do not attempt authentication, do not count towards authentication
53806+ locking
53807+ */
53808+
53809+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
53810+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
53811+ current_uid()) {
53812+ error = -EPERM;
53813+ goto out;
53814+ }
53815+
53816+ /* ensure pw and special role name are null terminated */
53817+
53818+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
53819+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
53820+
53821+ /* Okay.
53822+ * We have enough of the argument structure (we have yet
53823+ * to copy_from_user the tables themselves). Copy the tables
53824+ * only if we need them, i.e. for loading operations. */
53825+
53826+ switch (gr_usermode->mode) {
53827+ case GR_STATUS:
53828+ if (gr_status & GR_READY) {
53829+ error = 1;
53830+ if (!gr_check_secure_terminal(current))
53831+ error = 3;
53832+ } else
53833+ error = 2;
53834+ goto out;
53835+ case GR_SHUTDOWN:
53836+ if ((gr_status & GR_READY)
53837+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
572b4308 53838+ stop_machine(gr_rbac_disable, NULL, NULL);
53839+ free_variables();
53840+ memset(gr_usermode, 0, sizeof (struct gr_arg));
53841+ memset(gr_system_salt, 0, GR_SALT_LEN);
53842+ memset(gr_system_sum, 0, GR_SHA_LEN);
572b4308 53843+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
53844+ } else if (gr_status & GR_READY) {
53845+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
53846+ error = -EPERM;
53847+ } else {
53848+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
53849+ error = -EAGAIN;
53850+ }
53851+ break;
53852+ case GR_ENABLE:
53853+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
53854+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
53855+ else {
53856+ if (gr_status & GR_READY)
53857+ error = -EAGAIN;
53858+ else
53859+ error = error2;
53860+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
53861+ }
53862+ break;
53863+ case GR_RELOAD:
53864+ if (!(gr_status & GR_READY)) {
53865+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
53866+ error = -EAGAIN;
53867+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
572b4308 53868+ stop_machine(gr_rbac_disable, NULL, NULL);
58c5fc13 53869+ free_variables();
53870+ error2 = gracl_init(gr_usermode);
53871+ if (!error2)
58c5fc13 53872+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
572b4308 53873+ else {
58c5fc13 53874+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
572b4308 53875+ error = error2;
53876+ }
53877+ } else {
53878+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
53879+ error = -EPERM;
53880+ }
53881+ break;
53882+ case GR_SEGVMOD:
53883+ if (unlikely(!(gr_status & GR_READY))) {
53884+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
53885+ error = -EAGAIN;
53886+ break;
53887+ }
53888+
53889+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
53890+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
53891+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
53892+ struct acl_subject_label *segvacl;
53893+ segvacl =
53894+ lookup_acl_subj_label(gr_usermode->segv_inode,
53895+ gr_usermode->segv_device,
53896+ current->role);
53897+ if (segvacl) {
53898+ segvacl->crashes = 0;
53899+ segvacl->expires = 0;
53900+ }
53901+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
53902+ gr_remove_uid(gr_usermode->segv_uid);
53903+ }
53904+ } else {
53905+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
53906+ error = -EPERM;
53907+ }
53908+ break;
53909+ case GR_SPROLE:
53910+ case GR_SPROLEPAM:
53911+ if (unlikely(!(gr_status & GR_READY))) {
53912+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
53913+ error = -EAGAIN;
53914+ break;
53915+ }
53916+
53917+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
53918+ current->role->expires = 0;
53919+ current->role->auth_attempts = 0;
53920+ }
53921+
53922+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
53923+ time_after(current->role->expires, get_seconds())) {
53924+ error = -EBUSY;
53925+ goto out;
53926+ }
53927+
53928+ if (lookup_special_role_auth
53929+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
53930+ && ((!sprole_salt && !sprole_sum)
53931+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
53932+ char *p = "";
53933+ assign_special_role(gr_usermode->sp_role);
53934+ read_lock(&tasklist_lock);
53935+ if (current->real_parent)
53936+ p = current->real_parent->role->rolename;
53937+ read_unlock(&tasklist_lock);
53938+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
53939+ p, acl_sp_role_value);
53940+ } else {
53941+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
53942+ error = -EPERM;
53943+ if(!(current->role->auth_attempts++))
53944+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53945+
53946+ goto out;
53947+ }
53948+ break;
53949+ case GR_UNSPROLE:
53950+ if (unlikely(!(gr_status & GR_READY))) {
53951+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
53952+ error = -EAGAIN;
53953+ break;
53954+ }
53955+
53956+ if (current->role->roletype & GR_ROLE_SPECIAL) {
53957+ char *p = "";
53958+ int i = 0;
53959+
53960+ read_lock(&tasklist_lock);
6892158b
MT
53961+ if (current->real_parent) {
53962+ p = current->real_parent->role->rolename;
53963+ i = current->real_parent->acl_role_id;
58c5fc13
MT
53964+ }
53965+ read_unlock(&tasklist_lock);
53966+
53967+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
53968+ gr_set_acls(1);
53969+ } else {
58c5fc13
MT
53970+ error = -EPERM;
53971+ goto out;
53972+ }
53973+ break;
53974+ default:
53975+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
53976+ error = -EINVAL;
53977+ break;
53978+ }
53979+
53980+ if (error != -EPERM)
53981+ goto out;
53982+
53983+ if(!(gr_auth_attempts++))
53984+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
53985+
53986+ out:
bc901d79 53987+ mutex_unlock(&gr_dev_mutex);
58c5fc13
MT
53988+ return error;
53989+}
53990+
16454cff
MT
53991+/* must be called with
53992+ rcu_read_lock();
53993+ read_lock(&tasklist_lock);
53994+ read_lock(&grsec_exec_file_lock);
53995+*/
53996+int gr_apply_subject_to_task(struct task_struct *task)
53997+{
53998+ struct acl_object_label *obj;
53999+ char *tmpname;
54000+ struct acl_subject_label *tmpsubj;
54001+ struct file *filp;
54002+ struct name_entry *nmatch;
54003+
54004+ filp = task->exec_file;
54005+ if (filp == NULL)
54006+ return 0;
54007+
54008+ /* the following is to apply the correct subject
54009+ on binaries running when the RBAC system
54010+ is enabled, when the binaries have been
54011+ replaced or deleted since their execution
54012+ -----
54013+ when the RBAC system starts, the inode/dev
54014+ from exec_file will be one the RBAC system
54015+ is unaware of. It only knows the inode/dev
54016+ of the present file on disk, or the absence
54017+ of it.
54018+ */
54019+ preempt_disable();
54020+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
54021+
54022+ nmatch = lookup_name_entry(tmpname);
54023+ preempt_enable();
54024+ tmpsubj = NULL;
54025+ if (nmatch) {
54026+ if (nmatch->deleted)
54027+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
54028+ else
54029+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
54030+ if (tmpsubj != NULL)
54031+ task->acl = tmpsubj;
54032+ }
54033+ if (tmpsubj == NULL)
54034+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
54035+ task->role);
54036+ if (task->acl) {
16454cff
MT
54037+ task->is_writable = 0;
54038+ /* ignore additional mmap checks for processes that are writable
54039+ by the default ACL */
54040+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54041+ if (unlikely(obj->mode & GR_WRITE))
54042+ task->is_writable = 1;
54043+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
54044+ if (unlikely(obj->mode & GR_WRITE))
54045+ task->is_writable = 1;
54046+
54047+ gr_set_proc_res(task);
54048+
54049+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
54050+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
54051+#endif
54052+ } else {
54053+ return 1;
54054+ }
54055+
54056+ return 0;
54057+}
54058+
58c5fc13
MT
54059+int
54060+gr_set_acls(const int type)
54061+{
58c5fc13 54062+ struct task_struct *task, *task2;
58c5fc13
MT
54063+ struct acl_role_label *role = current->role;
54064+ __u16 acl_role_id = current->acl_role_id;
54065+ const struct cred *cred;
16454cff 54066+ int ret;
58c5fc13 54067+
ae4e228f 54068+ rcu_read_lock();
58c5fc13
MT
54069+ read_lock(&tasklist_lock);
54070+ read_lock(&grsec_exec_file_lock);
54071+ do_each_thread(task2, task) {
54072+ /* check to see if we're called from the exit handler,
54073+ if so, only replace ACLs that have inherited the admin
54074+ ACL */
54075+
54076+ if (type && (task->role != role ||
54077+ task->acl_role_id != acl_role_id))
54078+ continue;
54079+
54080+ task->acl_role_id = 0;
54081+ task->acl_sp_role = 0;
54082+
16454cff 54083+ if (task->exec_file) {
58c5fc13
MT
54084+ cred = __task_cred(task);
54085+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
16454cff
MT
54086+ ret = gr_apply_subject_to_task(task);
54087+ if (ret) {
58c5fc13
MT
54088+ read_unlock(&grsec_exec_file_lock);
54089+ read_unlock(&tasklist_lock);
ae4e228f 54090+ rcu_read_unlock();
58c5fc13 54091+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
16454cff 54092+ return ret;
58c5fc13
MT
54093+ }
54094+ } else {
54095+ // it's a kernel process
54096+ task->role = kernel_role;
54097+ task->acl = kernel_role->root_label;
54098+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
54099+ task->acl->mode &= ~GR_PROCFIND;
54100+#endif
54101+ }
54102+ } while_each_thread(task2, task);
54103+ read_unlock(&grsec_exec_file_lock);
54104+ read_unlock(&tasklist_lock);
ae4e228f
MT
54105+ rcu_read_unlock();
54106+
58c5fc13
MT
54107+ return 0;
54108+}
54109+
54110+void
54111+gr_learn_resource(const struct task_struct *task,
54112+ const int res, const unsigned long wanted, const int gt)
54113+{
54114+ struct acl_subject_label *acl;
54115+ const struct cred *cred;
54116+
54117+ if (unlikely((gr_status & GR_READY) &&
54118+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
54119+ goto skip_reslog;
54120+
54121+#ifdef CONFIG_GRKERNSEC_RESLOG
54122+ gr_log_resource(task, res, wanted, gt);
54123+#endif
54124+ skip_reslog:
54125+
54126+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
54127+ return;
54128+
54129+ acl = task->acl;
54130+
54131+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
54132+ !(acl->resmask & (1 << (unsigned short) res))))
54133+ return;
54134+
54135+ if (wanted >= acl->res[res].rlim_cur) {
54136+ unsigned long res_add;
54137+
54138+ res_add = wanted;
54139+ switch (res) {
54140+ case RLIMIT_CPU:
54141+ res_add += GR_RLIM_CPU_BUMP;
54142+ break;
54143+ case RLIMIT_FSIZE:
54144+ res_add += GR_RLIM_FSIZE_BUMP;
54145+ break;
54146+ case RLIMIT_DATA:
54147+ res_add += GR_RLIM_DATA_BUMP;
54148+ break;
54149+ case RLIMIT_STACK:
54150+ res_add += GR_RLIM_STACK_BUMP;
54151+ break;
54152+ case RLIMIT_CORE:
54153+ res_add += GR_RLIM_CORE_BUMP;
54154+ break;
54155+ case RLIMIT_RSS:
54156+ res_add += GR_RLIM_RSS_BUMP;
54157+ break;
54158+ case RLIMIT_NPROC:
54159+ res_add += GR_RLIM_NPROC_BUMP;
54160+ break;
54161+ case RLIMIT_NOFILE:
54162+ res_add += GR_RLIM_NOFILE_BUMP;
54163+ break;
54164+ case RLIMIT_MEMLOCK:
54165+ res_add += GR_RLIM_MEMLOCK_BUMP;
54166+ break;
54167+ case RLIMIT_AS:
54168+ res_add += GR_RLIM_AS_BUMP;
54169+ break;
54170+ case RLIMIT_LOCKS:
54171+ res_add += GR_RLIM_LOCKS_BUMP;
54172+ break;
54173+ case RLIMIT_SIGPENDING:
54174+ res_add += GR_RLIM_SIGPENDING_BUMP;
54175+ break;
54176+ case RLIMIT_MSGQUEUE:
54177+ res_add += GR_RLIM_MSGQUEUE_BUMP;
54178+ break;
54179+ case RLIMIT_NICE:
54180+ res_add += GR_RLIM_NICE_BUMP;
54181+ break;
54182+ case RLIMIT_RTPRIO:
54183+ res_add += GR_RLIM_RTPRIO_BUMP;
54184+ break;
54185+ case RLIMIT_RTTIME:
54186+ res_add += GR_RLIM_RTTIME_BUMP;
54187+ break;
54188+ }
54189+
54190+ acl->res[res].rlim_cur = res_add;
54191+
54192+ if (wanted > acl->res[res].rlim_max)
54193+ acl->res[res].rlim_max = res_add;
54194+
54195+ /* only log the subject filename, since resource logging is supported for
54196+ single-subject learning only */
ae4e228f 54197+ rcu_read_lock();
58c5fc13
MT
54198+ cred = __task_cred(task);
54199+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54200+ task->role->roletype, cred->uid, cred->gid, acl->filename,
54201+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
bc901d79 54202+ "", (unsigned long) res, &task->signal->saved_ip);
ae4e228f 54203+ rcu_read_unlock();
58c5fc13
MT
54204+ }
54205+
54206+ return;
54207+}
54208+
54209+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
54210+void
54211+pax_set_initial_flags(struct linux_binprm *bprm)
54212+{
54213+ struct task_struct *task = current;
54214+ struct acl_subject_label *proc;
54215+ unsigned long flags;
54216+
54217+ if (unlikely(!(gr_status & GR_READY)))
54218+ return;
54219+
54220+ flags = pax_get_flags(task);
54221+
54222+ proc = task->acl;
54223+
54224+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
54225+ flags &= ~MF_PAX_PAGEEXEC;
54226+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
54227+ flags &= ~MF_PAX_SEGMEXEC;
54228+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
54229+ flags &= ~MF_PAX_RANDMMAP;
54230+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
54231+ flags &= ~MF_PAX_EMUTRAMP;
54232+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
54233+ flags &= ~MF_PAX_MPROTECT;
54234+
54235+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
54236+ flags |= MF_PAX_PAGEEXEC;
54237+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
54238+ flags |= MF_PAX_SEGMEXEC;
54239+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
54240+ flags |= MF_PAX_RANDMMAP;
54241+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
54242+ flags |= MF_PAX_EMUTRAMP;
54243+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
54244+ flags |= MF_PAX_MPROTECT;
54245+
54246+ pax_set_flags(task, flags);
54247+
54248+ return;
54249+}
54250+#endif
54251+
58c5fc13
MT
54252+int
54253+gr_handle_proc_ptrace(struct task_struct *task)
54254+{
54255+ struct file *filp;
54256+ struct task_struct *tmp = task;
54257+ struct task_struct *curtemp = current;
54258+ __u32 retmode;
54259+
54260+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54261+ if (unlikely(!(gr_status & GR_READY)))
54262+ return 0;
54263+#endif
54264+
54265+ read_lock(&tasklist_lock);
54266+ read_lock(&grsec_exec_file_lock);
54267+ filp = task->exec_file;
54268+
54269+ while (tmp->pid > 0) {
54270+ if (tmp == curtemp)
54271+ break;
6892158b 54272+ tmp = tmp->real_parent;
58c5fc13
MT
54273+ }
54274+
54275+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54276+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
54277+ read_unlock(&grsec_exec_file_lock);
54278+ read_unlock(&tasklist_lock);
54279+ return 1;
54280+ }
54281+
54282+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54283+ if (!(gr_status & GR_READY)) {
54284+ read_unlock(&grsec_exec_file_lock);
54285+ read_unlock(&tasklist_lock);
54286+ return 0;
54287+ }
54288+#endif
54289+
54290+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
54291+ read_unlock(&grsec_exec_file_lock);
54292+ read_unlock(&tasklist_lock);
54293+
54294+ if (retmode & GR_NOPTRACE)
54295+ return 1;
54296+
54297+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
54298+ && (current->acl != task->acl || (current->acl != current->role->root_label
54299+ && current->pid != task->pid)))
54300+ return 1;
54301+
54302+ return 0;
54303+}
54304+
6892158b
MT
54305+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
54306+{
54307+ if (unlikely(!(gr_status & GR_READY)))
54308+ return;
54309+
54310+ if (!(current->role->roletype & GR_ROLE_GOD))
54311+ return;
54312+
54313+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
54314+ p->role->rolename, gr_task_roletype_to_char(p),
54315+ p->acl->filename);
54316+}
54317+
58c5fc13
MT
54318+int
54319+gr_handle_ptrace(struct task_struct *task, const long request)
54320+{
54321+ struct task_struct *tmp = task;
54322+ struct task_struct *curtemp = current;
54323+ __u32 retmode;
54324+
54325+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
54326+ if (unlikely(!(gr_status & GR_READY)))
54327+ return 0;
54328+#endif
5e856224
MT
54329+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
54330+ read_lock(&tasklist_lock);
54331+ while (tmp->pid > 0) {
54332+ if (tmp == curtemp)
54333+ break;
54334+ tmp = tmp->real_parent;
54335+ }
58c5fc13 54336+
5e856224
MT
54337+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
54338+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
54339+ read_unlock(&tasklist_lock);
54340+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54341+ return 1;
54342+ }
58c5fc13 54343+ read_unlock(&tasklist_lock);
58c5fc13 54344+ }
58c5fc13
MT
54345+
54346+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54347+ if (!(gr_status & GR_READY))
54348+ return 0;
54349+#endif
54350+
54351+ read_lock(&grsec_exec_file_lock);
54352+ if (unlikely(!task->exec_file)) {
54353+ read_unlock(&grsec_exec_file_lock);
54354+ return 0;
54355+ }
54356+
54357+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
54358+ read_unlock(&grsec_exec_file_lock);
54359+
54360+ if (retmode & GR_NOPTRACE) {
54361+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54362+ return 1;
54363+ }
54364+
54365+ if (retmode & GR_PTRACERD) {
54366+ switch (request) {
6e9df6a3 54367+ case PTRACE_SEIZE:
58c5fc13
MT
54368+ case PTRACE_POKETEXT:
54369+ case PTRACE_POKEDATA:
54370+ case PTRACE_POKEUSR:
54371+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
54372+ case PTRACE_SETREGS:
54373+ case PTRACE_SETFPREGS:
54374+#endif
54375+#ifdef CONFIG_X86
54376+ case PTRACE_SETFPXREGS:
54377+#endif
54378+#ifdef CONFIG_ALTIVEC
54379+ case PTRACE_SETVRREGS:
54380+#endif
54381+ return 1;
54382+ default:
54383+ return 0;
54384+ }
54385+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
54386+ !(current->role->roletype & GR_ROLE_GOD) &&
54387+ (current->acl != task->acl)) {
54388+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
54389+ return 1;
54390+ }
54391+
54392+ return 0;
54393+}
54394+
54395+static int is_writable_mmap(const struct file *filp)
54396+{
54397+ struct task_struct *task = current;
54398+ struct acl_object_label *obj, *obj2;
54399+
54400+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
71d190be 54401+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
58c5fc13
MT
54402+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
54403+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
54404+ task->role->root_label);
54405+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
54406+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
54407+ return 1;
54408+ }
54409+ }
54410+ return 0;
54411+}
54412+
54413+int
54414+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
54415+{
54416+ __u32 mode;
54417+
54418+ if (unlikely(!file || !(prot & PROT_EXEC)))
54419+ return 1;
54420+
54421+ if (is_writable_mmap(file))
54422+ return 0;
54423+
54424+ mode =
54425+ gr_search_file(file->f_path.dentry,
54426+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54427+ file->f_path.mnt);
54428+
54429+ if (!gr_tpe_allow(file))
54430+ return 0;
54431+
54432+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54433+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54434+ return 0;
54435+ } else if (unlikely(!(mode & GR_EXEC))) {
54436+ return 0;
54437+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54438+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54439+ return 1;
54440+ }
54441+
54442+ return 1;
54443+}
54444+
54445+int
54446+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
54447+{
54448+ __u32 mode;
54449+
54450+ if (unlikely(!file || !(prot & PROT_EXEC)))
54451+ return 1;
54452+
54453+ if (is_writable_mmap(file))
54454+ return 0;
54455+
54456+ mode =
54457+ gr_search_file(file->f_path.dentry,
54458+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
54459+ file->f_path.mnt);
54460+
54461+ if (!gr_tpe_allow(file))
54462+ return 0;
54463+
54464+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
54465+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54466+ return 0;
54467+ } else if (unlikely(!(mode & GR_EXEC))) {
54468+ return 0;
54469+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
54470+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
54471+ return 1;
54472+ }
54473+
54474+ return 1;
54475+}
54476+
54477+void
54478+gr_acl_handle_psacct(struct task_struct *task, const long code)
54479+{
54480+ unsigned long runtime;
54481+ unsigned long cputime;
54482+ unsigned int wday, cday;
54483+ __u8 whr, chr;
54484+ __u8 wmin, cmin;
54485+ __u8 wsec, csec;
54486+ struct timespec timeval;
54487+
54488+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
54489+ !(task->acl->mode & GR_PROCACCT)))
54490+ return;
54491+
54492+ do_posix_clock_monotonic_gettime(&timeval);
54493+ runtime = timeval.tv_sec - task->start_time.tv_sec;
54494+ wday = runtime / (3600 * 24);
54495+ runtime -= wday * (3600 * 24);
54496+ whr = runtime / 3600;
54497+ runtime -= whr * 3600;
54498+ wmin = runtime / 60;
54499+ runtime -= wmin * 60;
54500+ wsec = runtime;
54501+
54502+ cputime = (task->utime + task->stime) / HZ;
54503+ cday = cputime / (3600 * 24);
54504+ cputime -= cday * (3600 * 24);
54505+ chr = cputime / 3600;
54506+ cputime -= chr * 3600;
54507+ cmin = cputime / 60;
54508+ cputime -= cmin * 60;
54509+ csec = cputime;
54510+
54511+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
54512+
54513+ return;
54514+}
54515+
54516+void gr_set_kernel_label(struct task_struct *task)
54517+{
54518+ if (gr_status & GR_READY) {
54519+ task->role = kernel_role;
54520+ task->acl = kernel_role->root_label;
54521+ }
54522+ return;
54523+}
54524+
54525+#ifdef CONFIG_TASKSTATS
54526+int gr_is_taskstats_denied(int pid)
54527+{
54528+ struct task_struct *task;
54529+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54530+ const struct cred *cred;
54531+#endif
54532+ int ret = 0;
54533+
54534+ /* restrict taskstats viewing to un-chrooted root users
54535+ who have the 'view' subject flag if the RBAC system is enabled
54536+ */
54537+
df50ba0c 54538+ rcu_read_lock();
58c5fc13
MT
54539+ read_lock(&tasklist_lock);
54540+ task = find_task_by_vpid(pid);
54541+ if (task) {
58c5fc13
MT
54542+#ifdef CONFIG_GRKERNSEC_CHROOT
54543+ if (proc_is_chrooted(task))
54544+ ret = -EACCES;
54545+#endif
54546+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54547+ cred = __task_cred(task);
54548+#ifdef CONFIG_GRKERNSEC_PROC_USER
54549+ if (cred->uid != 0)
54550+ ret = -EACCES;
54551+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54552+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
54553+ ret = -EACCES;
54554+#endif
54555+#endif
54556+ if (gr_status & GR_READY) {
54557+ if (!(task->acl->mode & GR_VIEW))
54558+ ret = -EACCES;
54559+ }
58c5fc13
MT
54560+ } else
54561+ ret = -ENOENT;
54562+
54563+ read_unlock(&tasklist_lock);
df50ba0c 54564+ rcu_read_unlock();
58c5fc13
MT
54565+
54566+ return ret;
54567+}
54568+#endif
54569+
bc901d79
MT
54570+/* AUXV entries are filled via a descendant of search_binary_handler
54571+ after we've already applied the subject for the target
54572+*/
54573+int gr_acl_enable_at_secure(void)
54574+{
54575+ if (unlikely(!(gr_status & GR_READY)))
54576+ return 0;
54577+
54578+ if (current->acl->mode & GR_ATSECURE)
54579+ return 1;
54580+
54581+ return 0;
54582+}
54583+
58c5fc13
MT
54584+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
54585+{
54586+ struct task_struct *task = current;
54587+ struct dentry *dentry = file->f_path.dentry;
54588+ struct vfsmount *mnt = file->f_path.mnt;
54589+ struct acl_object_label *obj, *tmp;
54590+ struct acl_subject_label *subj;
54591+ unsigned int bufsize;
54592+ int is_not_root;
54593+ char *path;
16454cff 54594+ dev_t dev = __get_dev(dentry);
58c5fc13
MT
54595+
54596+ if (unlikely(!(gr_status & GR_READY)))
54597+ return 1;
54598+
54599+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
54600+ return 1;
54601+
54602+ /* ignore Eric Biederman */
54603+ if (IS_PRIVATE(dentry->d_inode))
54604+ return 1;
54605+
54606+ subj = task->acl;
572b4308 54607+ read_lock(&gr_inode_lock);
58c5fc13 54608+ do {
16454cff 54609+ obj = lookup_acl_obj_label(ino, dev, subj);
572b4308
MT
54610+ if (obj != NULL) {
54611+ read_unlock(&gr_inode_lock);
58c5fc13 54612+ return (obj->mode & GR_FIND) ? 1 : 0;
572b4308 54613+ }
58c5fc13 54614+ } while ((subj = subj->parent_subject));
572b4308 54615+ read_unlock(&gr_inode_lock);
58c5fc13
MT
54616+
54617+ /* this is purely an optimization since we're looking for an object
54618+ for the directory we're doing a readdir on
54619+ if it's possible for any globbed object to match the entry we're
54620+ filling into the directory, then the object we find here will be
54621+ an anchor point with attached globbed objects
54622+ */
54623+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
54624+ if (obj->globbed == NULL)
54625+ return (obj->mode & GR_FIND) ? 1 : 0;
54626+
54627+ is_not_root = ((obj->filename[0] == '/') &&
54628+ (obj->filename[1] == '\0')) ? 0 : 1;
54629+ bufsize = PAGE_SIZE - namelen - is_not_root;
54630+
54631+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
54632+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
54633+ return 1;
54634+
54635+ preempt_disable();
54636+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
54637+ bufsize);
54638+
54639+ bufsize = strlen(path);
54640+
54641+ /* if base is "/", don't append an additional slash */
54642+ if (is_not_root)
54643+ *(path + bufsize) = '/';
54644+ memcpy(path + bufsize + is_not_root, name, namelen);
54645+ *(path + bufsize + namelen + is_not_root) = '\0';
54646+
54647+ tmp = obj->globbed;
54648+ while (tmp) {
54649+ if (!glob_match(tmp->filename, path)) {
54650+ preempt_enable();
54651+ return (tmp->mode & GR_FIND) ? 1 : 0;
54652+ }
54653+ tmp = tmp->next;
54654+ }
54655+ preempt_enable();
54656+ return (obj->mode & GR_FIND) ? 1 : 0;
54657+}
54658+
6892158b
MT
54659+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
54660+EXPORT_SYMBOL(gr_acl_is_enabled);
54661+#endif
58c5fc13
MT
54662+EXPORT_SYMBOL(gr_learn_resource);
54663+EXPORT_SYMBOL(gr_set_kernel_label);
54664+#ifdef CONFIG_SECURITY
54665+EXPORT_SYMBOL(gr_check_user_change);
54666+EXPORT_SYMBOL(gr_check_group_change);
54667+#endif
54668+
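Aside: a minimal, self-contained sketch of the "learning bump" arithmetic used by gr_learn_resource() above. When a learning-mode subject wants more than its recorded soft limit, the limit is raised to the wanted value plus a per-resource slack so the learned policy is not tripped again immediately. The resource ids and slack values below are illustrative stand-ins, not the GR_RLIM_*_BUMP constants from the patch.

#include <stdio.h>

/* hypothetical resource ids and slack values, for illustration only */
enum { EX_RLIMIT_CPU, EX_RLIMIT_FSIZE, EX_RLIMIT_NOFILE, EX_NLIMITS };

static const unsigned long ex_bump[EX_NLIMITS] = {
	[EX_RLIMIT_CPU]    = 60,       /* seconds of slack */
	[EX_RLIMIT_FSIZE]  = 1 << 20,  /* 1 MiB of slack */
	[EX_RLIMIT_NOFILE] = 32,       /* extra descriptors */
};

struct ex_limits {
	unsigned long cur[EX_NLIMITS];  /* learned soft limits */
	unsigned long max[EX_NLIMITS];  /* learned hard limits */
};

/* raise the learned limit the way gr_learn_resource() does:
   wanted value plus per-resource slack, hard limit only if exceeded */
static void ex_learn(struct ex_limits *l, int res, unsigned long wanted)
{
	if (wanted >= l->cur[res]) {
		unsigned long bumped = wanted + ex_bump[res];

		l->cur[res] = bumped;
		if (wanted > l->max[res])
			l->max[res] = bumped;
	}
}

int main(void)
{
	struct ex_limits l = { .cur = { 10, 4096, 8 }, .max = { 10, 4096, 8 } };

	ex_learn(&l, EX_RLIMIT_NOFILE, 100);    /* process asked for 100 descriptors */
	printf("learned NOFILE soft=%lu hard=%lu\n",
	       l.cur[EX_RLIMIT_NOFILE], l.max[EX_RLIMIT_NOFILE]);
	return 0;
}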
fe2de317
MT
54669diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
54670new file mode 100644
54671index 0000000..34fefda
54672--- /dev/null
54673+++ b/grsecurity/gracl_alloc.c
54674@@ -0,0 +1,105 @@
54675+#include <linux/kernel.h>
54676+#include <linux/mm.h>
54677+#include <linux/slab.h>
54678+#include <linux/vmalloc.h>
54679+#include <linux/gracl.h>
54680+#include <linux/grsecurity.h>
54681+
54682+static unsigned long alloc_stack_next = 1;
54683+static unsigned long alloc_stack_size = 1;
54684+static void **alloc_stack;
54685+
54686+static __inline__ int
54687+alloc_pop(void)
54688+{
54689+ if (alloc_stack_next == 1)
54690+ return 0;
54691+
54692+ kfree(alloc_stack[alloc_stack_next - 2]);
54693+
54694+ alloc_stack_next--;
54695+
54696+ return 1;
54697+}
54698+
54699+static __inline__ int
54700+alloc_push(void *buf)
54701+{
54702+ if (alloc_stack_next >= alloc_stack_size)
54703+ return 1;
54704+
54705+ alloc_stack[alloc_stack_next - 1] = buf;
54706+
54707+ alloc_stack_next++;
54708+
54709+ return 0;
54710+}
54711+
54712+void *
54713+acl_alloc(unsigned long len)
54714+{
54715+ void *ret = NULL;
54716+
54717+ if (!len || len > PAGE_SIZE)
54718+ goto out;
54719+
54720+ ret = kmalloc(len, GFP_KERNEL);
54721+
54722+ if (ret) {
54723+ if (alloc_push(ret)) {
54724+ kfree(ret);
54725+ ret = NULL;
54726+ }
54727+ }
54728+
54729+out:
54730+ return ret;
54731+}
54732+
54733+void *
54734+acl_alloc_num(unsigned long num, unsigned long len)
54735+{
54736+ if (!len || (num > (PAGE_SIZE / len)))
54737+ return NULL;
54738+
54739+ return acl_alloc(num * len);
54740+}
54741+
54742+void
54743+acl_free_all(void)
54744+{
54745+ if (gr_acl_is_enabled() || !alloc_stack)
54746+ return;
54747+
54748+ while (alloc_pop()) ;
54749+
54750+ if (alloc_stack) {
54751+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
54752+ kfree(alloc_stack);
54753+ else
54754+ vfree(alloc_stack);
54755+ }
54756+
54757+ alloc_stack = NULL;
54758+ alloc_stack_size = 1;
54759+ alloc_stack_next = 1;
54760+
54761+ return;
54762+}
54763+
54764+int
54765+acl_alloc_stack_init(unsigned long size)
54766+{
54767+ if ((size * sizeof (void *)) <= PAGE_SIZE)
54768+ alloc_stack =
54769+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
54770+ else
54771+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
54772+
54773+ alloc_stack_size = size;
54774+
54775+ if (!alloc_stack)
54776+ return 0;
54777+ else
54778+ return 1;
54779+}
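Aside: the allocator above keeps every policy allocation on a bounded pointer stack so that acl_free_all() can release the whole policy with one pop-until-empty loop. A minimal userspace sketch of the same bookkeeping pattern, with plain malloc()/free() standing in for kmalloc()/vmalloc(); the ex_* names are illustrative, not from the patch.

#include <stdlib.h>
#include <string.h>

static void **ex_stack;
static unsigned long ex_stack_size;
static unsigned long ex_stack_next = 1;   /* index 1 means "empty", as in gracl_alloc.c */

static int ex_stack_init(unsigned long size)
{
	ex_stack = calloc(size, sizeof(void *));
	ex_stack_size = size;
	return ex_stack != NULL;
}

/* allocate and remember the pointer so ex_free_all() can release it later */
static void *ex_alloc(size_t len)
{
	void *p;

	if (!len || !ex_stack || ex_stack_next >= ex_stack_size)
		return NULL;              /* stack full: refuse rather than lose track */
	p = malloc(len);
	if (p) {
		ex_stack[ex_stack_next - 1] = p;
		ex_stack_next++;
	}
	return p;
}

/* pop-until-empty teardown, mirroring acl_free_all() */
static void ex_free_all(void)
{
	while (ex_stack_next > 1) {
		ex_stack_next--;
		free(ex_stack[ex_stack_next - 1]);
	}
	free(ex_stack);
	ex_stack = NULL;
	ex_stack_size = 0;
}

int main(void)
{
	if (!ex_stack_init(64))
		return 1;
	char *msg = ex_alloc(32);
	if (msg)
		strcpy(msg, "tracked allocation");
	ex_free_all();                    /* frees msg and the bookkeeping stack itself */
	return 0;
}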
54780diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
54781new file mode 100644
5e856224 54782index 0000000..6d21049
fe2de317
MT
54783--- /dev/null
54784+++ b/grsecurity/gracl_cap.c
5e856224 54785@@ -0,0 +1,110 @@
58c5fc13
MT
54786+#include <linux/kernel.h>
54787+#include <linux/module.h>
54788+#include <linux/sched.h>
54789+#include <linux/gracl.h>
54790+#include <linux/grsecurity.h>
54791+#include <linux/grinternal.h>
54792+
15a11c5b
MT
54793+extern const char *captab_log[];
54794+extern int captab_log_entries;
58c5fc13 54795+
5e856224 54796+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
58c5fc13 54797+{
58c5fc13
MT
54798+ struct acl_subject_label *curracl;
54799+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
df50ba0c 54800+ kernel_cap_t cap_audit = __cap_empty_set;
58c5fc13
MT
54801+
54802+ if (!gr_acl_is_enabled())
54803+ return 1;
54804+
54805+ curracl = task->acl;
54806+
54807+ cap_drop = curracl->cap_lower;
54808+ cap_mask = curracl->cap_mask;
df50ba0c 54809+ cap_audit = curracl->cap_invert_audit;
58c5fc13
MT
54810+
54811+ while ((curracl = curracl->parent_subject)) {
54812+ /* if the cap isn't specified in the current computed mask but is specified in the
54813+ current level subject, and is lowered in the current level subject, then add
54814+ it to the set of dropped capabilities
54815+ otherwise, add the current level subject's mask to the current computed mask
54816+ */
54817+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54818+ cap_raise(cap_mask, cap);
54819+ if (cap_raised(curracl->cap_lower, cap))
54820+ cap_raise(cap_drop, cap);
df50ba0c
MT
54821+ if (cap_raised(curracl->cap_invert_audit, cap))
54822+ cap_raise(cap_audit, cap);
58c5fc13
MT
54823+ }
54824+ }
54825+
df50ba0c
MT
54826+ if (!cap_raised(cap_drop, cap)) {
54827+ if (cap_raised(cap_audit, cap))
54828+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
58c5fc13 54829+ return 1;
df50ba0c 54830+ }
58c5fc13
MT
54831+
54832+ curracl = task->acl;
54833+
54834+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
54835+ && cap_raised(cred->cap_effective, cap)) {
54836+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
54837+ task->role->roletype, cred->uid,
54838+ cred->gid, task->exec_file ?
54839+ gr_to_filename(task->exec_file->f_path.dentry,
54840+ task->exec_file->f_path.mnt) : curracl->filename,
54841+ curracl->filename, 0UL,
bc901d79 54842+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
58c5fc13
MT
54843+ return 1;
54844+ }
54845+
15a11c5b 54846+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
58c5fc13 54847+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
5e856224 54848+
58c5fc13
MT
54849+ return 0;
54850+}
54851+
54852+int
5e856224
MT
54853+gr_acl_is_capable(const int cap)
54854+{
54855+ return gr_task_acl_is_capable(current, current_cred(), cap);
54856+}
54857+
54858+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
58c5fc13
MT
54859+{
54860+ struct acl_subject_label *curracl;
54861+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
54862+
54863+ if (!gr_acl_is_enabled())
54864+ return 1;
54865+
5e856224 54866+ curracl = task->acl;
58c5fc13
MT
54867+
54868+ cap_drop = curracl->cap_lower;
54869+ cap_mask = curracl->cap_mask;
54870+
54871+ while ((curracl = curracl->parent_subject)) {
54872+ /* if the cap isn't specified in the current computed mask but is specified in the
54873+ current level subject, and is lowered in the current level subject, then add
54874+ it to the set of dropped capabilities
54875+ otherwise, add the current level subject's mask to the current computed mask
54876+ */
54877+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
54878+ cap_raise(cap_mask, cap);
54879+ if (cap_raised(curracl->cap_lower, cap))
54880+ cap_raise(cap_drop, cap);
54881+ }
54882+ }
54883+
54884+ if (!cap_raised(cap_drop, cap))
54885+ return 1;
54886+
54887+ return 0;
54888+}
54889+
5e856224
MT
54890+int
54891+gr_acl_is_capable_nolog(const int cap)
54892+{
54893+ return gr_task_acl_is_capable_nolog(current, cap);
54894+}
54895+
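Aside: the parent_subject walk in gr_task_acl_is_capable() above resolves capabilities nearest-subject-first: the first subject in the chain whose cap_mask mentions a capability decides whether it is lowered, and ancestors only fill in capabilities the child's policy leaves unspecified. A small standalone sketch of that resolution rule, using 64-bit masks in place of kernel_cap_t; the types and values are illustrative only.

#include <stdio.h>
#include <stdint.h>

struct ex_subject {
	uint64_t cap_mask;               /* caps this subject says anything about */
	uint64_t cap_lower;              /* caps this subject drops */
	const struct ex_subject *parent;
};

static int ex_cap_allowed(const struct ex_subject *subj, int cap)
{
	uint64_t bit = 1ULL << cap;
	uint64_t mask = subj->cap_mask;
	uint64_t drop = subj->cap_lower;

	for (subj = subj->parent; subj; subj = subj->parent) {
		/* inherit an ancestor's verdict only for caps not yet decided */
		if (!(mask & bit) && (subj->cap_mask & bit)) {
			mask |= bit;
			if (subj->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);            /* allowed unless a deciding subject lowered it */
}

int main(void)
{
	/* the parent drops cap 21; the child subject says nothing about it */
	struct ex_subject parent = { .cap_mask = 1ULL << 21, .cap_lower = 1ULL << 21 };
	struct ex_subject child  = { .parent = &parent };

	printf("cap 21 allowed: %d\n", ex_cap_allowed(&child, 21));  /* prints 0 */
	printf("cap  7 allowed: %d\n", ex_cap_allowed(&child, 7));   /* prints 1 */
	return 0;
}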
fe2de317
MT
54896diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
54897new file mode 100644
4c928ab7 54898index 0000000..88d0e87
fe2de317
MT
54899--- /dev/null
54900+++ b/grsecurity/gracl_fs.c
4c928ab7 54901@@ -0,0 +1,435 @@
58c5fc13
MT
54902+#include <linux/kernel.h>
54903+#include <linux/sched.h>
54904+#include <linux/types.h>
54905+#include <linux/fs.h>
54906+#include <linux/file.h>
54907+#include <linux/stat.h>
54908+#include <linux/grsecurity.h>
54909+#include <linux/grinternal.h>
54910+#include <linux/gracl.h>
54911+
4c928ab7
MT
54912+umode_t
54913+gr_acl_umask(void)
54914+{
54915+ if (unlikely(!gr_acl_is_enabled()))
54916+ return 0;
54917+
54918+ return current->role->umask;
54919+}
54920+
58c5fc13
MT
54921+__u32
54922+gr_acl_handle_hidden_file(const struct dentry * dentry,
54923+ const struct vfsmount * mnt)
54924+{
54925+ __u32 mode;
54926+
54927+ if (unlikely(!dentry->d_inode))
54928+ return GR_FIND;
54929+
54930+ mode =
54931+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
54932+
54933+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
54934+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54935+ return mode;
54936+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
54937+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
54938+ return 0;
54939+ } else if (unlikely(!(mode & GR_FIND)))
54940+ return 0;
54941+
54942+ return GR_FIND;
54943+}
54944+
54945+__u32
54946+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
6e9df6a3 54947+ int acc_mode)
58c5fc13
MT
54948+{
54949+ __u32 reqmode = GR_FIND;
54950+ __u32 mode;
54951+
54952+ if (unlikely(!dentry->d_inode))
54953+ return reqmode;
54954+
6e9df6a3 54955+ if (acc_mode & MAY_APPEND)
58c5fc13 54956+ reqmode |= GR_APPEND;
6e9df6a3 54957+ else if (acc_mode & MAY_WRITE)
58c5fc13 54958+ reqmode |= GR_WRITE;
6e9df6a3 54959+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
58c5fc13 54960+ reqmode |= GR_READ;
6e9df6a3 54961+
58c5fc13
MT
54962+ mode =
54963+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
54964+ mnt);
54965+
54966+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
54967+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54968+ reqmode & GR_READ ? " reading" : "",
54969+ reqmode & GR_WRITE ? " writing" : reqmode &
54970+ GR_APPEND ? " appending" : "");
54971+ return reqmode;
54972+ } else
54973+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
54974+ {
54975+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
54976+ reqmode & GR_READ ? " reading" : "",
54977+ reqmode & GR_WRITE ? " writing" : reqmode &
54978+ GR_APPEND ? " appending" : "");
54979+ return 0;
54980+ } else if (unlikely((mode & reqmode) != reqmode))
54981+ return 0;
54982+
54983+ return reqmode;
54984+}
54985+
54986+__u32
54987+gr_acl_handle_creat(const struct dentry * dentry,
54988+ const struct dentry * p_dentry,
6e9df6a3 54989+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
58c5fc13
MT
54990+ const int imode)
54991+{
54992+ __u32 reqmode = GR_WRITE | GR_CREATE;
54993+ __u32 mode;
54994+
6e9df6a3 54995+ if (acc_mode & MAY_APPEND)
58c5fc13 54996+ reqmode |= GR_APPEND;
6e9df6a3
MT
54997+ // if a directory was required or the directory already exists, then
54998+ // don't count this open as a read
54999+ if ((acc_mode & MAY_READ) &&
55000+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
58c5fc13 55001+ reqmode |= GR_READ;
6e9df6a3 55002+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
58c5fc13
MT
55003+ reqmode |= GR_SETID;
55004+
55005+ mode =
55006+ gr_check_create(dentry, p_dentry, p_mnt,
55007+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55008+
55009+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55010+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55011+ reqmode & GR_READ ? " reading" : "",
55012+ reqmode & GR_WRITE ? " writing" : reqmode &
55013+ GR_APPEND ? " appending" : "");
55014+ return reqmode;
55015+ } else
55016+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55017+ {
55018+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
55019+ reqmode & GR_READ ? " reading" : "",
55020+ reqmode & GR_WRITE ? " writing" : reqmode &
55021+ GR_APPEND ? " appending" : "");
55022+ return 0;
55023+ } else if (unlikely((mode & reqmode) != reqmode))
55024+ return 0;
55025+
55026+ return reqmode;
55027+}
55028+
55029+__u32
55030+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
55031+ const int fmode)
55032+{
55033+ __u32 mode, reqmode = GR_FIND;
55034+
55035+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
55036+ reqmode |= GR_EXEC;
55037+ if (fmode & S_IWOTH)
55038+ reqmode |= GR_WRITE;
55039+ if (fmode & S_IROTH)
55040+ reqmode |= GR_READ;
55041+
55042+ mode =
55043+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
55044+ mnt);
55045+
55046+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
55047+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55048+ reqmode & GR_READ ? " reading" : "",
55049+ reqmode & GR_WRITE ? " writing" : "",
55050+ reqmode & GR_EXEC ? " executing" : "");
55051+ return reqmode;
55052+ } else
55053+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
55054+ {
55055+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
55056+ reqmode & GR_READ ? " reading" : "",
55057+ reqmode & GR_WRITE ? " writing" : "",
55058+ reqmode & GR_EXEC ? " executing" : "");
55059+ return 0;
55060+ } else if (unlikely((mode & reqmode) != reqmode))
55061+ return 0;
55062+
55063+ return reqmode;
55064+}
55065+
55066+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
55067+{
55068+ __u32 mode;
55069+
55070+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
55071+
55072+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55073+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
55074+ return mode;
55075+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55076+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
55077+ return 0;
55078+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55079+ return 0;
55080+
55081+ return (reqmode);
55082+}
55083+
55084+__u32
55085+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
55086+{
55087+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
55088+}
55089+
55090+__u32
55091+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
55092+{
55093+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
55094+}
55095+
55096+__u32
55097+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
55098+{
55099+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
55100+}
55101+
55102+__u32
55103+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
55104+{
55105+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
55106+}
55107+
55108+__u32
4c928ab7
MT
55109+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
55110+ umode_t *modeptr)
58c5fc13 55111+{
4c928ab7
MT
55112+ umode_t mode;
55113+
55114+ *modeptr &= ~gr_acl_umask();
55115+ mode = *modeptr;
55116+
58c5fc13
MT
55117+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
55118+ return 1;
55119+
4c928ab7 55120+ if (unlikely(mode & (S_ISUID | S_ISGID))) {
58c5fc13
MT
55121+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
55122+ GR_CHMOD_ACL_MSG);
55123+ } else {
55124+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
55125+ }
55126+}
55127+
55128+__u32
55129+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
55130+{
55131+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
55132+}
55133+
55134+__u32
bc901d79
MT
55135+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
55136+{
55137+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
55138+}
55139+
55140+__u32
58c5fc13
MT
55141+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
55142+{
55143+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
55144+}
55145+
55146+__u32
55147+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
55148+{
55149+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
55150+ GR_UNIXCONNECT_ACL_MSG);
55151+}
55152+
6e9df6a3 55153+/* hardlinks require at minimum create and link permission,
58c5fc13
MT
55154+ any additional privilege required is based on the
55155+ privilege of the file being linked to
55156+*/
55157+__u32
55158+gr_acl_handle_link(const struct dentry * new_dentry,
55159+ const struct dentry * parent_dentry,
55160+ const struct vfsmount * parent_mnt,
55161+ const struct dentry * old_dentry,
55162+ const struct vfsmount * old_mnt, const char *to)
55163+{
55164+ __u32 mode;
55165+ __u32 needmode = GR_CREATE | GR_LINK;
55166+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
55167+
55168+ mode =
55169+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
55170+ old_mnt);
55171+
55172+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
55173+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55174+ return mode;
55175+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55176+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
55177+ return 0;
55178+ } else if (unlikely((mode & needmode) != needmode))
55179+ return 0;
55180+
55181+ return 1;
55182+}
55183+
55184+__u32
55185+gr_acl_handle_symlink(const struct dentry * new_dentry,
55186+ const struct dentry * parent_dentry,
55187+ const struct vfsmount * parent_mnt, const char *from)
55188+{
55189+ __u32 needmode = GR_WRITE | GR_CREATE;
55190+ __u32 mode;
55191+
55192+ mode =
55193+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
55194+ GR_CREATE | GR_AUDIT_CREATE |
55195+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
55196+
55197+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
55198+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55199+ return mode;
55200+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
55201+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
55202+ return 0;
55203+ } else if (unlikely((mode & needmode) != needmode))
55204+ return 0;
55205+
55206+ return (GR_WRITE | GR_CREATE);
55207+}
55208+
55209+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
55210+{
55211+ __u32 mode;
55212+
55213+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
55214+
55215+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
55216+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
55217+ return mode;
55218+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
55219+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
55220+ return 0;
55221+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
55222+ return 0;
55223+
55224+ return (reqmode);
55225+}
55226+
55227+__u32
55228+gr_acl_handle_mknod(const struct dentry * new_dentry,
55229+ const struct dentry * parent_dentry,
55230+ const struct vfsmount * parent_mnt,
55231+ const int mode)
55232+{
55233+ __u32 reqmode = GR_WRITE | GR_CREATE;
55234+ if (unlikely(mode & (S_ISUID | S_ISGID)))
55235+ reqmode |= GR_SETID;
55236+
55237+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55238+ reqmode, GR_MKNOD_ACL_MSG);
55239+}
55240+
55241+__u32
55242+gr_acl_handle_mkdir(const struct dentry *new_dentry,
55243+ const struct dentry *parent_dentry,
55244+ const struct vfsmount *parent_mnt)
55245+{
55246+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
55247+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
55248+}
55249+
55250+#define RENAME_CHECK_SUCCESS(old, new) \
55251+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
55252+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
55253+
55254+int
55255+gr_acl_handle_rename(struct dentry *new_dentry,
55256+ struct dentry *parent_dentry,
55257+ const struct vfsmount *parent_mnt,
55258+ struct dentry *old_dentry,
55259+ struct inode *old_parent_inode,
55260+ struct vfsmount *old_mnt, const char *newname)
55261+{
55262+ __u32 comp1, comp2;
55263+ int error = 0;
55264+
55265+ if (unlikely(!gr_acl_is_enabled()))
55266+ return 0;
55267+
55268+ if (!new_dentry->d_inode) {
55269+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
55270+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
55271+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
55272+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
55273+ GR_DELETE | GR_AUDIT_DELETE |
55274+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55275+ GR_SUPPRESS, old_mnt);
55276+ } else {
55277+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
55278+ GR_CREATE | GR_DELETE |
55279+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
55280+ GR_AUDIT_READ | GR_AUDIT_WRITE |
55281+ GR_SUPPRESS, parent_mnt);
55282+ comp2 =
55283+ gr_search_file(old_dentry,
55284+ GR_READ | GR_WRITE | GR_AUDIT_READ |
55285+ GR_DELETE | GR_AUDIT_DELETE |
55286+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
55287+ }
55288+
55289+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
55290+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
55291+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55292+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
55293+ && !(comp2 & GR_SUPPRESS)) {
55294+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
55295+ error = -EACCES;
55296+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
55297+ error = -EACCES;
55298+
55299+ return error;
55300+}
55301+
55302+void
55303+gr_acl_handle_exit(void)
55304+{
55305+ u16 id;
55306+ char *rolename;
55307+ struct file *exec_file;
55308+
16454cff
MT
55309+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
55310+ !(current->role->roletype & GR_ROLE_PERSIST))) {
58c5fc13
MT
55311+ id = current->acl_role_id;
55312+ rolename = current->role->rolename;
55313+ gr_set_acls(1);
55314+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
55315+ }
55316+
55317+ write_lock(&grsec_exec_file_lock);
55318+ exec_file = current->exec_file;
55319+ current->exec_file = NULL;
55320+ write_unlock(&grsec_exec_file_lock);
55321+
55322+ if (exec_file)
55323+ fput(exec_file);
55324+}
55325+
55326+int
55327+gr_acl_handle_procpidmem(const struct task_struct *task)
55328+{
55329+ if (unlikely(!gr_acl_is_enabled()))
55330+ return 0;
55331+
55332+ if (task != current && task->acl->mode & GR_PROTPROCFD)
55333+ return -EACCES;
55334+
55335+ return 0;
55336+}
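Aside: most of the handlers above funnel through generic_fs_handler(), whose decision reduces to: compare the bits the caller requires against the bits the policy grants, emit a success audit when an audit bit is granted, log a denial unless the suppress bit asks for silence, and return the requested mode only when every required bit is present. A condensed sketch of that decision with stand-in flag values; the EX_* constants are illustrative, not the GR_* bits from the patch.

#include <stdio.h>

#define EX_READ     0x01
#define EX_WRITE    0x02
#define EX_AUDIT    0x04   /* stands in for the GR_AUDIT_* bits */
#define EX_SUPPRESS 0x08   /* stands in for GR_SUPPRESS */

static unsigned int ex_fs_check(unsigned int granted, unsigned int reqmode,
                                const char *what)
{
	if ((granted & reqmode) == reqmode && (granted & EX_AUDIT)) {
		printf("audit: allowed %s\n", what);    /* audited success */
		return reqmode;
	}
	if ((granted & reqmode) != reqmode && !(granted & EX_SUPPRESS)) {
		printf("denied: %s\n", what);           /* logged denial */
		return 0;
	}
	if ((granted & reqmode) != reqmode)
		return 0;                               /* denied, but suppressed */
	return reqmode;                                 /* quiet allow */
}

int main(void)
{
	ex_fs_check(EX_READ | EX_AUDIT, EX_READ, "read");         /* audited allow */
	ex_fs_check(EX_READ, EX_READ | EX_WRITE, "read/write");   /* logged denial */
	return 0;
}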
fe2de317
MT
55337diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
55338new file mode 100644
5e856224 55339index 0000000..58800a7
fe2de317
MT
55340--- /dev/null
55341+++ b/grsecurity/gracl_ip.c
5e856224 55342@@ -0,0 +1,384 @@
58c5fc13
MT
55343+#include <linux/kernel.h>
55344+#include <asm/uaccess.h>
55345+#include <asm/errno.h>
55346+#include <net/sock.h>
55347+#include <linux/file.h>
55348+#include <linux/fs.h>
55349+#include <linux/net.h>
55350+#include <linux/in.h>
55351+#include <linux/skbuff.h>
55352+#include <linux/ip.h>
55353+#include <linux/udp.h>
58c5fc13
MT
55354+#include <linux/types.h>
55355+#include <linux/sched.h>
55356+#include <linux/netdevice.h>
55357+#include <linux/inetdevice.h>
55358+#include <linux/gracl.h>
55359+#include <linux/grsecurity.h>
55360+#include <linux/grinternal.h>
55361+
55362+#define GR_BIND 0x01
55363+#define GR_CONNECT 0x02
55364+#define GR_INVERT 0x04
55365+#define GR_BINDOVERRIDE 0x08
55366+#define GR_CONNECTOVERRIDE 0x10
bc901d79 55367+#define GR_SOCK_FAMILY 0x20
58c5fc13 55368+
bc901d79 55369+static const char * gr_protocols[IPPROTO_MAX] = {
58c5fc13
MT
55370+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
55371+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
55372+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
55373+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
55374+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
55375+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
55376+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
55377+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
55378+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
55379+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
55380+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
55381+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
55382+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
55383+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
55384+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
55385+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
55386+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
55387+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
55388+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
55389+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
55390+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
55391+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
55392+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
55393+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
55394+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
55395+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
55396+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
55397+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
55398+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
55399+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
55400+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
55401+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
55402+ };
55403+
bc901d79 55404+static const char * gr_socktypes[SOCK_MAX] = {
58c5fc13
MT
55405+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
55406+ "unknown:7", "unknown:8", "unknown:9", "packet"
55407+ };
55408+
bc901d79
MT
55409+static const char * gr_sockfamilies[AF_MAX+1] = {
55410+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
55411+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
c52201e0
MT
55412+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
55413+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
bc901d79
MT
55414+ };
55415+
58c5fc13
MT
55416+const char *
55417+gr_proto_to_name(unsigned char proto)
55418+{
55419+ return gr_protocols[proto];
55420+}
55421+
55422+const char *
55423+gr_socktype_to_name(unsigned char type)
55424+{
55425+ return gr_socktypes[type];
55426+}
55427+
bc901d79
MT
55428+const char *
55429+gr_sockfamily_to_name(unsigned char family)
55430+{
55431+ return gr_sockfamilies[family];
55432+}
55433+
58c5fc13
MT
55434+int
55435+gr_search_socket(const int domain, const int type, const int protocol)
55436+{
55437+ struct acl_subject_label *curr;
55438+ const struct cred *cred = current_cred();
55439+
55440+ if (unlikely(!gr_acl_is_enabled()))
55441+ goto exit;
55442+
bc901d79
MT
55443+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
55444+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
58c5fc13
MT
55445+ goto exit; // let the kernel handle it
55446+
55447+ curr = current->acl;
55448+
bc901d79
MT
55449+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
55450+ /* the family is allowed, if this is PF_INET allow it only if
55451+ the extra sock type/protocol checks pass */
55452+ if (domain == PF_INET)
55453+ goto inet_check;
55454+ goto exit;
55455+ } else {
55456+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55457+ __u32 fakeip = 0;
55458+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55459+ current->role->roletype, cred->uid,
55460+ cred->gid, current->exec_file ?
55461+ gr_to_filename(current->exec_file->f_path.dentry,
55462+ current->exec_file->f_path.mnt) :
55463+ curr->filename, curr->filename,
55464+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
55465+ &current->signal->saved_ip);
55466+ goto exit;
55467+ }
55468+ goto exit_fail;
55469+ }
55470+
55471+inet_check:
55472+ /* the rest of this checking is for IPv4 only */
58c5fc13
MT
55473+ if (!curr->ips)
55474+ goto exit;
55475+
55476+ if ((curr->ip_type & (1 << type)) &&
55477+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
55478+ goto exit;
55479+
55480+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55481+ /* we don't place acls on raw sockets, and sometimes
55482+ dgram/ip sockets are opened for ioctl and not
55483+ bind/connect, so we'll fake a bind learn log */
55484+ if (type == SOCK_RAW || type == SOCK_PACKET) {
55485+ __u32 fakeip = 0;
55486+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55487+ current->role->roletype, cred->uid,
55488+ cred->gid, current->exec_file ?
55489+ gr_to_filename(current->exec_file->f_path.dentry,
55490+ current->exec_file->f_path.mnt) :
55491+ curr->filename, curr->filename,
ae4e228f 55492+ &fakeip, 0, type,
bc901d79 55493+ protocol, GR_CONNECT, &current->signal->saved_ip);
58c5fc13
MT
55494+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
55495+ __u32 fakeip = 0;
55496+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55497+ current->role->roletype, cred->uid,
55498+ cred->gid, current->exec_file ?
55499+ gr_to_filename(current->exec_file->f_path.dentry,
55500+ current->exec_file->f_path.mnt) :
55501+ curr->filename, curr->filename,
ae4e228f 55502+ &fakeip, 0, type,
bc901d79 55503+ protocol, GR_BIND, &current->signal->saved_ip);
58c5fc13
MT
55504+ }
55505+ /* we'll log when they use connect or bind */
55506+ goto exit;
55507+ }
55508+
bc901d79
MT
55509+exit_fail:
55510+ if (domain == PF_INET)
55511+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
55512+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
55513+ else
55514+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
55515+ gr_socktype_to_name(type), protocol);
58c5fc13
MT
55516+
55517+ return 0;
bc901d79 55518+exit:
58c5fc13
MT
55519+ return 1;
55520+}
55521+
55522+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
55523+{
55524+ if ((ip->mode & mode) &&
55525+ (ip_port >= ip->low) &&
55526+ (ip_port <= ip->high) &&
55527+ ((ntohl(ip_addr) & our_netmask) ==
55528+ (ntohl(our_addr) & our_netmask))
55529+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
55530+ && (ip->type & (1 << type))) {
55531+ if (ip->mode & GR_INVERT)
55532+ return 2; // specifically denied
55533+ else
55534+ return 1; // allowed
55535+ }
55536+
55537+ return 0; // not specifically allowed, may continue parsing
55538+}
55539+
55540+static int
55541+gr_search_connectbind(const int full_mode, struct sock *sk,
55542+ struct sockaddr_in *addr, const int type)
55543+{
55544+ char iface[IFNAMSIZ] = {0};
55545+ struct acl_subject_label *curr;
55546+ struct acl_ip_label *ip;
55547+ struct inet_sock *isk;
55548+ struct net_device *dev;
55549+ struct in_device *idev;
55550+ unsigned long i;
55551+ int ret;
55552+ int mode = full_mode & (GR_BIND | GR_CONNECT);
55553+ __u32 ip_addr = 0;
55554+ __u32 our_addr;
55555+ __u32 our_netmask;
55556+ char *p;
55557+ __u16 ip_port = 0;
55558+ const struct cred *cred = current_cred();
55559+
55560+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
55561+ return 0;
55562+
55563+ curr = current->acl;
55564+ isk = inet_sk(sk);
55565+
55566+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
55567+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
55568+ addr->sin_addr.s_addr = curr->inaddr_any_override;
ae4e228f 55569+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
58c5fc13
MT
55570+ struct sockaddr_in saddr;
55571+ int err;
55572+
55573+ saddr.sin_family = AF_INET;
55574+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
ae4e228f 55575+ saddr.sin_port = isk->inet_sport;
58c5fc13
MT
55576+
55577+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55578+ if (err)
55579+ return err;
55580+
55581+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
55582+ if (err)
55583+ return err;
55584+ }
55585+
55586+ if (!curr->ips)
55587+ return 0;
55588+
55589+ ip_addr = addr->sin_addr.s_addr;
55590+ ip_port = ntohs(addr->sin_port);
55591+
55592+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
55593+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
55594+ current->role->roletype, cred->uid,
55595+ cred->gid, current->exec_file ?
55596+ gr_to_filename(current->exec_file->f_path.dentry,
55597+ current->exec_file->f_path.mnt) :
55598+ curr->filename, curr->filename,
ae4e228f 55599+ &ip_addr, ip_port, type,
bc901d79 55600+ sk->sk_protocol, mode, &current->signal->saved_ip);
58c5fc13
MT
55601+ return 0;
55602+ }
55603+
55604+ for (i = 0; i < curr->ip_num; i++) {
55605+ ip = *(curr->ips + i);
55606+ if (ip->iface != NULL) {
55607+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
55608+ p = strchr(iface, ':');
55609+ if (p != NULL)
55610+ *p = '\0';
55611+ dev = dev_get_by_name(sock_net(sk), iface);
55612+ if (dev == NULL)
55613+ continue;
55614+ idev = in_dev_get(dev);
55615+ if (idev == NULL) {
55616+ dev_put(dev);
55617+ continue;
55618+ }
55619+ rcu_read_lock();
55620+ for_ifa(idev) {
55621+ if (!strcmp(ip->iface, ifa->ifa_label)) {
55622+ our_addr = ifa->ifa_address;
55623+ our_netmask = 0xffffffff;
55624+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55625+ if (ret == 1) {
55626+ rcu_read_unlock();
55627+ in_dev_put(idev);
55628+ dev_put(dev);
55629+ return 0;
55630+ } else if (ret == 2) {
55631+ rcu_read_unlock();
55632+ in_dev_put(idev);
55633+ dev_put(dev);
55634+ goto denied;
55635+ }
55636+ }
55637+ } endfor_ifa(idev);
55638+ rcu_read_unlock();
55639+ in_dev_put(idev);
55640+ dev_put(dev);
55641+ } else {
55642+ our_addr = ip->addr;
55643+ our_netmask = ip->netmask;
55644+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
55645+ if (ret == 1)
55646+ return 0;
55647+ else if (ret == 2)
55648+ goto denied;
55649+ }
55650+ }
55651+
55652+denied:
55653+ if (mode == GR_BIND)
ae4e228f 55654+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58c5fc13 55655+ else if (mode == GR_CONNECT)
ae4e228f 55656+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
58c5fc13
MT
55657+
55658+ return -EACCES;
55659+}
55660+
55661+int
55662+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
55663+{
5e856224
MT
55664+ /* always allow disconnection of dgram sockets with connect */
55665+ if (addr->sin_family == AF_UNSPEC)
55666+ return 0;
58c5fc13
MT
55667+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
55668+}
55669+
55670+int
55671+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
55672+{
55673+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
55674+}
55675+
55676+int gr_search_listen(struct socket *sock)
55677+{
55678+ struct sock *sk = sock->sk;
55679+ struct sockaddr_in addr;
55680+
ae4e228f
MT
55681+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55682+ addr.sin_port = inet_sk(sk)->inet_sport;
58c5fc13
MT
55683+
55684+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55685+}
55686+
55687+int gr_search_accept(struct socket *sock)
55688+{
55689+ struct sock *sk = sock->sk;
55690+ struct sockaddr_in addr;
55691+
ae4e228f
MT
55692+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
55693+ addr.sin_port = inet_sk(sk)->inet_sport;
58c5fc13
MT
55694+
55695+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
55696+}
55697+
55698+int
55699+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
55700+{
55701+ if (addr)
55702+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
55703+ else {
55704+ struct sockaddr_in sin;
55705+ const struct inet_sock *inet = inet_sk(sk);
55706+
ae4e228f
MT
55707+ sin.sin_addr.s_addr = inet->inet_daddr;
55708+ sin.sin_port = inet->inet_dport;
58c5fc13
MT
55709+
55710+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55711+ }
55712+}
55713+
55714+int
55715+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
55716+{
55717+ struct sockaddr_in sin;
55718+
55719+ if (unlikely(skb->len < sizeof (struct udphdr)))
55720+ return 0; // skip this packet
55721+
55722+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
55723+ sin.sin_port = udp_hdr(skb)->source;
55724+
55725+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
55726+}
fe2de317
MT
55727diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
55728new file mode 100644
55729index 0000000..25f54ef
55730--- /dev/null
55731+++ b/grsecurity/gracl_learn.c
15a11c5b 55732@@ -0,0 +1,207 @@
58c5fc13
MT
55733+#include <linux/kernel.h>
55734+#include <linux/mm.h>
55735+#include <linux/sched.h>
55736+#include <linux/poll.h>
58c5fc13
MT
55737+#include <linux/string.h>
55738+#include <linux/file.h>
55739+#include <linux/types.h>
55740+#include <linux/vmalloc.h>
55741+#include <linux/grinternal.h>
55742+
55743+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
55744+ size_t count, loff_t *ppos);
55745+extern int gr_acl_is_enabled(void);
55746+
55747+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
55748+static int gr_learn_attached;
55749+
55750+/* use a 512k buffer */
55751+#define LEARN_BUFFER_SIZE (512 * 1024)
55752+
55753+static DEFINE_SPINLOCK(gr_learn_lock);
bc901d79 55754+static DEFINE_MUTEX(gr_learn_user_mutex);
58c5fc13
MT
55755+
55756+/* we need to maintain two buffers: the grlearn reader context takes a mutex
55757+ around the copy out to userspace (it may sleep), while the other kernel
55758+ contexts take a spinlock when copying into the buffer, since they cannot sleep
55759+*/
55760+static char *learn_buffer;
55761+static char *learn_buffer_user;
55762+static int learn_buffer_len;
55763+static int learn_buffer_user_len;
55764+
55765+static ssize_t
55766+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
55767+{
55768+ DECLARE_WAITQUEUE(wait, current);
55769+ ssize_t retval = 0;
55770+
55771+ add_wait_queue(&learn_wait, &wait);
55772+ set_current_state(TASK_INTERRUPTIBLE);
55773+ do {
bc901d79 55774+ mutex_lock(&gr_learn_user_mutex);
58c5fc13
MT
55775+ spin_lock(&gr_learn_lock);
55776+ if (learn_buffer_len)
55777+ break;
55778+ spin_unlock(&gr_learn_lock);
bc901d79 55779+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
55780+ if (file->f_flags & O_NONBLOCK) {
55781+ retval = -EAGAIN;
55782+ goto out;
55783+ }
55784+ if (signal_pending(current)) {
55785+ retval = -ERESTARTSYS;
55786+ goto out;
55787+ }
55788+
55789+ schedule();
55790+ } while (1);
55791+
55792+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
55793+ learn_buffer_user_len = learn_buffer_len;
55794+ retval = learn_buffer_len;
55795+ learn_buffer_len = 0;
55796+
55797+ spin_unlock(&gr_learn_lock);
55798+
55799+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
55800+ retval = -EFAULT;
55801+
bc901d79 55802+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
55803+out:
55804+ set_current_state(TASK_RUNNING);
55805+ remove_wait_queue(&learn_wait, &wait);
55806+ return retval;
55807+}
55808+
55809+static unsigned int
55810+poll_learn(struct file * file, poll_table * wait)
55811+{
55812+ poll_wait(file, &learn_wait, wait);
55813+
55814+ if (learn_buffer_len)
55815+ return (POLLIN | POLLRDNORM);
55816+
55817+ return 0;
55818+}
55819+
55820+void
55821+gr_clear_learn_entries(void)
55822+{
55823+ char *tmp;
55824+
bc901d79 55825+ mutex_lock(&gr_learn_user_mutex);
15a11c5b
MT
55826+ spin_lock(&gr_learn_lock);
55827+ tmp = learn_buffer;
55828+ learn_buffer = NULL;
55829+ spin_unlock(&gr_learn_lock);
55830+ if (tmp)
55831+ vfree(tmp);
58c5fc13
MT
55832+ if (learn_buffer_user != NULL) {
55833+ vfree(learn_buffer_user);
55834+ learn_buffer_user = NULL;
55835+ }
55836+ learn_buffer_len = 0;
bc901d79 55837+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
55838+
55839+ return;
55840+}
55841+
55842+void
55843+gr_add_learn_entry(const char *fmt, ...)
55844+{
55845+ va_list args;
55846+ unsigned int len;
55847+
55848+ if (!gr_learn_attached)
55849+ return;
55850+
55851+ spin_lock(&gr_learn_lock);
55852+
55853+ /* leave a gap at the end so we know when it's "full" but don't have to
55854+ compute the exact length of the string we're trying to append
55855+ */
55856+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
55857+ spin_unlock(&gr_learn_lock);
55858+ wake_up_interruptible(&learn_wait);
55859+ return;
55860+ }
55861+ if (learn_buffer == NULL) {
55862+ spin_unlock(&gr_learn_lock);
55863+ return;
55864+ }
55865+
55866+ va_start(args, fmt);
55867+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
55868+ va_end(args);
55869+
55870+ learn_buffer_len += len + 1;
55871+
55872+ spin_unlock(&gr_learn_lock);
55873+ wake_up_interruptible(&learn_wait);
55874+
55875+ return;
55876+}
55877+
55878+static int
55879+open_learn(struct inode *inode, struct file *file)
55880+{
55881+ if (file->f_mode & FMODE_READ && gr_learn_attached)
55882+ return -EBUSY;
55883+ if (file->f_mode & FMODE_READ) {
55884+ int retval = 0;
bc901d79 55885+ mutex_lock(&gr_learn_user_mutex);
58c5fc13
MT
55886+ if (learn_buffer == NULL)
55887+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
55888+ if (learn_buffer_user == NULL)
55889+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
55890+ if (learn_buffer == NULL) {
55891+ retval = -ENOMEM;
55892+ goto out_error;
55893+ }
55894+ if (learn_buffer_user == NULL) {
55895+ retval = -ENOMEM;
55896+ goto out_error;
55897+ }
55898+ learn_buffer_len = 0;
55899+ learn_buffer_user_len = 0;
55900+ gr_learn_attached = 1;
55901+out_error:
bc901d79 55902+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
55903+ return retval;
55904+ }
55905+ return 0;
55906+}
55907+
55908+static int
55909+close_learn(struct inode *inode, struct file *file)
55910+{
58c5fc13 55911+ if (file->f_mode & FMODE_READ) {
15a11c5b 55912+ char *tmp = NULL;
bc901d79 55913+ mutex_lock(&gr_learn_user_mutex);
15a11c5b
MT
55914+ spin_lock(&gr_learn_lock);
55915+ tmp = learn_buffer;
55916+ learn_buffer = NULL;
55917+ spin_unlock(&gr_learn_lock);
55918+ if (tmp)
58c5fc13 55919+ vfree(tmp);
58c5fc13
MT
55920+ if (learn_buffer_user != NULL) {
55921+ vfree(learn_buffer_user);
55922+ learn_buffer_user = NULL;
55923+ }
55924+ learn_buffer_len = 0;
55925+ learn_buffer_user_len = 0;
55926+ gr_learn_attached = 0;
bc901d79 55927+ mutex_unlock(&gr_learn_user_mutex);
58c5fc13
MT
55928+ }
55929+
55930+ return 0;
55931+}
55932+
55933+const struct file_operations grsec_fops = {
55934+ .read = read_learn,
55935+ .write = write_grsec_handler,
55936+ .open = open_learn,
55937+ .release = close_learn,
55938+ .poll = poll_learn,
55939+};
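
grsec_fops above wires the learn buffer to a character device: poll_learn() signals readability once gr_add_learn_entry() has queued data, and read_learn() copies it out under gr_learn_user_mutex (or returns -EAGAIN for O_NONBLOCK opens). A userspace consumer would presumably loop on poll() and read(); the sketch below shows only that shape, and the /dev/grsec-learn path is a placeholder, not the node the patch actually registers.

/* Hypothetical userspace consumer of the learn interface defined above.
 * The "/dev/grsec-learn" path is a placeholder, not taken from the patch.
 */
#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
        char buf[64 * 1024];
        int fd = open("/dev/grsec-learn", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        for (;;) {
                struct pollfd pfd = { .fd = fd, .events = POLLIN };

                if (poll(&pfd, 1, -1) < 0)              /* poll_learn() reports POLLIN */
                        break;

                ssize_t n = read(fd, buf, sizeof(buf)); /* served by read_learn() */
                if (n <= 0)
                        break;
                fwrite(buf, 1, (size_t)n, stdout);
        }

        close(fd);
        return 0;
}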
fe2de317
MT
55940diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
55941new file mode 100644
55942index 0000000..39645c9
55943--- /dev/null
55944+++ b/grsecurity/gracl_res.c
df50ba0c 55945@@ -0,0 +1,68 @@
58c5fc13
MT
55946+#include <linux/kernel.h>
55947+#include <linux/sched.h>
55948+#include <linux/gracl.h>
55949+#include <linux/grinternal.h>
55950+
55951+static const char *restab_log[] = {
55952+ [RLIMIT_CPU] = "RLIMIT_CPU",
55953+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
55954+ [RLIMIT_DATA] = "RLIMIT_DATA",
55955+ [RLIMIT_STACK] = "RLIMIT_STACK",
55956+ [RLIMIT_CORE] = "RLIMIT_CORE",
55957+ [RLIMIT_RSS] = "RLIMIT_RSS",
55958+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
55959+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
55960+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
55961+ [RLIMIT_AS] = "RLIMIT_AS",
55962+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
55963+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
55964+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
55965+ [RLIMIT_NICE] = "RLIMIT_NICE",
55966+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
55967+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
55968+ [GR_CRASH_RES] = "RLIMIT_CRASH"
55969+};
55970+
55971+void
55972+gr_log_resource(const struct task_struct *task,
55973+ const int res, const unsigned long wanted, const int gt)
55974+{
ae4e228f 55975+ const struct cred *cred;
df50ba0c 55976+ unsigned long rlim;
58c5fc13
MT
55977+
55978+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
55979+ return;
55980+
55981+ // resource not yet supported (no entry in restab_log)
df50ba0c
MT
55982+ if (unlikely(!restab_log[res]))
55983+ return;
55984+
55985+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
55986+ rlim = task_rlimit_max(task, res);
55987+ else
55988+ rlim = task_rlimit(task, res);
55989+
55990+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
58c5fc13
MT
55991+ return;
55992+
ae4e228f
MT
55993+ rcu_read_lock();
55994+ cred = __task_cred(task);
55995+
55996+ if (res == RLIMIT_NPROC &&
55997+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
55998+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
55999+ goto out_rcu_unlock;
56000+ else if (res == RLIMIT_MEMLOCK &&
56001+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
56002+ goto out_rcu_unlock;
56003+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
56004+ goto out_rcu_unlock;
56005+ rcu_read_unlock();
56006+
df50ba0c 56007+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
58c5fc13
MT
56008+
56009+ return;
ae4e228f
MT
56010+out_rcu_unlock:
56011+ rcu_read_unlock();
56012+ return;
58c5fc13 56013+}
fe2de317
MT
56014diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
56015new file mode 100644
56016index 0000000..5556be3
56017--- /dev/null
56018+++ b/grsecurity/gracl_segv.c
66a7e928 56019@@ -0,0 +1,299 @@
58c5fc13
MT
56020+#include <linux/kernel.h>
56021+#include <linux/mm.h>
56022+#include <asm/uaccess.h>
56023+#include <asm/errno.h>
56024+#include <asm/mman.h>
56025+#include <net/sock.h>
56026+#include <linux/file.h>
56027+#include <linux/fs.h>
56028+#include <linux/net.h>
56029+#include <linux/in.h>
58c5fc13
MT
56030+#include <linux/slab.h>
56031+#include <linux/types.h>
56032+#include <linux/sched.h>
56033+#include <linux/timer.h>
56034+#include <linux/gracl.h>
56035+#include <linux/grsecurity.h>
56036+#include <linux/grinternal.h>
56037+
56038+static struct crash_uid *uid_set;
56039+static unsigned short uid_used;
56040+static DEFINE_SPINLOCK(gr_uid_lock);
56041+extern rwlock_t gr_inode_lock;
56042+extern struct acl_subject_label *
56043+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
56044+ struct acl_role_label *role);
16454cff
MT
56045+
56046+#ifdef CONFIG_BTRFS_FS
56047+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56048+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56049+#endif
56050+
56051+static inline dev_t __get_dev(const struct dentry *dentry)
56052+{
56053+#ifdef CONFIG_BTRFS_FS
56054+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56055+ return get_btrfs_dev_from_inode(dentry->d_inode);
56056+ else
56057+#endif
56058+ return dentry->d_inode->i_sb->s_dev;
56059+}
56060+
58c5fc13
MT
56061+int
56062+gr_init_uidset(void)
56063+{
56064+ uid_set =
56065+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
56066+ uid_used = 0;
56067+
56068+ return uid_set ? 1 : 0;
56069+}
56070+
56071+void
56072+gr_free_uidset(void)
56073+{
56074+ if (uid_set)
56075+ kfree(uid_set);
56076+
56077+ return;
56078+}
56079+
56080+int
56081+gr_find_uid(const uid_t uid)
56082+{
56083+ struct crash_uid *tmp = uid_set;
56084+ uid_t buid;
56085+ int low = 0, high = uid_used - 1, mid;
56086+
56087+ while (high >= low) {
56088+ mid = (low + high) >> 1;
56089+ buid = tmp[mid].uid;
56090+ if (buid == uid)
56091+ return mid;
56092+ if (buid > uid)
56093+ high = mid - 1;
56094+ if (buid < uid)
56095+ low = mid + 1;
56096+ }
56097+
56098+ return -1;
56099+}
56100+
56101+static __inline__ void
56102+gr_insertsort(void)
56103+{
56104+ unsigned short i, j;
56105+ struct crash_uid index;
56106+
56107+ for (i = 1; i < uid_used; i++) {
56108+ index = uid_set[i];
56109+ j = i;
56110+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
56111+ uid_set[j] = uid_set[j - 1];
56112+ j--;
56113+ }
56114+ uid_set[j] = index;
56115+ }
56116+
56117+ return;
56118+}
56119+
56120+static __inline__ void
56121+gr_insert_uid(const uid_t uid, const unsigned long expires)
56122+{
56123+ int loc;
56124+
56125+ if (uid_used == GR_UIDTABLE_MAX)
56126+ return;
56127+
56128+ loc = gr_find_uid(uid);
56129+
56130+ if (loc >= 0) {
56131+ uid_set[loc].expires = expires;
56132+ return;
56133+ }
56134+
56135+ uid_set[uid_used].uid = uid;
56136+ uid_set[uid_used].expires = expires;
56137+ uid_used++;
56138+
56139+ gr_insertsort();
56140+
56141+ return;
56142+}
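
gr_find_uid(), gr_insertsort() and gr_insert_uid() above keep uid_set ordered by uid so lookups are a binary search, with an insertion sort run after each append to restore order. The self-contained sketch below exercises the same find-then-insert pattern in userspace; mini_crash_uid, TABLE_MAX and the sample uids are invented for the example.

/* Userspace sketch of the sorted uid table kept by gracl_segv.c.
 * Types and the table size are illustrative, not the kernel's.
 */
#include <stdio.h>

struct mini_crash_uid {
        unsigned int uid;
        unsigned long expires;
};

#define TABLE_MAX 16
static struct mini_crash_uid uid_set[TABLE_MAX];
static unsigned short uid_used;

static int find_uid(unsigned int uid)        /* binary search, as in gr_find_uid() */
{
        int low = 0, high = uid_used - 1;

        while (high >= low) {
                int mid = (low + high) / 2;

                if (uid_set[mid].uid == uid)
                        return mid;
                if (uid_set[mid].uid > uid)
                        high = mid - 1;
                else
                        low = mid + 1;
        }
        return -1;
}

static void insert_uid(unsigned int uid, unsigned long expires)
{
        int loc = find_uid(uid);

        if (loc >= 0) {                      /* already present: refresh expiry */
                uid_set[loc].expires = expires;
                return;
        }
        if (uid_used == TABLE_MAX)
                return;

        uid_set[uid_used].uid = uid;         /* append ... */
        uid_set[uid_used].expires = expires;
        uid_used++;

        for (unsigned short i = 1; i < uid_used; i++) {   /* ... then insertion sort */
                struct mini_crash_uid key = uid_set[i];
                unsigned short j = i;

                while (j > 0 && uid_set[j - 1].uid > key.uid) {
                        uid_set[j] = uid_set[j - 1];
                        j--;
                }
                uid_set[j] = key;
        }
}

int main(void)
{
        insert_uid(1000, 300);
        insert_uid(33, 300);
        insert_uid(1000, 600);               /* updates in place, no duplicate */

        for (unsigned short i = 0; i < uid_used; i++)
                printf("uid %u expires %lu\n", uid_set[i].uid, uid_set[i].expires);
        return 0;                            /* prints uid 33, then uid 1000 */
}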
56143+
56144+void
56145+gr_remove_uid(const unsigned short loc)
56146+{
56147+ unsigned short i;
56148+
56149+ for (i = loc + 1; i < uid_used; i++)
56150+ uid_set[i - 1] = uid_set[i];
56151+
56152+ uid_used--;
56153+
56154+ return;
56155+}
56156+
56157+int
56158+gr_check_crash_uid(const uid_t uid)
56159+{
56160+ int loc;
56161+ int ret = 0;
56162+
56163+ if (unlikely(!gr_acl_is_enabled()))
56164+ return 0;
56165+
56166+ spin_lock(&gr_uid_lock);
56167+ loc = gr_find_uid(uid);
56168+
56169+ if (loc < 0)
56170+ goto out_unlock;
56171+
56172+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
56173+ gr_remove_uid(loc);
56174+ else
56175+ ret = 1;
56176+
56177+out_unlock:
56178+ spin_unlock(&gr_uid_lock);
56179+ return ret;
56180+}
56181+
56182+static __inline__ int
56183+proc_is_setxid(const struct cred *cred)
56184+{
56185+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
56186+ cred->uid != cred->fsuid)
56187+ return 1;
56188+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
56189+ cred->gid != cred->fsgid)
56190+ return 1;
56191+
56192+ return 0;
56193+}
58c5fc13 56194+
71d190be 56195+extern int gr_fake_force_sig(int sig, struct task_struct *t);
58c5fc13
MT
56196+
56197+void
56198+gr_handle_crash(struct task_struct *task, const int sig)
56199+{
56200+ struct acl_subject_label *curr;
58c5fc13 56201+ struct task_struct *tsk, *tsk2;
ae4e228f 56202+ const struct cred *cred;
58c5fc13
MT
56203+ const struct cred *cred2;
56204+
56205+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
56206+ return;
56207+
56208+ if (unlikely(!gr_acl_is_enabled()))
56209+ return;
56210+
56211+ curr = task->acl;
56212+
56213+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
56214+ return;
56215+
56216+ if (time_before_eq(curr->expires, get_seconds())) {
56217+ curr->expires = 0;
56218+ curr->crashes = 0;
56219+ }
56220+
56221+ curr->crashes++;
56222+
56223+ if (!curr->expires)
56224+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
56225+
56226+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56227+ time_after(curr->expires, get_seconds())) {
ae4e228f
MT
56228+ rcu_read_lock();
56229+ cred = __task_cred(task);
58c5fc13
MT
56230+ if (cred->uid && proc_is_setxid(cred)) {
56231+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56232+ spin_lock(&gr_uid_lock);
56233+ gr_insert_uid(cred->uid, curr->expires);
56234+ spin_unlock(&gr_uid_lock);
56235+ curr->expires = 0;
56236+ curr->crashes = 0;
56237+ read_lock(&tasklist_lock);
56238+ do_each_thread(tsk2, tsk) {
56239+ cred2 = __task_cred(tsk);
56240+ if (tsk != task && cred2->uid == cred->uid)
56241+ gr_fake_force_sig(SIGKILL, tsk);
56242+ } while_each_thread(tsk2, tsk);
56243+ read_unlock(&tasklist_lock);
56244+ } else {
56245+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
56246+ read_lock(&tasklist_lock);
6e9df6a3 56247+ read_lock(&grsec_exec_file_lock);
58c5fc13
MT
56248+ do_each_thread(tsk2, tsk) {
56249+ if (likely(tsk != task)) {
6e9df6a3
MT
56250+ // if this thread has the same subject as the one that triggered
56251+ // RES_CRASH and it's the same binary, kill it
56252+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
58c5fc13
MT
56253+ gr_fake_force_sig(SIGKILL, tsk);
56254+ }
56255+ } while_each_thread(tsk2, tsk);
6e9df6a3 56256+ read_unlock(&grsec_exec_file_lock);
58c5fc13
MT
56257+ read_unlock(&tasklist_lock);
56258+ }
ae4e228f 56259+ rcu_read_unlock();
58c5fc13
MT
56260+ }
56261+
56262+ return;
56263+}
56264+
56265+int
56266+gr_check_crash_exec(const struct file *filp)
56267+{
56268+ struct acl_subject_label *curr;
56269+
56270+ if (unlikely(!gr_acl_is_enabled()))
56271+ return 0;
56272+
56273+ read_lock(&gr_inode_lock);
56274+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
16454cff 56275+ __get_dev(filp->f_path.dentry),
58c5fc13
MT
56276+ current->role);
56277+ read_unlock(&gr_inode_lock);
56278+
56279+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
56280+ (!curr->crashes && !curr->expires))
56281+ return 0;
56282+
56283+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
56284+ time_after(curr->expires, get_seconds()))
56285+ return 1;
56286+ else if (time_before_eq(curr->expires, get_seconds())) {
56287+ curr->crashes = 0;
56288+ curr->expires = 0;
56289+ }
56290+
56291+ return 0;
56292+}
56293+
56294+void
56295+gr_handle_alertkill(struct task_struct *task)
56296+{
56297+ struct acl_subject_label *curracl;
56298+ __u32 curr_ip;
56299+ struct task_struct *p, *p2;
56300+
56301+ if (unlikely(!gr_acl_is_enabled()))
56302+ return;
56303+
56304+ curracl = task->acl;
56305+ curr_ip = task->signal->curr_ip;
56306+
56307+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
56308+ read_lock(&tasklist_lock);
56309+ do_each_thread(p2, p) {
56310+ if (p->signal->curr_ip == curr_ip)
56311+ gr_fake_force_sig(SIGKILL, p);
56312+ } while_each_thread(p2, p);
56313+ read_unlock(&tasklist_lock);
56314+ } else if (curracl->mode & GR_KILLPROC)
56315+ gr_fake_force_sig(SIGKILL, task);
56316+
56317+ return;
56318+}
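
gr_handle_crash() and gr_check_crash_exec() above implement a crash-rate window: each qualifying signal bumps curr->crashes, the first crash in a window pushes curr->expires out by the RLIMIT_CRASH interval, and once the count reaches the configured threshold inside that window the subject (or, for setxid processes, the uid) is treated as banned until the window lapses. The model below strips that down to the counter and window alone; crash_window, record_crash and the time source are illustrative, not the patch's names.

/* Minimal model of the crash-counting window used by gracl_segv.c. */
#include <stdio.h>
#include <time.h>

struct crash_window {
        unsigned long crashes;
        time_t expires;
        unsigned long max_crashes;   /* plays the role of rlim_cur above */
        time_t interval;             /* plays the role of rlim_max above */
};

/* Returns 1 when the subject should be treated as banned. */
static int record_crash(struct crash_window *w, time_t now)
{
        if (w->expires <= now) {             /* old window elapsed: reset */
                w->expires = 0;
                w->crashes = 0;
        }

        w->crashes++;
        if (!w->expires)                     /* first crash opens a new window */
                w->expires = now + w->interval;

        return w->crashes >= w->max_crashes && w->expires > now;
}

int main(void)
{
        struct crash_window w = { .max_crashes = 3, .interval = 60 };
        time_t now = time(NULL);

        for (int i = 0; i < 4; i++)          /* banned=0, 0, 1, 1 */
                printf("crash %d -> banned=%d\n", i + 1, record_crash(&w, now));
        return 0;
}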
fe2de317
MT
56319diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
56320new file mode 100644
56321index 0000000..9d83a69
56322--- /dev/null
56323+++ b/grsecurity/gracl_shm.c
df50ba0c 56324@@ -0,0 +1,40 @@
58c5fc13
MT
56325+#include <linux/kernel.h>
56326+#include <linux/mm.h>
56327+#include <linux/sched.h>
56328+#include <linux/file.h>
56329+#include <linux/ipc.h>
56330+#include <linux/gracl.h>
56331+#include <linux/grsecurity.h>
56332+#include <linux/grinternal.h>
56333+
56334+int
56335+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56336+ const time_t shm_createtime, const uid_t cuid, const int shmid)
56337+{
56338+ struct task_struct *task;
56339+
56340+ if (!gr_acl_is_enabled())
56341+ return 1;
56342+
df50ba0c 56343+ rcu_read_lock();
58c5fc13
MT
56344+ read_lock(&tasklist_lock);
56345+
56346+ task = find_task_by_vpid(shm_cprid);
56347+
56348+ if (unlikely(!task))
56349+ task = find_task_by_vpid(shm_lapid);
56350+
56351+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
56352+ (task->pid == shm_lapid)) &&
56353+ (task->acl->mode & GR_PROTSHM) &&
56354+ (task->acl != current->acl))) {
56355+ read_unlock(&tasklist_lock);
df50ba0c 56356+ rcu_read_unlock();
58c5fc13
MT
56357+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
56358+ return 0;
56359+ }
56360+ read_unlock(&tasklist_lock);
df50ba0c 56361+ rcu_read_unlock();
58c5fc13
MT
56362+
56363+ return 1;
56364+}
fe2de317
MT
56365diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
56366new file mode 100644
56367index 0000000..bc0be01
56368--- /dev/null
56369+++ b/grsecurity/grsec_chdir.c
58c5fc13
MT
56370@@ -0,0 +1,19 @@
56371+#include <linux/kernel.h>
56372+#include <linux/sched.h>
56373+#include <linux/fs.h>
56374+#include <linux/file.h>
56375+#include <linux/grsecurity.h>
56376+#include <linux/grinternal.h>
56377+
56378+void
56379+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
56380+{
56381+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
56382+ if ((grsec_enable_chdir && grsec_enable_group &&
56383+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
56384+ !grsec_enable_group)) {
56385+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
56386+ }
56387+#endif
56388+ return;
56389+}
fe2de317
MT
56390diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
56391new file mode 100644
5e856224 56392index 0000000..9807ee2
fe2de317
MT
56393--- /dev/null
56394+++ b/grsecurity/grsec_chroot.c
5e856224 56395@@ -0,0 +1,368 @@
58c5fc13
MT
56396+#include <linux/kernel.h>
56397+#include <linux/module.h>
56398+#include <linux/sched.h>
56399+#include <linux/file.h>
56400+#include <linux/fs.h>
56401+#include <linux/mount.h>
56402+#include <linux/types.h>
5e856224 56403+#include "../fs/mount.h"
58c5fc13
MT
56404+#include <linux/grsecurity.h>
56405+#include <linux/grinternal.h>
56406+
df50ba0c
MT
56407+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
56408+{
56409+#ifdef CONFIG_GRKERNSEC
56410+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
5e856224 56411+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
df50ba0c
MT
56412+ task->gr_is_chrooted = 1;
56413+ else
56414+ task->gr_is_chrooted = 0;
56415+
56416+ task->gr_chroot_dentry = path->dentry;
56417+#endif
56418+ return;
56419+}
56420+
56421+void gr_clear_chroot_entries(struct task_struct *task)
56422+{
56423+#ifdef CONFIG_GRKERNSEC
56424+ task->gr_is_chrooted = 0;
56425+ task->gr_chroot_dentry = NULL;
56426+#endif
56427+ return;
56428+}
56429+
58c5fc13 56430+int
15a11c5b 56431+gr_handle_chroot_unix(const pid_t pid)
58c5fc13
MT
56432+{
56433+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
6892158b 56434+ struct task_struct *p;
58c5fc13
MT
56435+
56436+ if (unlikely(!grsec_enable_chroot_unix))
56437+ return 1;
56438+
56439+ if (likely(!proc_is_chrooted(current)))
56440+ return 1;
56441+
df50ba0c 56442+ rcu_read_lock();
58c5fc13 56443+ read_lock(&tasklist_lock);
15a11c5b 56444+ p = find_task_by_vpid_unrestricted(pid);
71d190be 56445+ if (unlikely(p && !have_same_root(current, p))) {
6892158b
MT
56446+ read_unlock(&tasklist_lock);
56447+ rcu_read_unlock();
56448+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
56449+ return 0;
58c5fc13
MT
56450+ }
56451+ read_unlock(&tasklist_lock);
df50ba0c 56452+ rcu_read_unlock();
58c5fc13
MT
56453+#endif
56454+ return 1;
56455+}
56456+
56457+int
56458+gr_handle_chroot_nice(void)
56459+{
56460+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56461+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
56462+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
56463+ return -EPERM;
56464+ }
56465+#endif
56466+ return 0;
56467+}
56468+
56469+int
56470+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
56471+{
56472+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
56473+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
56474+ && proc_is_chrooted(current)) {
56475+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
56476+ return -EACCES;
56477+ }
56478+#endif
56479+ return 0;
56480+}
56481+
56482+int
56483+gr_handle_chroot_rawio(const struct inode *inode)
56484+{
56485+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56486+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56487+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
56488+ return 1;
56489+#endif
56490+ return 0;
56491+}
56492+
56493+int
57199397
MT
56494+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
56495+{
56496+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56497+ struct task_struct *p;
56498+ int ret = 0;
56499+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
56500+ return ret;
56501+
56502+ read_lock(&tasklist_lock);
56503+ do_each_pid_task(pid, type, p) {
56504+ if (!have_same_root(current, p)) {
56505+ ret = 1;
56506+ goto out;
56507+ }
56508+ } while_each_pid_task(pid, type, p);
56509+out:
56510+ read_unlock(&tasklist_lock);
56511+ return ret;
56512+#endif
56513+ return 0;
56514+}
56515+
56516+int
58c5fc13
MT
56517+gr_pid_is_chrooted(struct task_struct *p)
56518+{
56519+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56520+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
56521+ return 0;
56522+
58c5fc13
MT
56523+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
56524+ !have_same_root(current, p)) {
58c5fc13
MT
56525+ return 1;
56526+ }
58c5fc13
MT
56527+#endif
56528+ return 0;
56529+}
56530+
56531+EXPORT_SYMBOL(gr_pid_is_chrooted);
56532+
56533+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
56534+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
56535+{
16454cff
MT
56536+ struct path path, currentroot;
56537+ int ret = 0;
58c5fc13 56538+
16454cff
MT
56539+ path.dentry = (struct dentry *)u_dentry;
56540+ path.mnt = (struct vfsmount *)u_mnt;
6892158b 56541+ get_fs_root(current->fs, &currentroot);
16454cff
MT
56542+ if (path_is_under(&path, &currentroot))
56543+ ret = 1;
6892158b 56544+ path_put(&currentroot);
58c5fc13 56545+
58c5fc13
MT
56546+ return ret;
56547+}
56548+#endif
56549+
56550+int
56551+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
56552+{
56553+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
56554+ if (!grsec_enable_chroot_fchdir)
56555+ return 1;
56556+
56557+ if (!proc_is_chrooted(current))
56558+ return 1;
56559+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
56560+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
56561+ return 0;
56562+ }
56563+#endif
56564+ return 1;
56565+}
56566+
56567+int
56568+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56569+ const time_t shm_createtime)
56570+{
56571+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
15a11c5b 56572+ struct task_struct *p;
58c5fc13
MT
56573+ time_t starttime;
56574+
56575+ if (unlikely(!grsec_enable_chroot_shmat))
56576+ return 1;
56577+
56578+ if (likely(!proc_is_chrooted(current)))
56579+ return 1;
56580+
df50ba0c 56581+ rcu_read_lock();
58c5fc13
MT
56582+ read_lock(&tasklist_lock);
56583+
15a11c5b 56584+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
58c5fc13 56585+ starttime = p->start_time.tv_sec;
15a11c5b
MT
56586+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
56587+ if (have_same_root(current, p)) {
56588+ goto allow;
56589+ } else {
58c5fc13 56590+ read_unlock(&tasklist_lock);
df50ba0c 56591+ rcu_read_unlock();
58c5fc13
MT
56592+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56593+ return 0;
56594+ }
58c5fc13 56595+ }
15a11c5b 56596+ /* creator exited, pid reuse, fall through to next check */
58c5fc13 56597+ }
15a11c5b
MT
56598+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
56599+ if (unlikely(!have_same_root(current, p))) {
56600+ read_unlock(&tasklist_lock);
56601+ rcu_read_unlock();
56602+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
56603+ return 0;
56604+ }
56605+ }
56606+
56607+allow:
58c5fc13 56608+ read_unlock(&tasklist_lock);
df50ba0c 56609+ rcu_read_unlock();
58c5fc13
MT
56610+#endif
56611+ return 1;
56612+}
56613+
56614+void
56615+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
56616+{
56617+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
56618+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
56619+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
56620+#endif
56621+ return;
56622+}
56623+
56624+int
56625+gr_handle_chroot_mknod(const struct dentry *dentry,
56626+ const struct vfsmount *mnt, const int mode)
56627+{
56628+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
56629+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
56630+ proc_is_chrooted(current)) {
56631+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
56632+ return -EPERM;
56633+ }
56634+#endif
56635+ return 0;
56636+}
56637+
56638+int
56639+gr_handle_chroot_mount(const struct dentry *dentry,
56640+ const struct vfsmount *mnt, const char *dev_name)
56641+{
56642+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
56643+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
15a11c5b 56644+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
58c5fc13
MT
56645+ return -EPERM;
56646+ }
56647+#endif
56648+ return 0;
56649+}
56650+
56651+int
56652+gr_handle_chroot_pivot(void)
56653+{
56654+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
56655+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
56656+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
56657+ return -EPERM;
56658+ }
56659+#endif
56660+ return 0;
56661+}
56662+
56663+int
56664+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
56665+{
56666+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
56667+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
56668+ !gr_is_outside_chroot(dentry, mnt)) {
56669+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
56670+ return -EPERM;
56671+ }
56672+#endif
56673+ return 0;
56674+}
56675+
15a11c5b
MT
56676+extern const char *captab_log[];
56677+extern int captab_log_entries;
56678+
58c5fc13 56679+int
5e856224 56680+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
58c5fc13
MT
56681+{
56682+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
5e856224 56683+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
58c5fc13 56684+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
15a11c5b 56685+ if (cap_raised(chroot_caps, cap)) {
5e856224
MT
56686+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
56687+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
15a11c5b
MT
56688+ }
56689+ return 0;
56690+ }
56691+ }
56692+#endif
56693+ return 1;
56694+}
58c5fc13 56695+
15a11c5b 56696+int
5e856224 56697+gr_chroot_is_capable(const int cap)
15a11c5b
MT
56698+{
56699+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
5e856224
MT
56700+ return gr_task_chroot_is_capable(current, current_cred(), cap);
56701+#endif
56702+ return 1;
56703+}
56704+
56705+int
56706+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
56707+{
56708+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56709+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
15a11c5b
MT
56710+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
56711+ if (cap_raised(chroot_caps, cap)) {
56712+ return 0;
56713+ }
58c5fc13
MT
56714+ }
56715+#endif
15a11c5b 56716+ return 1;
58c5fc13
MT
56717+}
56718+
56719+int
5e856224
MT
56720+gr_chroot_is_capable_nolog(const int cap)
56721+{
56722+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56723+ return gr_task_chroot_is_capable_nolog(current, cap);
56724+#endif
56725+ return 1;
56726+}
56727+
56728+int
58c5fc13
MT
56729+gr_handle_chroot_sysctl(const int op)
56730+{
56731+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
ae4e228f
MT
56732+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
56733+ proc_is_chrooted(current))
58c5fc13
MT
56734+ return -EACCES;
56735+#endif
56736+ return 0;
56737+}
56738+
56739+void
56740+gr_handle_chroot_chdir(struct path *path)
56741+{
56742+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
56743+ if (grsec_enable_chroot_chdir)
56744+ set_fs_pwd(current->fs, path);
56745+#endif
56746+ return;
56747+}
56748+
56749+int
56750+gr_handle_chroot_chmod(const struct dentry *dentry,
56751+ const struct vfsmount *mnt, const int mode)
56752+{
56753+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
bc901d79
MT
56754+ /* allow chmod +s on directories, but not files */
56755+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
58c5fc13
MT
56756+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
56757+ proc_is_chrooted(current)) {
56758+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
56759+ return -EPERM;
56760+ }
56761+#endif
56762+ return 0;
56763+}
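
gr_handle_chroot_chmod() above blocks a chmod from inside a chroot only when it would make a non-directory setuid, or setgid together with group-execute (setgid without group-execute marks mandatory locking rather than an executable, so it is left alone). The sketch below evaluates the same mode expression against a few sample modes; dangerous_chroot_chmod is a made-up name for the test.

/* Illustrative check of the mode test used in gr_handle_chroot_chmod(). */
#include <stdio.h>
#include <sys/stat.h>

static int dangerous_chroot_chmod(mode_t mode)
{
        return (mode & S_ISUID) ||
               ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
        printf("%d\n", dangerous_chroot_chmod(04755));  /* 1: setuid */
        printf("%d\n", dangerous_chroot_chmod(02755));  /* 1: setgid + group exec */
        printf("%d\n", dangerous_chroot_chmod(02644));  /* 0: setgid without group exec */
        printf("%d\n", dangerous_chroot_chmod(00755));  /* 0: no set-id bits */
        return 0;
}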
fe2de317
MT
56764diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
56765new file mode 100644
4c928ab7 56766index 0000000..213ad8b
fe2de317
MT
56767--- /dev/null
56768+++ b/grsecurity/grsec_disabled.c
4c928ab7 56769@@ -0,0 +1,437 @@
58c5fc13
MT
56770+#include <linux/kernel.h>
56771+#include <linux/module.h>
56772+#include <linux/sched.h>
56773+#include <linux/file.h>
56774+#include <linux/fs.h>
56775+#include <linux/kdev_t.h>
56776+#include <linux/net.h>
56777+#include <linux/in.h>
56778+#include <linux/ip.h>
56779+#include <linux/skbuff.h>
56780+#include <linux/sysctl.h>
56781+
56782+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56783+void
56784+pax_set_initial_flags(struct linux_binprm *bprm)
56785+{
56786+ return;
56787+}
56788+#endif
56789+
56790+#ifdef CONFIG_SYSCTL
56791+__u32
56792+gr_handle_sysctl(const struct ctl_table * table, const int op)
56793+{
56794+ return 0;
56795+}
56796+#endif
56797+
56798+#ifdef CONFIG_TASKSTATS
56799+int gr_is_taskstats_denied(int pid)
56800+{
56801+ return 0;
56802+}
56803+#endif
56804+
56805+int
56806+gr_acl_is_enabled(void)
56807+{
56808+ return 0;
56809+}
56810+
6e9df6a3
MT
56811+void
56812+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
56813+{
56814+ return;
56815+}
56816+
58c5fc13
MT
56817+int
56818+gr_handle_rawio(const struct inode *inode)
56819+{
56820+ return 0;
56821+}
56822+
56823+void
56824+gr_acl_handle_psacct(struct task_struct *task, const long code)
56825+{
56826+ return;
56827+}
56828+
56829+int
56830+gr_handle_ptrace(struct task_struct *task, const long request)
56831+{
56832+ return 0;
56833+}
56834+
56835+int
56836+gr_handle_proc_ptrace(struct task_struct *task)
56837+{
56838+ return 0;
56839+}
56840+
56841+void
56842+gr_learn_resource(const struct task_struct *task,
56843+ const int res, const unsigned long wanted, const int gt)
56844+{
56845+ return;
56846+}
56847+
56848+int
56849+gr_set_acls(const int type)
56850+{
56851+ return 0;
56852+}
56853+
56854+int
56855+gr_check_hidden_task(const struct task_struct *tsk)
56856+{
56857+ return 0;
56858+}
56859+
56860+int
56861+gr_check_protected_task(const struct task_struct *task)
56862+{
56863+ return 0;
56864+}
56865+
57199397
MT
56866+int
56867+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
56868+{
56869+ return 0;
56870+}
56871+
58c5fc13
MT
56872+void
56873+gr_copy_label(struct task_struct *tsk)
56874+{
56875+ return;
56876+}
56877+
56878+void
56879+gr_set_pax_flags(struct task_struct *task)
56880+{
56881+ return;
56882+}
56883+
56884+int
56885+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
56886+ const int unsafe_share)
56887+{
56888+ return 0;
56889+}
56890+
56891+void
56892+gr_handle_delete(const ino_t ino, const dev_t dev)
56893+{
56894+ return;
56895+}
56896+
56897+void
56898+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
56899+{
56900+ return;
56901+}
56902+
56903+void
56904+gr_handle_crash(struct task_struct *task, const int sig)
56905+{
56906+ return;
56907+}
56908+
56909+int
56910+gr_check_crash_exec(const struct file *filp)
56911+{
56912+ return 0;
56913+}
56914+
56915+int
56916+gr_check_crash_uid(const uid_t uid)
56917+{
56918+ return 0;
56919+}
56920+
56921+void
56922+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56923+ struct dentry *old_dentry,
56924+ struct dentry *new_dentry,
56925+ struct vfsmount *mnt, const __u8 replace)
56926+{
56927+ return;
56928+}
56929+
56930+int
56931+gr_search_socket(const int family, const int type, const int protocol)
56932+{
56933+ return 1;
56934+}
56935+
56936+int
56937+gr_search_connectbind(const int mode, const struct socket *sock,
56938+ const struct sockaddr_in *addr)
56939+{
56940+ return 0;
56941+}
56942+
58c5fc13
MT
56943+void
56944+gr_handle_alertkill(struct task_struct *task)
56945+{
56946+ return;
56947+}
56948+
56949+__u32
56950+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
56951+{
56952+ return 1;
56953+}
56954+
56955+__u32
56956+gr_acl_handle_hidden_file(const struct dentry * dentry,
56957+ const struct vfsmount * mnt)
56958+{
56959+ return 1;
56960+}
56961+
56962+__u32
56963+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
6e9df6a3 56964+ int acc_mode)
58c5fc13
MT
56965+{
56966+ return 1;
56967+}
56968+
56969+__u32
56970+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
56971+{
56972+ return 1;
56973+}
56974+
56975+__u32
56976+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
56977+{
56978+ return 1;
56979+}
56980+
56981+int
56982+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
56983+ unsigned int *vm_flags)
56984+{
56985+ return 1;
56986+}
56987+
56988+__u32
56989+gr_acl_handle_truncate(const struct dentry * dentry,
56990+ const struct vfsmount * mnt)
56991+{
56992+ return 1;
56993+}
56994+
56995+__u32
56996+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
56997+{
56998+ return 1;
56999+}
57000+
57001+__u32
57002+gr_acl_handle_access(const struct dentry * dentry,
57003+ const struct vfsmount * mnt, const int fmode)
57004+{
57005+ return 1;
57006+}
57007+
57008+__u32
58c5fc13 57009+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
4c928ab7 57010+ umode_t *mode)
58c5fc13
MT
57011+{
57012+ return 1;
57013+}
57014+
57015+__u32
57016+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
57017+{
57018+ return 1;
57019+}
57020+
bc901d79
MT
57021+__u32
57022+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
57023+{
57024+ return 1;
57025+}
57026+
58c5fc13
MT
57027+void
57028+grsecurity_init(void)
57029+{
57030+ return;
57031+}
57032+
4c928ab7
MT
57033+umode_t gr_acl_umask(void)
57034+{
57035+ return 0;
57036+}
57037+
58c5fc13
MT
57038+__u32
57039+gr_acl_handle_mknod(const struct dentry * new_dentry,
57040+ const struct dentry * parent_dentry,
57041+ const struct vfsmount * parent_mnt,
57042+ const int mode)
57043+{
57044+ return 1;
57045+}
57046+
57047+__u32
57048+gr_acl_handle_mkdir(const struct dentry * new_dentry,
57049+ const struct dentry * parent_dentry,
57050+ const struct vfsmount * parent_mnt)
57051+{
57052+ return 1;
57053+}
57054+
57055+__u32
57056+gr_acl_handle_symlink(const struct dentry * new_dentry,
57057+ const struct dentry * parent_dentry,
57058+ const struct vfsmount * parent_mnt, const char *from)
57059+{
57060+ return 1;
57061+}
57062+
57063+__u32
57064+gr_acl_handle_link(const struct dentry * new_dentry,
57065+ const struct dentry * parent_dentry,
57066+ const struct vfsmount * parent_mnt,
57067+ const struct dentry * old_dentry,
57068+ const struct vfsmount * old_mnt, const char *to)
57069+{
57070+ return 1;
57071+}
57072+
57073+int
57074+gr_acl_handle_rename(const struct dentry *new_dentry,
57075+ const struct dentry *parent_dentry,
57076+ const struct vfsmount *parent_mnt,
57077+ const struct dentry *old_dentry,
57078+ const struct inode *old_parent_inode,
57079+ const struct vfsmount *old_mnt, const char *newname)
57080+{
57081+ return 0;
57082+}
57083+
57084+int
57085+gr_acl_handle_filldir(const struct file *file, const char *name,
57086+ const int namelen, const ino_t ino)
57087+{
57088+ return 1;
57089+}
57090+
57091+int
57092+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
57093+ const time_t shm_createtime, const uid_t cuid, const int shmid)
57094+{
57095+ return 1;
57096+}
57097+
57098+int
57099+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
57100+{
57101+ return 0;
57102+}
57103+
57104+int
57105+gr_search_accept(const struct socket *sock)
57106+{
57107+ return 0;
57108+}
57109+
57110+int
57111+gr_search_listen(const struct socket *sock)
57112+{
57113+ return 0;
57114+}
57115+
57116+int
57117+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
57118+{
57119+ return 0;
57120+}
57121+
57122+__u32
57123+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
57124+{
57125+ return 1;
57126+}
57127+
57128+__u32
57129+gr_acl_handle_creat(const struct dentry * dentry,
57130+ const struct dentry * p_dentry,
6e9df6a3 57131+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
58c5fc13
MT
57132+ const int imode)
57133+{
57134+ return 1;
57135+}
57136+
57137+void
57138+gr_acl_handle_exit(void)
57139+{
57140+ return;
57141+}
57142+
57143+int
57144+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
57145+{
57146+ return 1;
57147+}
57148+
57149+void
57150+gr_set_role_label(const uid_t uid, const gid_t gid)
57151+{
57152+ return;
57153+}
57154+
57155+int
57156+gr_acl_handle_procpidmem(const struct task_struct *task)
57157+{
57158+ return 0;
57159+}
57160+
57161+int
57162+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
57163+{
57164+ return 0;
57165+}
57166+
57167+int
57168+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
57169+{
57170+ return 0;
57171+}
57172+
57173+void
57174+gr_set_kernel_label(struct task_struct *task)
57175+{
57176+ return;
57177+}
57178+
57179+int
57180+gr_check_user_change(int real, int effective, int fs)
57181+{
57182+ return 0;
57183+}
57184+
57185+int
57186+gr_check_group_change(int real, int effective, int fs)
57187+{
57188+ return 0;
57189+}
57190+
bc901d79
MT
57191+int gr_acl_enable_at_secure(void)
57192+{
57193+ return 0;
57194+}
57195+
16454cff
MT
57196+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
57197+{
57198+ return dentry->d_inode->i_sb->s_dev;
57199+}
57200+
58c5fc13
MT
57201+EXPORT_SYMBOL(gr_learn_resource);
57202+EXPORT_SYMBOL(gr_set_kernel_label);
57203+#ifdef CONFIG_SECURITY
57204+EXPORT_SYMBOL(gr_check_user_change);
57205+EXPORT_SYMBOL(gr_check_group_change);
57206+#endif
fe2de317
MT
57207diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
57208new file mode 100644
5e856224 57209index 0000000..abfa971
fe2de317
MT
57210--- /dev/null
57211+++ b/grsecurity/grsec_exec.c
5e856224 57212@@ -0,0 +1,174 @@
58c5fc13
MT
57213+#include <linux/kernel.h>
57214+#include <linux/sched.h>
57215+#include <linux/file.h>
57216+#include <linux/binfmts.h>
58c5fc13
MT
57217+#include <linux/fs.h>
57218+#include <linux/types.h>
57219+#include <linux/grdefs.h>
15a11c5b 57220+#include <linux/grsecurity.h>
58c5fc13
MT
57221+#include <linux/grinternal.h>
57222+#include <linux/capability.h>
15a11c5b 57223+#include <linux/module.h>
58c5fc13
MT
57224+
57225+#include <asm/uaccess.h>
57226+
57227+#ifdef CONFIG_GRKERNSEC_EXECLOG
57228+static char gr_exec_arg_buf[132];
bc901d79 57229+static DEFINE_MUTEX(gr_exec_arg_mutex);
58c5fc13
MT
57230+#endif
57231+
15a11c5b 57232+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
58c5fc13
MT
57233+
57234+void
15a11c5b 57235+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
58c5fc13
MT
57236+{
57237+#ifdef CONFIG_GRKERNSEC_EXECLOG
57238+ char *grarg = gr_exec_arg_buf;
57239+ unsigned int i, x, execlen = 0;
57240+ char c;
57241+
57242+ if (!((grsec_enable_execlog && grsec_enable_group &&
57243+ in_group_p(grsec_audit_gid))
57244+ || (grsec_enable_execlog && !grsec_enable_group)))
57245+ return;
57246+
bc901d79 57247+ mutex_lock(&gr_exec_arg_mutex);
58c5fc13
MT
57248+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
57249+
58c5fc13
MT
57250+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
57251+ const char __user *p;
57252+ unsigned int len;
57253+
15a11c5b
MT
57254+ p = get_user_arg_ptr(argv, i);
57255+ if (IS_ERR(p))
58c5fc13 57256+ goto log;
15a11c5b 57257+
58c5fc13
MT
57258+ len = strnlen_user(p, 128 - execlen);
57259+ if (len > 128 - execlen)
57260+ len = 128 - execlen;
57261+ else if (len > 0)
57262+ len--;
57263+ if (copy_from_user(grarg + execlen, p, len))
57264+ goto log;
57265+
57266+ /* rewrite unprintable characters */
57267+ for (x = 0; x < len; x++) {
57268+ c = *(grarg + execlen + x);
57269+ if (c < 32 || c > 126)
57270+ *(grarg + execlen + x) = ' ';
57271+ }
57272+
57273+ execlen += len;
57274+ *(grarg + execlen) = ' ';
57275+ *(grarg + execlen + 1) = '\0';
57276+ execlen++;
57277+ }
57278+
57279+ log:
57280+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
57281+ bprm->file->f_path.mnt, grarg);
bc901d79 57282+ mutex_unlock(&gr_exec_arg_mutex);
58c5fc13
MT
57283+#endif
57284+ return;
57285+}
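
gr_handle_exec_args() above serializes at most 128 bytes of the exec's argv into a fixed buffer under gr_exec_arg_mutex, replacing unprintable bytes with spaces before handing the result to gr_log_fs_str(). The userspace sketch below reproduces just that truncate-and-sanitize step; log_args, LOG_MAX and the sample argv are invented for the example.

/* Userspace sketch of the argv sanitizing done in gr_handle_exec_args(). */
#include <stdio.h>
#include <string.h>

#define LOG_MAX 128

static void log_args(char *out, size_t outsz, int argc, char **argv)
{
        unsigned int execlen = 0;

        memset(out, 0, outsz);
        for (int i = 0; i < argc && execlen < LOG_MAX; i++) {
                size_t len = strlen(argv[i]);

                if (len > LOG_MAX - execlen)
                        len = LOG_MAX - execlen;             /* truncate to the cap */
                memcpy(out + execlen, argv[i], len);

                for (size_t x = 0; x < len; x++)             /* rewrite unprintable characters */
                        if (out[execlen + x] < 32 || out[execlen + x] > 126)
                                out[execlen + x] = ' ';

                execlen += len;
                out[execlen++] = ' ';                        /* argument separator */
        }
        out[execlen] = '\0';
}

int main(void)
{
        char buf[LOG_MAX + 4];
        char *argv[] = { "/bin/echo", "hello\tworld", "x" };

        log_args(buf, sizeof(buf), 3, argv);
        printf("%s\n", buf);    /* "/bin/echo hello world x " */
        return 0;
}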
bc901d79 57286+
15a11c5b
MT
57287+#ifdef CONFIG_GRKERNSEC
57288+extern int gr_acl_is_capable(const int cap);
57289+extern int gr_acl_is_capable_nolog(const int cap);
5e856224
MT
57290+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57291+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
15a11c5b
MT
57292+extern int gr_chroot_is_capable(const int cap);
57293+extern int gr_chroot_is_capable_nolog(const int cap);
5e856224
MT
57294+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
57295+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
15a11c5b 57296+#endif
bc901d79 57297+
15a11c5b
MT
57298+const char *captab_log[] = {
57299+ "CAP_CHOWN",
57300+ "CAP_DAC_OVERRIDE",
57301+ "CAP_DAC_READ_SEARCH",
57302+ "CAP_FOWNER",
57303+ "CAP_FSETID",
57304+ "CAP_KILL",
57305+ "CAP_SETGID",
57306+ "CAP_SETUID",
57307+ "CAP_SETPCAP",
57308+ "CAP_LINUX_IMMUTABLE",
57309+ "CAP_NET_BIND_SERVICE",
57310+ "CAP_NET_BROADCAST",
57311+ "CAP_NET_ADMIN",
57312+ "CAP_NET_RAW",
57313+ "CAP_IPC_LOCK",
57314+ "CAP_IPC_OWNER",
57315+ "CAP_SYS_MODULE",
57316+ "CAP_SYS_RAWIO",
57317+ "CAP_SYS_CHROOT",
57318+ "CAP_SYS_PTRACE",
57319+ "CAP_SYS_PACCT",
57320+ "CAP_SYS_ADMIN",
57321+ "CAP_SYS_BOOT",
57322+ "CAP_SYS_NICE",
57323+ "CAP_SYS_RESOURCE",
57324+ "CAP_SYS_TIME",
57325+ "CAP_SYS_TTY_CONFIG",
57326+ "CAP_MKNOD",
57327+ "CAP_LEASE",
57328+ "CAP_AUDIT_WRITE",
57329+ "CAP_AUDIT_CONTROL",
57330+ "CAP_SETFCAP",
57331+ "CAP_MAC_OVERRIDE",
57332+ "CAP_MAC_ADMIN",
6e9df6a3
MT
57333+ "CAP_SYSLOG",
57334+ "CAP_WAKE_ALARM"
15a11c5b 57335+};
bc901d79 57336+
15a11c5b 57337+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
bc901d79 57338+
15a11c5b
MT
57339+int gr_is_capable(const int cap)
57340+{
57341+#ifdef CONFIG_GRKERNSEC
57342+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
57343+ return 1;
57344+ return 0;
57345+#else
57346+ return 1;
bc901d79 57347+#endif
bc901d79 57348+}
15a11c5b 57349+
5e856224
MT
57350+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
57351+{
57352+#ifdef CONFIG_GRKERNSEC
57353+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
57354+ return 1;
57355+ return 0;
57356+#else
57357+ return 1;
57358+#endif
57359+}
57360+
15a11c5b
MT
57361+int gr_is_capable_nolog(const int cap)
57362+{
57363+#ifdef CONFIG_GRKERNSEC
57364+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
57365+ return 1;
57366+ return 0;
57367+#else
57368+ return 1;
bc901d79 57369+#endif
15a11c5b
MT
57370+}
57371+
5e856224
MT
57372+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
57373+{
57374+#ifdef CONFIG_GRKERNSEC
57375+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
57376+ return 1;
57377+ return 0;
57378+#else
57379+ return 1;
57380+#endif
57381+}
57382+
15a11c5b
MT
57383+EXPORT_SYMBOL(gr_is_capable);
57384+EXPORT_SYMBOL(gr_is_capable_nolog);
5e856224
MT
57385+EXPORT_SYMBOL(gr_task_is_capable);
57386+EXPORT_SYMBOL(gr_task_is_capable_nolog);
fe2de317
MT
57387diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
57388new file mode 100644
57389index 0000000..d3ee748
57390--- /dev/null
57391+++ b/grsecurity/grsec_fifo.c
58c5fc13
MT
57392@@ -0,0 +1,24 @@
57393+#include <linux/kernel.h>
57394+#include <linux/sched.h>
57395+#include <linux/fs.h>
57396+#include <linux/file.h>
57397+#include <linux/grinternal.h>
57398+
57399+int
57400+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
57401+ const struct dentry *dir, const int flag, const int acc_mode)
57402+{
57403+#ifdef CONFIG_GRKERNSEC_FIFO
57404+ const struct cred *cred = current_cred();
57405+
57406+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
57407+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
57408+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
57409+ (cred->fsuid != dentry->d_inode->i_uid)) {
16454cff 57410+ if (!inode_permission(dentry->d_inode, acc_mode))
58c5fc13
MT
57411+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
57412+ return -EACCES;
57413+ }
57414+#endif
57415+ return 0;
57416+}
fe2de317
MT
57417diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
57418new file mode 100644
57419index 0000000..8ca18bf
57420--- /dev/null
57421+++ b/grsecurity/grsec_fork.c
6892158b 57422@@ -0,0 +1,23 @@
58c5fc13
MT
57423+#include <linux/kernel.h>
57424+#include <linux/sched.h>
57425+#include <linux/grsecurity.h>
57426+#include <linux/grinternal.h>
57427+#include <linux/errno.h>
57428+
57429+void
57430+gr_log_forkfail(const int retval)
57431+{
57432+#ifdef CONFIG_GRKERNSEC_FORKFAIL
6892158b
MT
57433+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
57434+ switch (retval) {
57435+ case -EAGAIN:
57436+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
57437+ break;
57438+ case -ENOMEM:
57439+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
57440+ break;
57441+ }
57442+ }
58c5fc13
MT
57443+#endif
57444+ return;
57445+}
fe2de317
MT
57446diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
57447new file mode 100644
572b4308 57448index 0000000..05a6015
fe2de317
MT
57449--- /dev/null
57450+++ b/grsecurity/grsec_init.c
572b4308 57451@@ -0,0 +1,283 @@
58c5fc13
MT
57452+#include <linux/kernel.h>
57453+#include <linux/sched.h>
57454+#include <linux/mm.h>
58c5fc13
MT
57455+#include <linux/gracl.h>
57456+#include <linux/slab.h>
57457+#include <linux/vmalloc.h>
57458+#include <linux/percpu.h>
df50ba0c 57459+#include <linux/module.h>
58c5fc13 57460+
4c928ab7
MT
57461+int grsec_enable_ptrace_readexec;
57462+int grsec_enable_setxid;
572b4308
MT
57463+int grsec_enable_symlinkown;
57464+int grsec_symlinkown_gid;
15a11c5b 57465+int grsec_enable_brute;
58c5fc13
MT
57466+int grsec_enable_link;
57467+int grsec_enable_dmesg;
57468+int grsec_enable_harden_ptrace;
57469+int grsec_enable_fifo;
58c5fc13
MT
57470+int grsec_enable_execlog;
57471+int grsec_enable_signal;
57472+int grsec_enable_forkfail;
ae4e228f 57473+int grsec_enable_audit_ptrace;
58c5fc13
MT
57474+int grsec_enable_time;
57475+int grsec_enable_audit_textrel;
57476+int grsec_enable_group;
57477+int grsec_audit_gid;
57478+int grsec_enable_chdir;
57479+int grsec_enable_mount;
ae4e228f 57480+int grsec_enable_rofs;
58c5fc13
MT
57481+int grsec_enable_chroot_findtask;
57482+int grsec_enable_chroot_mount;
57483+int grsec_enable_chroot_shmat;
57484+int grsec_enable_chroot_fchdir;
57485+int grsec_enable_chroot_double;
57486+int grsec_enable_chroot_pivot;
57487+int grsec_enable_chroot_chdir;
57488+int grsec_enable_chroot_chmod;
57489+int grsec_enable_chroot_mknod;
57490+int grsec_enable_chroot_nice;
57491+int grsec_enable_chroot_execlog;
57492+int grsec_enable_chroot_caps;
57493+int grsec_enable_chroot_sysctl;
57494+int grsec_enable_chroot_unix;
57495+int grsec_enable_tpe;
57496+int grsec_tpe_gid;
ae4e228f 57497+int grsec_enable_blackhole;
df50ba0c
MT
57498+#ifdef CONFIG_IPV6_MODULE
57499+EXPORT_SYMBOL(grsec_enable_blackhole);
57500+#endif
ae4e228f 57501+int grsec_lastack_retries;
58c5fc13 57502+int grsec_enable_tpe_all;
57199397 57503+int grsec_enable_tpe_invert;
58c5fc13
MT
57504+int grsec_enable_socket_all;
57505+int grsec_socket_all_gid;
57506+int grsec_enable_socket_client;
57507+int grsec_socket_client_gid;
57508+int grsec_enable_socket_server;
57509+int grsec_socket_server_gid;
57510+int grsec_resource_logging;
df50ba0c 57511+int grsec_disable_privio;
6892158b 57512+int grsec_enable_log_rwxmaps;
58c5fc13
MT
57513+int grsec_lock;
57514+
57515+DEFINE_SPINLOCK(grsec_alert_lock);
57516+unsigned long grsec_alert_wtime = 0;
57517+unsigned long grsec_alert_fyet = 0;
57518+
57519+DEFINE_SPINLOCK(grsec_audit_lock);
57520+
57521+DEFINE_RWLOCK(grsec_exec_file_lock);
57522+
57523+char *gr_shared_page[4];
57524+
57525+char *gr_alert_log_fmt;
57526+char *gr_audit_log_fmt;
57527+char *gr_alert_log_buf;
57528+char *gr_audit_log_buf;
57529+
57530+extern struct gr_arg *gr_usermode;
57531+extern unsigned char *gr_system_salt;
57532+extern unsigned char *gr_system_sum;
57533+
57534+void __init
57535+grsecurity_init(void)
57536+{
57537+ int j;
57538+ /* create the per-cpu shared pages */
57539+
57540+#ifdef CONFIG_X86
57541+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
57542+#endif
57543+
57544+ for (j = 0; j < 4; j++) {
57545+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
57546+ if (gr_shared_page[j] == NULL) {
57547+ panic("Unable to allocate grsecurity shared page");
57548+ return;
57549+ }
57550+ }
57551+
57552+ /* allocate log buffers */
57553+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
57554+ if (!gr_alert_log_fmt) {
57555+ panic("Unable to allocate grsecurity alert log format buffer");
57556+ return;
57557+ }
57558+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
57559+ if (!gr_audit_log_fmt) {
57560+ panic("Unable to allocate grsecurity audit log format buffer");
57561+ return;
57562+ }
57563+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57564+ if (!gr_alert_log_buf) {
57565+ panic("Unable to allocate grsecurity alert log buffer");
57566+ return;
57567+ }
57568+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
57569+ if (!gr_audit_log_buf) {
57570+ panic("Unable to allocate grsecurity audit log buffer");
57571+ return;
57572+ }
57573+
57574+ /* allocate memory for authentication structure */
57575+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
57576+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
57577+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
57578+
57579+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
57580+ panic("Unable to allocate grsecurity authentication structure");
57581+ return;
57582+ }
57583+
df50ba0c
MT
57584+
57585+#ifdef CONFIG_GRKERNSEC_IO
57586+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
57587+ grsec_disable_privio = 1;
57588+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57589+ grsec_disable_privio = 1;
57590+#else
57591+ grsec_disable_privio = 0;
57592+#endif
57593+#endif
57594+
57199397
MT
57595+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
57596+ /* for backward compatibility, tpe_invert always defaults to on if
57597+ enabled in the kernel
57598+ */
57599+ grsec_enable_tpe_invert = 1;
57600+#endif
57601+
58c5fc13
MT
57602+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
57603+#ifndef CONFIG_GRKERNSEC_SYSCTL
57604+ grsec_lock = 1;
57605+#endif
df50ba0c 57606+
58c5fc13
MT
57607+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
57608+ grsec_enable_audit_textrel = 1;
57609+#endif
6892158b
MT
57610+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
57611+ grsec_enable_log_rwxmaps = 1;
57612+#endif
58c5fc13
MT
57613+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
57614+ grsec_enable_group = 1;
57615+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
57616+#endif
4c928ab7
MT
57617+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
57618+ grsec_enable_ptrace_readexec = 1;
57619+#endif
58c5fc13
MT
57620+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
57621+ grsec_enable_chdir = 1;
57622+#endif
57623+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
57624+ grsec_enable_harden_ptrace = 1;
57625+#endif
57626+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
57627+ grsec_enable_mount = 1;
57628+#endif
57629+#ifdef CONFIG_GRKERNSEC_LINK
57630+ grsec_enable_link = 1;
57631+#endif
15a11c5b
MT
57632+#ifdef CONFIG_GRKERNSEC_BRUTE
57633+ grsec_enable_brute = 1;
57634+#endif
58c5fc13
MT
57635+#ifdef CONFIG_GRKERNSEC_DMESG
57636+ grsec_enable_dmesg = 1;
57637+#endif
ae4e228f
MT
57638+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
57639+ grsec_enable_blackhole = 1;
57640+ grsec_lastack_retries = 4;
57641+#endif
58c5fc13
MT
57642+#ifdef CONFIG_GRKERNSEC_FIFO
57643+ grsec_enable_fifo = 1;
57644+#endif
58c5fc13
MT
57645+#ifdef CONFIG_GRKERNSEC_EXECLOG
57646+ grsec_enable_execlog = 1;
57647+#endif
4c928ab7
MT
57648+#ifdef CONFIG_GRKERNSEC_SETXID
57649+ grsec_enable_setxid = 1;
57650+#endif
58c5fc13
MT
57651+#ifdef CONFIG_GRKERNSEC_SIGNAL
57652+ grsec_enable_signal = 1;
57653+#endif
57654+#ifdef CONFIG_GRKERNSEC_FORKFAIL
57655+ grsec_enable_forkfail = 1;
57656+#endif
57657+#ifdef CONFIG_GRKERNSEC_TIME
57658+ grsec_enable_time = 1;
57659+#endif
57660+#ifdef CONFIG_GRKERNSEC_RESLOG
57661+ grsec_resource_logging = 1;
57662+#endif
57663+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
57664+ grsec_enable_chroot_findtask = 1;
57665+#endif
57666+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
57667+ grsec_enable_chroot_unix = 1;
57668+#endif
57669+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
57670+ grsec_enable_chroot_mount = 1;
57671+#endif
57672+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
57673+ grsec_enable_chroot_fchdir = 1;
57674+#endif
57675+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
57676+ grsec_enable_chroot_shmat = 1;
57677+#endif
ae4e228f
MT
57678+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
57679+ grsec_enable_audit_ptrace = 1;
57680+#endif
58c5fc13
MT
57681+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
57682+ grsec_enable_chroot_double = 1;
57683+#endif
57684+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
57685+ grsec_enable_chroot_pivot = 1;
57686+#endif
57687+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
57688+ grsec_enable_chroot_chdir = 1;
57689+#endif
57690+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
57691+ grsec_enable_chroot_chmod = 1;
57692+#endif
57693+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
57694+ grsec_enable_chroot_mknod = 1;
57695+#endif
57696+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
57697+ grsec_enable_chroot_nice = 1;
57698+#endif
57699+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
57700+ grsec_enable_chroot_execlog = 1;
57701+#endif
57702+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
57703+ grsec_enable_chroot_caps = 1;
57704+#endif
57705+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
57706+ grsec_enable_chroot_sysctl = 1;
57707+#endif
572b4308
MT
57708+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
57709+ grsec_enable_symlinkown = 1;
57710+ grsec_symlinkown_gid = CONFIG_GRKERNSEC_SYMLINKOWN_GID;
57711+#endif
58c5fc13
MT
57712+#ifdef CONFIG_GRKERNSEC_TPE
57713+ grsec_enable_tpe = 1;
57714+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
57715+#ifdef CONFIG_GRKERNSEC_TPE_ALL
57716+ grsec_enable_tpe_all = 1;
57717+#endif
57718+#endif
57719+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
57720+ grsec_enable_socket_all = 1;
57721+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
57722+#endif
57723+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
57724+ grsec_enable_socket_client = 1;
57725+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
57726+#endif
57727+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
57728+ grsec_enable_socket_server = 1;
57729+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
57730+#endif
57731+#endif
57732+
57733+ return;
57734+}
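grsecurity_init() above only seeds compile-time defaults; each flag that has a matching entry in grsecurity_table (grsec_sysctl.c further down in this patch) stays readable and writable at runtime until grsec_lock is set. A minimal userspace sketch for reading one of those defaults back, assuming the table is registered under /proc/sys/kernel/grsecurity/ (the registration point itself is not shown in this hunk):

/* Illustrative sketch, not part of the patch.  The sysctl path is an
 * assumption; adjust it to wherever grsecurity_table is registered. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/grsecurity/tpe";
	FILE *f = fopen(path, "r");
	int val;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("tpe (default set in grsecurity_init): %d\n", val);
	fclose(f);
	return 0;
}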
fe2de317
MT
57735diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
57736new file mode 100644
572b4308 57737index 0000000..35a96d1
fe2de317
MT
57738--- /dev/null
57739+++ b/grsecurity/grsec_link.c
572b4308 57740@@ -0,0 +1,59 @@
58c5fc13
MT
57741+#include <linux/kernel.h>
57742+#include <linux/sched.h>
57743+#include <linux/fs.h>
57744+#include <linux/file.h>
57745+#include <linux/grinternal.h>
57746+
572b4308
MT
57747+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
57748+{
57749+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
57750+ const struct inode *link_inode = link->dentry->d_inode;
57751+
57752+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
57753+ /* ignore root-owned links, e.g. /proc/self */
57754+ link_inode->i_uid &&
57755+ link_inode->i_uid != target->i_uid) {
57756+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
57757+ return 1;
57758+ }
57759+#endif
57760+ return 0;
57761+}
57762+
58c5fc13
MT
57763+int
57764+gr_handle_follow_link(const struct inode *parent,
57765+ const struct inode *inode,
57766+ const struct dentry *dentry, const struct vfsmount *mnt)
57767+{
57768+#ifdef CONFIG_GRKERNSEC_LINK
57769+ const struct cred *cred = current_cred();
57770+
57771+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
57772+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
57773+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
57774+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
57775+ return -EACCES;
57776+ }
57777+#endif
57778+ return 0;
57779+}
57780+
57781+int
57782+gr_handle_hardlink(const struct dentry *dentry,
57783+ const struct vfsmount *mnt,
57784+ struct inode *inode, const int mode, const char *to)
57785+{
57786+#ifdef CONFIG_GRKERNSEC_LINK
57787+ const struct cred *cred = current_cred();
57788+
57789+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
57790+ (!S_ISREG(mode) || (mode & S_ISUID) ||
57791+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
16454cff 57792+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
58c5fc13
MT
57793+ !capable(CAP_FOWNER) && cred->uid) {
57794+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
57795+ return -EPERM;
57796+ }
57797+#endif
57798+ return 0;
57799+}
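gr_handle_follow_link() above implements the classic /tmp hardening rule: a symlink that lives in a sticky, world-writable directory may only be followed if it is owned by the directory's owner or by the process following it. A rough userspace sketch of the same predicate, using geteuid() as a stand-in for the kernel's fsuid:

/* Illustrative predicate mirroring gr_handle_follow_link(); not part
 * of the patch.  geteuid() only approximates cred->fsuid. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <libgen.h>
#include <unistd.h>
#include <sys/stat.h>

static int follow_denied(const struct stat *dir, const struct stat *lnk,
                         uid_t fsuid)
{
	return S_ISLNK(lnk->st_mode) &&
	       (dir->st_mode & S_ISVTX) &&
	       (dir->st_mode & S_IWOTH) &&
	       dir->st_uid != lnk->st_uid &&
	       fsuid != lnk->st_uid;
}

int main(int argc, char **argv)
{
	struct stat dir, lnk;
	char *copy;

	if (argc < 2 || lstat(argv[1], &lnk))
		return 1;
	copy = strdup(argv[1]);
	if (stat(dirname(copy), &dir))
		return 1;
	printf("%s: %s\n", argv[1],
	       follow_denied(&dir, &lnk, geteuid()) ? "denied" : "allowed");
	free(copy);
	return 0;
}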
fe2de317
MT
57800diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
57801new file mode 100644
57802index 0000000..a45d2e9
57803--- /dev/null
57804+++ b/grsecurity/grsec_log.c
6e9df6a3 57805@@ -0,0 +1,322 @@
58c5fc13
MT
57806+#include <linux/kernel.h>
57807+#include <linux/sched.h>
57808+#include <linux/file.h>
57809+#include <linux/tty.h>
57810+#include <linux/fs.h>
57811+#include <linux/grinternal.h>
57812+
df50ba0c
MT
57813+#ifdef CONFIG_TREE_PREEMPT_RCU
57814+#define DISABLE_PREEMPT() preempt_disable()
57815+#define ENABLE_PREEMPT() preempt_enable()
57816+#else
57817+#define DISABLE_PREEMPT()
57818+#define ENABLE_PREEMPT()
57819+#endif
57820+
58c5fc13 57821+#define BEGIN_LOCKS(x) \
df50ba0c 57822+ DISABLE_PREEMPT(); \
ae4e228f 57823+ rcu_read_lock(); \
58c5fc13
MT
57824+ read_lock(&tasklist_lock); \
57825+ read_lock(&grsec_exec_file_lock); \
57826+ if (x != GR_DO_AUDIT) \
57827+ spin_lock(&grsec_alert_lock); \
57828+ else \
57829+ spin_lock(&grsec_audit_lock)
57830+
57831+#define END_LOCKS(x) \
57832+ if (x != GR_DO_AUDIT) \
57833+ spin_unlock(&grsec_alert_lock); \
57834+ else \
57835+ spin_unlock(&grsec_audit_lock); \
57836+ read_unlock(&grsec_exec_file_lock); \
57837+ read_unlock(&tasklist_lock); \
ae4e228f 57838+ rcu_read_unlock(); \
df50ba0c 57839+ ENABLE_PREEMPT(); \
58c5fc13
MT
57840+ if (x == GR_DONT_AUDIT) \
57841+ gr_handle_alertkill(current)
57842+
57843+enum {
57844+ FLOODING,
57845+ NO_FLOODING
57846+};
57847+
57848+extern char *gr_alert_log_fmt;
57849+extern char *gr_audit_log_fmt;
57850+extern char *gr_alert_log_buf;
57851+extern char *gr_audit_log_buf;
57852+
57853+static int gr_log_start(int audit)
57854+{
57855+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
57856+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
57857+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
15a11c5b
MT
57858+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
57859+ unsigned long curr_secs = get_seconds();
58c5fc13
MT
57860+
57861+ if (audit == GR_DO_AUDIT)
57862+ goto set_fmt;
57863+
15a11c5b
MT
57864+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
57865+ grsec_alert_wtime = curr_secs;
58c5fc13 57866+ grsec_alert_fyet = 0;
15a11c5b
MT
57867+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
57868+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
58c5fc13
MT
57869+ grsec_alert_fyet++;
57870+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
15a11c5b 57871+ grsec_alert_wtime = curr_secs;
58c5fc13
MT
57872+ grsec_alert_fyet++;
57873+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
57874+ return FLOODING;
15a11c5b
MT
57875+ }
57876+ else return FLOODING;
58c5fc13
MT
57877+
57878+set_fmt:
15a11c5b 57879+#endif
58c5fc13
MT
57880+ memset(buf, 0, PAGE_SIZE);
57881+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
ae4e228f
MT
57882+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
57883+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
58c5fc13 57884+ } else if (current->signal->curr_ip) {
ae4e228f
MT
57885+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
57886+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
58c5fc13
MT
57887+ } else if (gr_acl_is_enabled()) {
57888+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
57889+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
57890+ } else {
57891+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
57892+ strcpy(buf, fmt);
57893+ }
57894+
57895+ return NO_FLOODING;
57896+}
57897+
57898+static void gr_log_middle(int audit, const char *msg, va_list ap)
57899+ __attribute__ ((format (printf, 2, 0)));
57900+
57901+static void gr_log_middle(int audit, const char *msg, va_list ap)
57902+{
57903+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57904+ unsigned int len = strlen(buf);
57905+
57906+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57907+
57908+ return;
57909+}
57910+
57911+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57912+ __attribute__ ((format (printf, 2, 3)));
57913+
57914+static void gr_log_middle_varargs(int audit, const char *msg, ...)
57915+{
57916+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
57917+ unsigned int len = strlen(buf);
57918+ va_list ap;
57919+
57920+ va_start(ap, msg);
57921+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
57922+ va_end(ap);
57923+
57924+ return;
57925+}
57926+
6e9df6a3 57927+static void gr_log_end(int audit, int append_default)
58c5fc13
MT
57928+{
57929+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
58c5fc13 57930+
6e9df6a3
MT
57931+ if (append_default) {
57932+ unsigned int len = strlen(buf);
57933+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
57934+ }
57935+
58c5fc13
MT
57936+ printk("%s\n", buf);
57937+
57938+ return;
57939+}
57940+
57941+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
57942+{
57943+ int logtype;
57944+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
66a7e928
MT
57945+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
57946+ void *voidptr = NULL;
57947+ int num1 = 0, num2 = 0;
57948+ unsigned long ulong1 = 0, ulong2 = 0;
57949+ struct dentry *dentry = NULL;
57950+ struct vfsmount *mnt = NULL;
57951+ struct file *file = NULL;
57952+ struct task_struct *task = NULL;
58c5fc13
MT
57953+ const struct cred *cred, *pcred;
57954+ va_list ap;
57955+
57956+ BEGIN_LOCKS(audit);
57957+ logtype = gr_log_start(audit);
57958+ if (logtype == FLOODING) {
57959+ END_LOCKS(audit);
57960+ return;
57961+ }
57962+ va_start(ap, argtypes);
57963+ switch (argtypes) {
57964+ case GR_TTYSNIFF:
57965+ task = va_arg(ap, struct task_struct *);
6892158b 57966+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
58c5fc13
MT
57967+ break;
57968+ case GR_SYSCTL_HIDDEN:
57969+ str1 = va_arg(ap, char *);
57970+ gr_log_middle_varargs(audit, msg, result, str1);
57971+ break;
57972+ case GR_RBAC:
57973+ dentry = va_arg(ap, struct dentry *);
57974+ mnt = va_arg(ap, struct vfsmount *);
57975+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
57976+ break;
57977+ case GR_RBAC_STR:
57978+ dentry = va_arg(ap, struct dentry *);
57979+ mnt = va_arg(ap, struct vfsmount *);
57980+ str1 = va_arg(ap, char *);
57981+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
57982+ break;
57983+ case GR_STR_RBAC:
57984+ str1 = va_arg(ap, char *);
57985+ dentry = va_arg(ap, struct dentry *);
57986+ mnt = va_arg(ap, struct vfsmount *);
57987+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
57988+ break;
57989+ case GR_RBAC_MODE2:
57990+ dentry = va_arg(ap, struct dentry *);
57991+ mnt = va_arg(ap, struct vfsmount *);
57992+ str1 = va_arg(ap, char *);
57993+ str2 = va_arg(ap, char *);
57994+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
57995+ break;
57996+ case GR_RBAC_MODE3:
57997+ dentry = va_arg(ap, struct dentry *);
57998+ mnt = va_arg(ap, struct vfsmount *);
57999+ str1 = va_arg(ap, char *);
58000+ str2 = va_arg(ap, char *);
58001+ str3 = va_arg(ap, char *);
58002+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
58003+ break;
58004+ case GR_FILENAME:
58005+ dentry = va_arg(ap, struct dentry *);
58006+ mnt = va_arg(ap, struct vfsmount *);
58007+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
58008+ break;
58009+ case GR_STR_FILENAME:
58010+ str1 = va_arg(ap, char *);
58011+ dentry = va_arg(ap, struct dentry *);
58012+ mnt = va_arg(ap, struct vfsmount *);
58013+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
58014+ break;
58015+ case GR_FILENAME_STR:
58016+ dentry = va_arg(ap, struct dentry *);
58017+ mnt = va_arg(ap, struct vfsmount *);
58018+ str1 = va_arg(ap, char *);
58019+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
58020+ break;
58021+ case GR_FILENAME_TWO_INT:
58022+ dentry = va_arg(ap, struct dentry *);
58023+ mnt = va_arg(ap, struct vfsmount *);
58024+ num1 = va_arg(ap, int);
58025+ num2 = va_arg(ap, int);
58026+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
58027+ break;
58028+ case GR_FILENAME_TWO_INT_STR:
58029+ dentry = va_arg(ap, struct dentry *);
58030+ mnt = va_arg(ap, struct vfsmount *);
58031+ num1 = va_arg(ap, int);
58032+ num2 = va_arg(ap, int);
58033+ str1 = va_arg(ap, char *);
58034+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
58035+ break;
58036+ case GR_TEXTREL:
58037+ file = va_arg(ap, struct file *);
58038+ ulong1 = va_arg(ap, unsigned long);
58039+ ulong2 = va_arg(ap, unsigned long);
58040+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
58041+ break;
58042+ case GR_PTRACE:
58043+ task = va_arg(ap, struct task_struct *);
58044+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
58045+ break;
58046+ case GR_RESOURCE:
58047+ task = va_arg(ap, struct task_struct *);
58048+ cred = __task_cred(task);
6892158b 58049+ pcred = __task_cred(task->real_parent);
58c5fc13
MT
58050+ ulong1 = va_arg(ap, unsigned long);
58051+ str1 = va_arg(ap, char *);
58052+ ulong2 = va_arg(ap, unsigned long);
6892158b 58053+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
58054+ break;
58055+ case GR_CAP:
58056+ task = va_arg(ap, struct task_struct *);
58057+ cred = __task_cred(task);
6892158b 58058+ pcred = __task_cred(task->real_parent);
58c5fc13 58059+ str1 = va_arg(ap, char *);
6892158b 58060+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
58061+ break;
58062+ case GR_SIG:
58063+ str1 = va_arg(ap, char *);
58064+ voidptr = va_arg(ap, void *);
58065+ gr_log_middle_varargs(audit, msg, str1, voidptr);
58066+ break;
58067+ case GR_SIG2:
58068+ task = va_arg(ap, struct task_struct *);
58069+ cred = __task_cred(task);
6892158b 58070+ pcred = __task_cred(task->real_parent);
58c5fc13 58071+ num1 = va_arg(ap, int);
6892158b 58072+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
58073+ break;
58074+ case GR_CRASH1:
58075+ task = va_arg(ap, struct task_struct *);
58076+ cred = __task_cred(task);
6892158b 58077+ pcred = __task_cred(task->real_parent);
58c5fc13 58078+ ulong1 = va_arg(ap, unsigned long);
6892158b 58079+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
58c5fc13
MT
58080+ break;
58081+ case GR_CRASH2:
58082+ task = va_arg(ap, struct task_struct *);
58083+ cred = __task_cred(task);
6892158b 58084+ pcred = __task_cred(task->real_parent);
58c5fc13 58085+ ulong1 = va_arg(ap, unsigned long);
6892158b
MT
58086+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
58087+ break;
58088+ case GR_RWXMAP:
58089+ file = va_arg(ap, struct file *);
58090+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
58c5fc13
MT
58091+ break;
58092+ case GR_PSACCT:
58093+ {
58094+ unsigned int wday, cday;
58095+ __u8 whr, chr;
58096+ __u8 wmin, cmin;
58097+ __u8 wsec, csec;
58098+ char cur_tty[64] = { 0 };
58099+ char parent_tty[64] = { 0 };
58100+
58101+ task = va_arg(ap, struct task_struct *);
58102+ wday = va_arg(ap, unsigned int);
58103+ cday = va_arg(ap, unsigned int);
58104+ whr = va_arg(ap, int);
58105+ chr = va_arg(ap, int);
58106+ wmin = va_arg(ap, int);
58107+ cmin = va_arg(ap, int);
58108+ wsec = va_arg(ap, int);
58109+ csec = va_arg(ap, int);
58110+ ulong1 = va_arg(ap, unsigned long);
58111+ cred = __task_cred(task);
6892158b 58112+ pcred = __task_cred(task->real_parent);
58c5fc13 58113+
6892158b 58114+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
58c5fc13
MT
58115+ }
58116+ break;
58117+ default:
58118+ gr_log_middle(audit, msg, ap);
58119+ }
58120+ va_end(ap);
6e9df6a3
MT
58121+ // these don't need DEFAULTSECARGS printed on the end
58122+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
58123+ gr_log_end(audit, 0);
58124+ else
58125+ gr_log_end(audit, 1);
58c5fc13
MT
58126+ END_LOCKS(audit);
58127+}
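gr_log_start() above rate-limits non-audit messages with a fixed window: once CONFIG_GRKERNSEC_FLOODBURST alerts have been emitted within CONFIG_GRKERNSEC_FLOODTIME seconds, further alerts are dropped until the window is reopened. A slightly simplified, self-contained sketch of that scheme (the constants here are examples, not the Kconfig defaults):

/* Illustrative re-expression of the fixed-window limiter used by
 * gr_log_start(); values and behaviour are simplified. */
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* seconds per window */
#define FLOODBURST 6    /* alerts allowed per window */

static time_t window_start;
static unsigned int emitted;

static int alert_allowed(time_t now)
{
	if (!window_start || now > window_start + FLOODTIME) {
		window_start = now;     /* open a fresh window */
		emitted = 0;
	}
	if (emitted < FLOODBURST) {
		emitted++;
		return 1;               /* log this alert */
	}
	if (emitted == FLOODBURST) {
		emitted++;              /* print the notice only once */
		printf("more alerts, logging disabled for %d seconds\n",
		       FLOODTIME);
	}
	return 0;                       /* suppressed (FLOODING) */
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("alert %2d: %s\n", i,
		       alert_allowed(time(NULL)) ? "logged" : "dropped");
	return 0;
}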
fe2de317
MT
58128diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
58129new file mode 100644
4c928ab7 58130index 0000000..f536303
fe2de317
MT
58131--- /dev/null
58132+++ b/grsecurity/grsec_mem.c
4c928ab7 58133@@ -0,0 +1,40 @@
58c5fc13
MT
58134+#include <linux/kernel.h>
58135+#include <linux/sched.h>
58136+#include <linux/mm.h>
58137+#include <linux/mman.h>
58138+#include <linux/grinternal.h>
58139+
58140+void
58141+gr_handle_ioperm(void)
58142+{
58143+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
58144+ return;
58145+}
58146+
58147+void
58148+gr_handle_iopl(void)
58149+{
58150+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
58151+ return;
58152+}
58153+
58154+void
71d190be 58155+gr_handle_mem_readwrite(u64 from, u64 to)
58c5fc13 58156+{
71d190be 58157+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
58c5fc13
MT
58158+ return;
58159+}
58160+
58161+void
ae4e228f
MT
58162+gr_handle_vm86(void)
58163+{
58164+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
58165+ return;
58166+}
4c928ab7
MT
58167+
58168+void
58169+gr_log_badprocpid(const char *entry)
58170+{
58171+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
58172+ return;
58173+}
fe2de317
MT
58174diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
58175new file mode 100644
58176index 0000000..2131422
58177--- /dev/null
58178+++ b/grsecurity/grsec_mount.c
ae4e228f 58179@@ -0,0 +1,62 @@
58c5fc13
MT
58180+#include <linux/kernel.h>
58181+#include <linux/sched.h>
ae4e228f 58182+#include <linux/mount.h>
58c5fc13
MT
58183+#include <linux/grsecurity.h>
58184+#include <linux/grinternal.h>
58185+
58186+void
58187+gr_log_remount(const char *devname, const int retval)
58188+{
58189+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58190+ if (grsec_enable_mount && (retval >= 0))
58191+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
58192+#endif
58193+ return;
58194+}
58195+
58196+void
58197+gr_log_unmount(const char *devname, const int retval)
58198+{
58199+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58200+ if (grsec_enable_mount && (retval >= 0))
58201+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
58202+#endif
58203+ return;
58204+}
58205+
58206+void
58207+gr_log_mount(const char *from, const char *to, const int retval)
58208+{
58209+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
58210+ if (grsec_enable_mount && (retval >= 0))
15a11c5b 58211+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
58c5fc13
MT
58212+#endif
58213+ return;
58214+}
ae4e228f
MT
58215+
58216+int
58217+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
58218+{
58219+#ifdef CONFIG_GRKERNSEC_ROFS
58220+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
58221+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
58222+ return -EPERM;
58223+ } else
58224+ return 0;
58225+#endif
58226+ return 0;
58227+}
58228+
58229+int
58230+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
58231+{
58232+#ifdef CONFIG_GRKERNSEC_ROFS
58233+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
58234+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
58235+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
58236+ return -EPERM;
58237+ } else
58238+ return 0;
58239+#endif
58240+ return 0;
58241+}
fe2de317
MT
58242diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
58243new file mode 100644
58244index 0000000..a3b12a0
58245--- /dev/null
58246+++ b/grsecurity/grsec_pax.c
6892158b
MT
58247@@ -0,0 +1,36 @@
58248+#include <linux/kernel.h>
58249+#include <linux/sched.h>
58250+#include <linux/mm.h>
58251+#include <linux/file.h>
58252+#include <linux/grinternal.h>
58253+#include <linux/grsecurity.h>
58254+
58255+void
58256+gr_log_textrel(struct vm_area_struct * vma)
58257+{
58258+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
58259+ if (grsec_enable_audit_textrel)
58260+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
58261+#endif
58262+ return;
58263+}
58264+
58265+void
58266+gr_log_rwxmmap(struct file *file)
58267+{
58268+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58269+ if (grsec_enable_log_rwxmaps)
58270+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
58271+#endif
58272+ return;
58273+}
58274+
58275+void
58276+gr_log_rwxmprotect(struct file *file)
58277+{
58278+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58279+ if (grsec_enable_log_rwxmaps)
58280+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
58281+#endif
58282+ return;
58283+}
fe2de317
MT
58284diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
58285new file mode 100644
4c928ab7 58286index 0000000..f7f29aa
fe2de317
MT
58287--- /dev/null
58288+++ b/grsecurity/grsec_ptrace.c
4c928ab7 58289@@ -0,0 +1,30 @@
ae4e228f
MT
58290+#include <linux/kernel.h>
58291+#include <linux/sched.h>
58292+#include <linux/grinternal.h>
4c928ab7 58293+#include <linux/security.h>
ae4e228f
MT
58294+
58295+void
58296+gr_audit_ptrace(struct task_struct *task)
58297+{
58298+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
58299+ if (grsec_enable_audit_ptrace)
58300+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
58301+#endif
58302+ return;
58303+}
4c928ab7
MT
58304+
58305+int
58306+gr_ptrace_readexec(struct file *file, int unsafe_flags)
58307+{
58308+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58309+ const struct dentry *dentry = file->f_path.dentry;
58310+ const struct vfsmount *mnt = file->f_path.mnt;
58311+
58312+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
58313+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
58314+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
58315+ return -EACCES;
58316+ }
58317+#endif
58318+ return 0;
58319+}
fe2de317
MT
58320diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
58321new file mode 100644
4c928ab7 58322index 0000000..7a5b2de
fe2de317
MT
58323--- /dev/null
58324+++ b/grsecurity/grsec_sig.c
4c928ab7 58325@@ -0,0 +1,207 @@
58c5fc13
MT
58326+#include <linux/kernel.h>
58327+#include <linux/sched.h>
58328+#include <linux/delay.h>
58329+#include <linux/grsecurity.h>
58330+#include <linux/grinternal.h>
71d190be 58331+#include <linux/hardirq.h>
58c5fc13
MT
58332+
58333+char *signames[] = {
58334+ [SIGSEGV] = "Segmentation fault",
58335+ [SIGILL] = "Illegal instruction",
58336+ [SIGABRT] = "Abort",
58337+ [SIGBUS] = "Invalid alignment/Bus error"
58338+};
58339+
58340+void
58341+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
58342+{
58343+#ifdef CONFIG_GRKERNSEC_SIGNAL
58344+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
58345+ (sig == SIGABRT) || (sig == SIGBUS))) {
58346+ if (t->pid == current->pid) {
58347+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
58348+ } else {
58349+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
58350+ }
58351+ }
58352+#endif
58353+ return;
58354+}
58355+
58356+int
58357+gr_handle_signal(const struct task_struct *p, const int sig)
58358+{
58359+#ifdef CONFIG_GRKERNSEC
4c928ab7
MT
58360+ /* ignore the 0 signal for protected task checks */
58361+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
58c5fc13
MT
58362+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
58363+ return -EPERM;
58364+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
58365+ return -EPERM;
58366+ }
58367+#endif
58368+ return 0;
58369+}
58370+
71d190be
MT
58371+#ifdef CONFIG_GRKERNSEC
58372+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
58373+
58374+int gr_fake_force_sig(int sig, struct task_struct *t)
58375+{
58376+ unsigned long int flags;
58377+ int ret, blocked, ignored;
58378+ struct k_sigaction *action;
58379+
58380+ spin_lock_irqsave(&t->sighand->siglock, flags);
58381+ action = &t->sighand->action[sig-1];
58382+ ignored = action->sa.sa_handler == SIG_IGN;
58383+ blocked = sigismember(&t->blocked, sig);
58384+ if (blocked || ignored) {
58385+ action->sa.sa_handler = SIG_DFL;
58386+ if (blocked) {
58387+ sigdelset(&t->blocked, sig);
58388+ recalc_sigpending_and_wake(t);
58389+ }
58390+ }
58391+ if (action->sa.sa_handler == SIG_DFL)
58392+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
58393+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
58394+
58395+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
58396+
58397+ return ret;
58398+}
58399+#endif
58400+
58401+#ifdef CONFIG_GRKERNSEC_BRUTE
58402+#define GR_USER_BAN_TIME (15 * 60)
58403+
58404+static int __get_dumpable(unsigned long mm_flags)
58405+{
58406+ int ret;
58407+
58408+ ret = mm_flags & MMF_DUMPABLE_MASK;
58409+ return (ret >= 2) ? 2 : ret;
58410+}
58411+#endif
58412+
58413+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
58c5fc13
MT
58414+{
58415+#ifdef CONFIG_GRKERNSEC_BRUTE
71d190be
MT
58416+ uid_t uid = 0;
58417+
15a11c5b
MT
58418+ if (!grsec_enable_brute)
58419+ return;
58420+
71d190be 58421+ rcu_read_lock();
58c5fc13
MT
58422+ read_lock(&tasklist_lock);
58423+ read_lock(&grsec_exec_file_lock);
6892158b
MT
58424+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
58425+ p->real_parent->brute = 1;
71d190be
MT
58426+ else {
58427+ const struct cred *cred = __task_cred(p), *cred2;
58428+ struct task_struct *tsk, *tsk2;
58429+
58430+ if (!__get_dumpable(mm_flags) && cred->uid) {
58431+ struct user_struct *user;
58432+
58433+ uid = cred->uid;
58434+
58435+ /* this reference is dropped in __gr_process_user_ban() once the ban expires */
58436+ user = find_user(uid);
58437+ if (user == NULL)
58438+ goto unlock;
58439+ user->banned = 1;
58440+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
58441+ if (user->ban_expires == ~0UL)
58442+ user->ban_expires--;
58443+
58444+ do_each_thread(tsk2, tsk) {
58445+ cred2 = __task_cred(tsk);
58446+ if (tsk != p && cred2->uid == uid)
58447+ gr_fake_force_sig(SIGKILL, tsk);
58448+ } while_each_thread(tsk2, tsk);
58449+ }
58450+ }
58451+unlock:
58c5fc13
MT
58452+ read_unlock(&grsec_exec_file_lock);
58453+ read_unlock(&tasklist_lock);
71d190be
MT
58454+ rcu_read_unlock();
58455+
58456+ if (uid)
58457+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
58458+
58c5fc13
MT
58459+#endif
58460+ return;
58461+}
58462+
58463+void gr_handle_brute_check(void)
58464+{
58465+#ifdef CONFIG_GRKERNSEC_BRUTE
58466+ if (current->brute)
58467+ msleep(30 * 1000);
58468+#endif
58469+ return;
58470+}
58471+
71d190be
MT
58472+void gr_handle_kernel_exploit(void)
58473+{
58474+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
58475+ const struct cred *cred;
58476+ struct task_struct *tsk, *tsk2;
58477+ struct user_struct *user;
58478+ uid_t uid;
58479+
58480+ if (in_irq() || in_serving_softirq() || in_nmi())
58481+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
58482+
58483+ uid = current_uid();
58484+
58485+ if (uid == 0)
58486+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
58487+ else {
58488+ /* kill all the processes of this user, hold a reference
58489+ to their creds struct, and prevent them from creating
58490+ another process until system reset
58491+ */
58492+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
58493+ /* we intentionally leak this ref */
58494+ user = get_uid(current->cred->user);
58495+ if (user) {
58496+ user->banned = 1;
58497+ user->ban_expires = ~0UL;
58498+ }
58499+
58500+ read_lock(&tasklist_lock);
58501+ do_each_thread(tsk2, tsk) {
58502+ cred = __task_cred(tsk);
58503+ if (cred->uid == uid)
58504+ gr_fake_force_sig(SIGKILL, tsk);
58505+ } while_each_thread(tsk2, tsk);
58506+ read_unlock(&tasklist_lock);
58507+ }
58508+#endif
58509+}
58510+
66a7e928 58511+int __gr_process_user_ban(struct user_struct *user)
71d190be
MT
58512+{
58513+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
66a7e928 58514+ if (unlikely(user->banned)) {
71d190be
MT
58515+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
58516+ user->banned = 0;
58517+ user->ban_expires = 0;
58518+ free_uid(user);
58519+ } else
58520+ return -EPERM;
58521+ }
58522+#endif
58523+ return 0;
66a7e928 58524+}
71d190be 58525+
66a7e928
MT
58526+int gr_process_user_ban(void)
58527+{
58528+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58529+ return __gr_process_user_ban(current->cred->user);
58530+#endif
58531+ return 0;
71d190be 58532+}
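The bruteforce and kernel-exploit handlers above share one piece of bookkeeping: a banned user_struct carries ban_expires, where a finite ban lasts GR_USER_BAN_TIME seconds and ~0UL marks the permanent ban set by gr_handle_kernel_exploit(); __gr_process_user_ban() clears an expired ban and otherwise makes the caller fail with -EPERM. A small standalone sketch of that bookkeeping (the struct and values below are stand-ins, not kernel types):

/* Illustrative sketch of the ban bookkeeping in grsec_sig.c; the
 * struct below is a stand-in for the real user_struct fields. */
#include <stdio.h>
#include <time.h>

#define GR_USER_BAN_TIME (15 * 60)
#define BAN_PERMANENT    (~0UL)

struct fake_user { unsigned long banned, ban_expires; };

static int process_allowed(struct fake_user *u, unsigned long now)
{
	if (!u->banned)
		return 1;
	if (u->ban_expires != BAN_PERMANENT && now >= u->ban_expires) {
		u->banned = 0;          /* ban has run out */
		u->ban_expires = 0;
		return 1;
	}
	return 0;                       /* still banned: caller returns -EPERM */
}

int main(void)
{
	unsigned long now = (unsigned long)time(NULL);
	struct fake_user u = { 1, now + GR_USER_BAN_TIME };

	printf("during ban:   %s\n",
	       process_allowed(&u, now) ? "allowed" : "banned");
	printf("after expiry: %s\n",
	       process_allowed(&u, now + GR_USER_BAN_TIME + 1) ? "allowed" : "banned");
	return 0;
}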
fe2de317
MT
58533diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
58534new file mode 100644
58535index 0000000..4030d57
58536--- /dev/null
58537+++ b/grsecurity/grsec_sock.c
66a7e928 58538@@ -0,0 +1,244 @@
58c5fc13
MT
58539+#include <linux/kernel.h>
58540+#include <linux/module.h>
58541+#include <linux/sched.h>
58542+#include <linux/file.h>
58543+#include <linux/net.h>
58544+#include <linux/in.h>
58545+#include <linux/ip.h>
58546+#include <net/sock.h>
58547+#include <net/inet_sock.h>
58548+#include <linux/grsecurity.h>
58549+#include <linux/grinternal.h>
58550+#include <linux/gracl.h>
58551+
58c5fc13
MT
58552+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
58553+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
58554+
58555+EXPORT_SYMBOL(gr_search_udp_recvmsg);
58556+EXPORT_SYMBOL(gr_search_udp_sendmsg);
58557+
58558+#ifdef CONFIG_UNIX_MODULE
58559+EXPORT_SYMBOL(gr_acl_handle_unix);
58560+EXPORT_SYMBOL(gr_acl_handle_mknod);
58561+EXPORT_SYMBOL(gr_handle_chroot_unix);
58562+EXPORT_SYMBOL(gr_handle_create);
58563+#endif
58564+
58565+#ifdef CONFIG_GRKERNSEC
58566+#define gr_conn_table_size 32749
58567+struct conn_table_entry {
58568+ struct conn_table_entry *next;
58569+ struct signal_struct *sig;
58570+};
58571+
58572+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
58573+DEFINE_SPINLOCK(gr_conn_table_lock);
58574+
58575+extern const char * gr_socktype_to_name(unsigned char type);
58576+extern const char * gr_proto_to_name(unsigned char proto);
bc901d79 58577+extern const char * gr_sockfamily_to_name(unsigned char family);
58c5fc13
MT
58578+
58579+static __inline__ int
58580+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
58581+{
58582+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
58583+}
58584+
58585+static __inline__ int
58586+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
58587+ __u16 sport, __u16 dport)
58588+{
58589+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
58590+ sig->gr_sport == sport && sig->gr_dport == dport))
58591+ return 1;
58592+ else
58593+ return 0;
58594+}
58595+
58596+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
58597+{
58598+ struct conn_table_entry **match;
58599+ unsigned int index;
58600+
58601+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58602+ sig->gr_sport, sig->gr_dport,
58603+ gr_conn_table_size);
58604+
58605+ newent->sig = sig;
58606+
58607+ match = &gr_conn_table[index];
58608+ newent->next = *match;
58609+ *match = newent;
58610+
58611+ return;
58612+}
58613+
58614+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
58615+{
58616+ struct conn_table_entry *match, *last = NULL;
58617+ unsigned int index;
58618+
58619+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
58620+ sig->gr_sport, sig->gr_dport,
58621+ gr_conn_table_size);
58622+
58623+ match = gr_conn_table[index];
58624+ while (match && !conn_match(match->sig,
58625+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
58626+ sig->gr_dport)) {
58627+ last = match;
58628+ match = match->next;
58629+ }
58630+
58631+ if (match) {
58632+ if (last)
58633+ last->next = match->next;
58634+ else
58635+ gr_conn_table[index] = NULL;
58636+ kfree(match);
58637+ }
58638+
58639+ return;
58640+}
58641+
58642+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
58643+ __u16 sport, __u16 dport)
58644+{
58645+ struct conn_table_entry *match;
58646+ unsigned int index;
58647+
58648+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
58649+
58650+ match = gr_conn_table[index];
58651+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
58652+ match = match->next;
58653+
58654+ if (match)
58655+ return match->sig;
58656+ else
58657+ return NULL;
58658+}
58659+
58660+#endif
58661+
58662+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
58663+{
58664+#ifdef CONFIG_GRKERNSEC
58665+ struct signal_struct *sig = task->signal;
58666+ struct conn_table_entry *newent;
58667+
58668+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
58669+ if (newent == NULL)
58670+ return;
58671+ /* no bh lock needed since we are called with bh disabled */
58672+ spin_lock(&gr_conn_table_lock);
58673+ gr_del_task_from_ip_table_nolock(sig);
ae4e228f
MT
58674+ sig->gr_saddr = inet->inet_rcv_saddr;
58675+ sig->gr_daddr = inet->inet_daddr;
58676+ sig->gr_sport = inet->inet_sport;
58677+ sig->gr_dport = inet->inet_dport;
58c5fc13
MT
58678+ gr_add_to_task_ip_table_nolock(sig, newent);
58679+ spin_unlock(&gr_conn_table_lock);
58680+#endif
58681+ return;
58682+}
58683+
58684+void gr_del_task_from_ip_table(struct task_struct *task)
58685+{
58686+#ifdef CONFIG_GRKERNSEC
58687+ spin_lock_bh(&gr_conn_table_lock);
58688+ gr_del_task_from_ip_table_nolock(task->signal);
58689+ spin_unlock_bh(&gr_conn_table_lock);
58690+#endif
58691+ return;
58692+}
58693+
58694+void
58695+gr_attach_curr_ip(const struct sock *sk)
58696+{
58697+#ifdef CONFIG_GRKERNSEC
58698+ struct signal_struct *p, *set;
58699+ const struct inet_sock *inet = inet_sk(sk);
58700+
58701+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
58702+ return;
58703+
58704+ set = current->signal;
58705+
58706+ spin_lock_bh(&gr_conn_table_lock);
ae4e228f
MT
58707+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
58708+ inet->inet_dport, inet->inet_sport);
58c5fc13
MT
58709+ if (unlikely(p != NULL)) {
58710+ set->curr_ip = p->curr_ip;
58711+ set->used_accept = 1;
58712+ gr_del_task_from_ip_table_nolock(p);
58713+ spin_unlock_bh(&gr_conn_table_lock);
58714+ return;
58715+ }
58716+ spin_unlock_bh(&gr_conn_table_lock);
58717+
ae4e228f 58718+ set->curr_ip = inet->inet_daddr;
58c5fc13
MT
58719+ set->used_accept = 1;
58720+#endif
58721+ return;
58722+}
58723+
58724+int
58725+gr_handle_sock_all(const int family, const int type, const int protocol)
58726+{
58727+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
58728+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
bc901d79
MT
58729+ (family != AF_UNIX)) {
58730+ if (family == AF_INET)
58731+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
58732+ else
58733+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
58c5fc13
MT
58734+ return -EACCES;
58735+ }
58736+#endif
58737+ return 0;
58738+}
58739+
58740+int
58741+gr_handle_sock_server(const struct sockaddr *sck)
58742+{
58743+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58744+ if (grsec_enable_socket_server &&
58745+ in_group_p(grsec_socket_server_gid) &&
58746+ sck && (sck->sa_family != AF_UNIX) &&
58747+ (sck->sa_family != AF_LOCAL)) {
58748+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58749+ return -EACCES;
58750+ }
58751+#endif
58752+ return 0;
58753+}
58754+
58755+int
58756+gr_handle_sock_server_other(const struct sock *sck)
58757+{
58758+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
58759+ if (grsec_enable_socket_server &&
58760+ in_group_p(grsec_socket_server_gid) &&
58761+ sck && (sck->sk_family != AF_UNIX) &&
58762+ (sck->sk_family != AF_LOCAL)) {
58763+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
58764+ return -EACCES;
58765+ }
58766+#endif
58767+ return 0;
58768+}
58769+
58770+int
58771+gr_handle_sock_client(const struct sockaddr *sck)
58772+{
58773+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
58774+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
58775+ sck && (sck->sa_family != AF_UNIX) &&
58776+ (sck->sa_family != AF_LOCAL)) {
58777+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
58778+ return -EACCES;
58779+ }
58780+#endif
58781+ return 0;
58782+}
fe2de317
MT
58783diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
58784new file mode 100644
572b4308 58785index 0000000..f55ef0f
fe2de317
MT
58786--- /dev/null
58787+++ b/grsecurity/grsec_sysctl.c
572b4308 58788@@ -0,0 +1,469 @@
58c5fc13
MT
58789+#include <linux/kernel.h>
58790+#include <linux/sched.h>
58791+#include <linux/sysctl.h>
58792+#include <linux/grsecurity.h>
58793+#include <linux/grinternal.h>
58794+
58795+int
58796+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
58797+{
58798+#ifdef CONFIG_GRKERNSEC_SYSCTL
c6e2a6c8
MT
58799+ if (dirname == NULL || name == NULL)
58800+ return 0;
58c5fc13
MT
58801+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
58802+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
58803+ return -EACCES;
58804+ }
58805+#endif
58806+ return 0;
58807+}
58808+
ae4e228f
MT
58809+#ifdef CONFIG_GRKERNSEC_ROFS
58810+static int __maybe_unused one = 1;
58811+#endif
58812+
58813+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
57199397 58814+struct ctl_table grsecurity_table[] = {
58c5fc13 58815+#ifdef CONFIG_GRKERNSEC_SYSCTL
df50ba0c
MT
58816+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
58817+#ifdef CONFIG_GRKERNSEC_IO
58818+ {
58819+ .procname = "disable_priv_io",
58820+ .data = &grsec_disable_privio,
58821+ .maxlen = sizeof(int),
58822+ .mode = 0600,
58823+ .proc_handler = &proc_dointvec,
58824+ },
58825+#endif
58826+#endif
58c5fc13
MT
58827+#ifdef CONFIG_GRKERNSEC_LINK
58828+ {
58c5fc13
MT
58829+ .procname = "linking_restrictions",
58830+ .data = &grsec_enable_link,
58831+ .maxlen = sizeof(int),
58832+ .mode = 0600,
58833+ .proc_handler = &proc_dointvec,
58834+ },
58835+#endif
572b4308
MT
58836+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
58837+ {
58838+ .procname = "enforce_symlinksifowner",
58839+ .data = &grsec_enable_symlinkown,
58840+ .maxlen = sizeof(int),
58841+ .mode = 0600,
58842+ .proc_handler = &proc_dointvec,
58843+ },
58844+ {
58845+ .procname = "symlinkown_gid",
58846+ .data = &grsec_symlinkown_gid,
58847+ .maxlen = sizeof(int),
58848+ .mode = 0600,
58849+ .proc_handler = &proc_dointvec,
58850+ },
58851+#endif
15a11c5b 58852+#ifdef CONFIG_GRKERNSEC_BRUTE
58c5fc13 58853+ {
15a11c5b
MT
58854+ .procname = "deter_bruteforce",
58855+ .data = &grsec_enable_brute,
58c5fc13
MT
58856+ .maxlen = sizeof(int),
58857+ .mode = 0600,
58858+ .proc_handler = &proc_dointvec,
58859+ },
58860+#endif
15a11c5b 58861+#ifdef CONFIG_GRKERNSEC_FIFO
58c5fc13 58862+ {
15a11c5b
MT
58863+ .procname = "fifo_restrictions",
58864+ .data = &grsec_enable_fifo,
58c5fc13
MT
58865+ .maxlen = sizeof(int),
58866+ .mode = 0600,
58867+ .proc_handler = &proc_dointvec,
58868+ },
58869+#endif
4c928ab7
MT
58870+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
58871+ {
58872+ .procname = "ptrace_readexec",
58873+ .data = &grsec_enable_ptrace_readexec,
58874+ .maxlen = sizeof(int),
58875+ .mode = 0600,
58876+ .proc_handler = &proc_dointvec,
58877+ },
58878+#endif
58879+#ifdef CONFIG_GRKERNSEC_SETXID
58880+ {
58881+ .procname = "consistent_setxid",
58882+ .data = &grsec_enable_setxid,
58883+ .maxlen = sizeof(int),
58884+ .mode = 0600,
58885+ .proc_handler = &proc_dointvec,
58886+ },
58887+#endif
ae4e228f
MT
58888+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
58889+ {
58890+ .procname = "ip_blackhole",
58891+ .data = &grsec_enable_blackhole,
58892+ .maxlen = sizeof(int),
58893+ .mode = 0600,
58894+ .proc_handler = &proc_dointvec,
58895+ },
58896+ {
58897+ .procname = "lastack_retries",
58898+ .data = &grsec_lastack_retries,
58899+ .maxlen = sizeof(int),
58900+ .mode = 0600,
58901+ .proc_handler = &proc_dointvec,
58902+ },
58903+#endif
58c5fc13
MT
58904+#ifdef CONFIG_GRKERNSEC_EXECLOG
58905+ {
58c5fc13
MT
58906+ .procname = "exec_logging",
58907+ .data = &grsec_enable_execlog,
58908+ .maxlen = sizeof(int),
58909+ .mode = 0600,
58910+ .proc_handler = &proc_dointvec,
58911+ },
58912+#endif
6892158b
MT
58913+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
58914+ {
58915+ .procname = "rwxmap_logging",
58916+ .data = &grsec_enable_log_rwxmaps,
58917+ .maxlen = sizeof(int),
58918+ .mode = 0600,
58919+ .proc_handler = &proc_dointvec,
58920+ },
58921+#endif
58c5fc13
MT
58922+#ifdef CONFIG_GRKERNSEC_SIGNAL
58923+ {
58c5fc13
MT
58924+ .procname = "signal_logging",
58925+ .data = &grsec_enable_signal,
58926+ .maxlen = sizeof(int),
58927+ .mode = 0600,
58928+ .proc_handler = &proc_dointvec,
58929+ },
58930+#endif
58931+#ifdef CONFIG_GRKERNSEC_FORKFAIL
58932+ {
58c5fc13
MT
58933+ .procname = "forkfail_logging",
58934+ .data = &grsec_enable_forkfail,
58935+ .maxlen = sizeof(int),
58936+ .mode = 0600,
58937+ .proc_handler = &proc_dointvec,
58938+ },
58939+#endif
58940+#ifdef CONFIG_GRKERNSEC_TIME
58941+ {
58c5fc13
MT
58942+ .procname = "timechange_logging",
58943+ .data = &grsec_enable_time,
58944+ .maxlen = sizeof(int),
58945+ .mode = 0600,
58946+ .proc_handler = &proc_dointvec,
58947+ },
58948+#endif
58949+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
58950+ {
58c5fc13
MT
58951+ .procname = "chroot_deny_shmat",
58952+ .data = &grsec_enable_chroot_shmat,
58953+ .maxlen = sizeof(int),
58954+ .mode = 0600,
58955+ .proc_handler = &proc_dointvec,
58956+ },
58957+#endif
58958+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
58959+ {
58c5fc13
MT
58960+ .procname = "chroot_deny_unix",
58961+ .data = &grsec_enable_chroot_unix,
58962+ .maxlen = sizeof(int),
58963+ .mode = 0600,
58964+ .proc_handler = &proc_dointvec,
58965+ },
58966+#endif
58967+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
58968+ {
58c5fc13
MT
58969+ .procname = "chroot_deny_mount",
58970+ .data = &grsec_enable_chroot_mount,
58971+ .maxlen = sizeof(int),
58972+ .mode = 0600,
58973+ .proc_handler = &proc_dointvec,
58974+ },
58975+#endif
58976+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
58977+ {
58c5fc13
MT
58978+ .procname = "chroot_deny_fchdir",
58979+ .data = &grsec_enable_chroot_fchdir,
58980+ .maxlen = sizeof(int),
58981+ .mode = 0600,
58982+ .proc_handler = &proc_dointvec,
58983+ },
58984+#endif
58985+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
58986+ {
58c5fc13
MT
58987+ .procname = "chroot_deny_chroot",
58988+ .data = &grsec_enable_chroot_double,
58989+ .maxlen = sizeof(int),
58990+ .mode = 0600,
58991+ .proc_handler = &proc_dointvec,
58992+ },
58993+#endif
58994+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
58995+ {
58c5fc13
MT
58996+ .procname = "chroot_deny_pivot",
58997+ .data = &grsec_enable_chroot_pivot,
58998+ .maxlen = sizeof(int),
58999+ .mode = 0600,
59000+ .proc_handler = &proc_dointvec,
59001+ },
59002+#endif
59003+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
59004+ {
58c5fc13
MT
59005+ .procname = "chroot_enforce_chdir",
59006+ .data = &grsec_enable_chroot_chdir,
59007+ .maxlen = sizeof(int),
59008+ .mode = 0600,
59009+ .proc_handler = &proc_dointvec,
59010+ },
59011+#endif
59012+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
59013+ {
58c5fc13
MT
59014+ .procname = "chroot_deny_chmod",
59015+ .data = &grsec_enable_chroot_chmod,
59016+ .maxlen = sizeof(int),
59017+ .mode = 0600,
59018+ .proc_handler = &proc_dointvec,
59019+ },
59020+#endif
59021+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
59022+ {
58c5fc13
MT
59023+ .procname = "chroot_deny_mknod",
59024+ .data = &grsec_enable_chroot_mknod,
59025+ .maxlen = sizeof(int),
59026+ .mode = 0600,
59027+ .proc_handler = &proc_dointvec,
59028+ },
59029+#endif
59030+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
59031+ {
58c5fc13
MT
59032+ .procname = "chroot_restrict_nice",
59033+ .data = &grsec_enable_chroot_nice,
59034+ .maxlen = sizeof(int),
59035+ .mode = 0600,
59036+ .proc_handler = &proc_dointvec,
59037+ },
59038+#endif
59039+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
59040+ {
58c5fc13
MT
59041+ .procname = "chroot_execlog",
59042+ .data = &grsec_enable_chroot_execlog,
59043+ .maxlen = sizeof(int),
59044+ .mode = 0600,
59045+ .proc_handler = &proc_dointvec,
59046+ },
59047+#endif
59048+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
fe2de317
MT
59049+ {
59050+ .procname = "chroot_caps",
59051+ .data = &grsec_enable_chroot_caps,
59052+ .maxlen = sizeof(int),
59053+ .mode = 0600,
59054+ .proc_handler = &proc_dointvec,
59055+ },
59056+#endif
59057+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
59058+ {
59059+ .procname = "chroot_deny_sysctl",
59060+ .data = &grsec_enable_chroot_sysctl,
59061+ .maxlen = sizeof(int),
59062+ .mode = 0600,
59063+ .proc_handler = &proc_dointvec,
59064+ },
59065+#endif
59066+#ifdef CONFIG_GRKERNSEC_TPE
59067+ {
59068+ .procname = "tpe",
59069+ .data = &grsec_enable_tpe,
59070+ .maxlen = sizeof(int),
59071+ .mode = 0600,
59072+ .proc_handler = &proc_dointvec,
59073+ },
59074+ {
59075+ .procname = "tpe_gid",
59076+ .data = &grsec_tpe_gid,
59077+ .maxlen = sizeof(int),
59078+ .mode = 0600,
59079+ .proc_handler = &proc_dointvec,
59080+ },
59081+#endif
59082+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59083+ {
59084+ .procname = "tpe_invert",
59085+ .data = &grsec_enable_tpe_invert,
59086+ .maxlen = sizeof(int),
59087+ .mode = 0600,
59088+ .proc_handler = &proc_dointvec,
59089+ },
59090+#endif
59091+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59092+ {
59093+ .procname = "tpe_restrict_all",
59094+ .data = &grsec_enable_tpe_all,
59095+ .maxlen = sizeof(int),
59096+ .mode = 0600,
59097+ .proc_handler = &proc_dointvec,
59098+ },
59099+#endif
59100+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
59101+ {
59102+ .procname = "socket_all",
59103+ .data = &grsec_enable_socket_all,
59104+ .maxlen = sizeof(int),
59105+ .mode = 0600,
59106+ .proc_handler = &proc_dointvec,
59107+ },
59108+ {
59109+ .procname = "socket_all_gid",
59110+ .data = &grsec_socket_all_gid,
59111+ .maxlen = sizeof(int),
59112+ .mode = 0600,
59113+ .proc_handler = &proc_dointvec,
59114+ },
59115+#endif
59116+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
59117+ {
59118+ .procname = "socket_client",
59119+ .data = &grsec_enable_socket_client,
59120+ .maxlen = sizeof(int),
59121+ .mode = 0600,
59122+ .proc_handler = &proc_dointvec,
59123+ },
59124+ {
59125+ .procname = "socket_client_gid",
59126+ .data = &grsec_socket_client_gid,
59127+ .maxlen = sizeof(int),
59128+ .mode = 0600,
59129+ .proc_handler = &proc_dointvec,
59130+ },
59131+#endif
59132+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
59133+ {
59134+ .procname = "socket_server",
59135+ .data = &grsec_enable_socket_server,
59136+ .maxlen = sizeof(int),
59137+ .mode = 0600,
59138+ .proc_handler = &proc_dointvec,
59139+ },
59140+ {
59141+ .procname = "socket_server_gid",
59142+ .data = &grsec_socket_server_gid,
59143+ .maxlen = sizeof(int),
59144+ .mode = 0600,
59145+ .proc_handler = &proc_dointvec,
59146+ },
59147+#endif
59148+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
59149+ {
59150+ .procname = "audit_group",
59151+ .data = &grsec_enable_group,
59152+ .maxlen = sizeof(int),
59153+ .mode = 0600,
59154+ .proc_handler = &proc_dointvec,
59155+ },
59156+ {
59157+ .procname = "audit_gid",
59158+ .data = &grsec_audit_gid,
59159+ .maxlen = sizeof(int),
59160+ .mode = 0600,
59161+ .proc_handler = &proc_dointvec,
59162+ },
59163+#endif
59164+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
59165+ {
59166+ .procname = "audit_chdir",
59167+ .data = &grsec_enable_chdir,
59168+ .maxlen = sizeof(int),
59169+ .mode = 0600,
59170+ .proc_handler = &proc_dointvec,
59171+ },
59172+#endif
59173+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
59174+ {
59175+ .procname = "audit_mount",
59176+ .data = &grsec_enable_mount,
59177+ .maxlen = sizeof(int),
59178+ .mode = 0600,
59179+ .proc_handler = &proc_dointvec,
59180+ },
59181+#endif
59182+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
59183+ {
59184+ .procname = "audit_textrel",
59185+ .data = &grsec_enable_audit_textrel,
59186+ .maxlen = sizeof(int),
59187+ .mode = 0600,
59188+ .proc_handler = &proc_dointvec,
59189+ },
59190+#endif
59191+#ifdef CONFIG_GRKERNSEC_DMESG
59192+ {
59193+ .procname = "dmesg",
59194+ .data = &grsec_enable_dmesg,
59195+ .maxlen = sizeof(int),
59196+ .mode = 0600,
59197+ .proc_handler = &proc_dointvec,
59198+ },
59199+#endif
59200+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59201+ {
59202+ .procname = "chroot_findtask",
59203+ .data = &grsec_enable_chroot_findtask,
59204+ .maxlen = sizeof(int),
59205+ .mode = 0600,
59206+ .proc_handler = &proc_dointvec,
59207+ },
59208+#endif
59209+#ifdef CONFIG_GRKERNSEC_RESLOG
59210+ {
59211+ .procname = "resource_logging",
59212+ .data = &grsec_resource_logging,
59213+ .maxlen = sizeof(int),
59214+ .mode = 0600,
59215+ .proc_handler = &proc_dointvec,
59216+ },
59217+#endif
59218+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
59219+ {
59220+ .procname = "audit_ptrace",
59221+ .data = &grsec_enable_audit_ptrace,
59222+ .maxlen = sizeof(int),
59223+ .mode = 0600,
59224+ .proc_handler = &proc_dointvec,
59225+ },
59226+#endif
59227+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59228+ {
59229+ .procname = "harden_ptrace",
59230+ .data = &grsec_enable_harden_ptrace,
59231+ .maxlen = sizeof(int),
59232+ .mode = 0600,
59233+ .proc_handler = &proc_dointvec,
59234+ },
59235+#endif
59236+ {
59237+ .procname = "grsec_lock",
59238+ .data = &grsec_lock,
59239+ .maxlen = sizeof(int),
59240+ .mode = 0600,
59241+ .proc_handler = &proc_dointvec,
59242+ },
59243+#endif
59244+#ifdef CONFIG_GRKERNSEC_ROFS
59245+ {
59246+ .procname = "romount_protect",
59247+ .data = &grsec_enable_rofs,
59248+ .maxlen = sizeof(int),
59249+ .mode = 0600,
59250+ .proc_handler = &proc_dointvec_minmax,
59251+ .extra1 = &one,
59252+ .extra2 = &one,
59253+ },
59254+#endif
59255+ { }
59256+};
59257+#endif
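Every tunable above is exposed with mode 0600 and proc_dointvec, so root can adjust it at runtime; once grsec_lock is set, gr_handle_sysctl_mod() at the top of this file rejects further writes under the "grsecurity" sysctl directory with -EACCES. A minimal sketch of locking the configuration from userspace, assuming the table ends up under /proc/sys/kernel/grsecurity/ (the registration point is outside this hunk):

/* Illustrative sketch, not part of the patch.  The path below is an
 * assumption about where grsecurity_table is registered. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/grsecurity/grsec_lock";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);	/* subsequent writes in this directory now fail */
	fclose(f);
	return 0;
}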
59258diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
59259new file mode 100644
59260index 0000000..0dc13c3
59261--- /dev/null
59262+++ b/grsecurity/grsec_time.c
59263@@ -0,0 +1,16 @@
59264+#include <linux/kernel.h>
59265+#include <linux/sched.h>
59266+#include <linux/grinternal.h>
59267+#include <linux/module.h>
58c5fc13 59268+
fe2de317
MT
59269+void
59270+gr_log_timechange(void)
59271+{
59272+#ifdef CONFIG_GRKERNSEC_TIME
59273+ if (grsec_enable_time)
59274+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
59275+#endif
59276+ return;
59277+}
58c5fc13 59278+
fe2de317
MT
59279+EXPORT_SYMBOL(gr_log_timechange);
59280diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
59281new file mode 100644
4c928ab7 59282index 0000000..07e0dc0
fe2de317
MT
59283--- /dev/null
59284+++ b/grsecurity/grsec_tpe.c
4c928ab7 59285@@ -0,0 +1,73 @@
fe2de317
MT
59286+#include <linux/kernel.h>
59287+#include <linux/sched.h>
59288+#include <linux/file.h>
59289+#include <linux/fs.h>
59290+#include <linux/grinternal.h>
58c5fc13 59291+
fe2de317 59292+extern int gr_acl_tpe_check(void);
58c5fc13 59293+
fe2de317
MT
59294+int
59295+gr_tpe_allow(const struct file *file)
59296+{
59297+#ifdef CONFIG_GRKERNSEC
59298+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
59299+ const struct cred *cred = current_cred();
4c928ab7
MT
59300+ char *msg = NULL;
59301+ char *msg2 = NULL;
59302+
59303+ // never restrict root
59304+ if (!cred->uid)
59305+ return 1;
58c5fc13 59306+
4c928ab7 59307+ if (grsec_enable_tpe) {
fe2de317 59308+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
59309+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
59310+ msg = "not being in trusted group";
59311+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
59312+ msg = "being in untrusted group";
fe2de317 59313+#else
59314+ if (in_group_p(grsec_tpe_gid))
59315+ msg = "being in untrusted group";
fe2de317 59316+#endif
59317+ }
59318+ if (!msg && gr_acl_tpe_check())
59319+ msg = "being in untrusted role";
59320+
59321+ // not in any affected group/role
59322+ if (!msg)
59323+ goto next_check;
59324+
59325+ if (inode->i_uid)
59326+ msg2 = "file in non-root-owned directory";
59327+ else if (inode->i_mode & S_IWOTH)
59328+ msg2 = "file in world-writable directory";
59329+ else if (inode->i_mode & S_IWGRP)
59330+ msg2 = "file in group-writable directory";
59331+
59332+ if (msg && msg2) {
59333+ char fullmsg[70] = {0};
59334+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
59335+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
59336+ return 0;
59337+ }
59338+ msg = NULL;
59339+next_check:
fe2de317 59340+#ifdef CONFIG_GRKERNSEC_TPE_ALL
59341+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
59342+ return 1;
59343+
59344+ if (inode->i_uid && (inode->i_uid != cred->uid))
59345+ msg = "directory not owned by user";
59346+ else if (inode->i_mode & S_IWOTH)
59347+ msg = "file in world-writable directory";
59348+ else if (inode->i_mode & S_IWGRP)
59349+ msg = "file in group-writable directory";
59350+
59351+ if (msg) {
59352+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
59353+ return 0;
59354+ }
59355+#endif
59356+#endif
59357+ return 1;
59358+}
59359diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
59360new file mode 100644
59361index 0000000..9f7b1ac
59362--- /dev/null
59363+++ b/grsecurity/grsum.c
59364@@ -0,0 +1,61 @@
59365+#include <linux/err.h>
59366+#include <linux/kernel.h>
59367+#include <linux/sched.h>
59368+#include <linux/mm.h>
59369+#include <linux/scatterlist.h>
59370+#include <linux/crypto.h>
59371+#include <linux/gracl.h>
58c5fc13 59372+
58c5fc13 59373+
59374+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
59375+#error "crypto and sha256 must be built into the kernel"
59376+#endif
58c5fc13 59377+
59378+int
59379+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
59380+{
59381+ char *p;
59382+ struct crypto_hash *tfm;
59383+ struct hash_desc desc;
59384+ struct scatterlist sg;
59385+ unsigned char temp_sum[GR_SHA_LEN];
59386+ volatile int retval = 0;
59387+ volatile int dummy = 0;
59388+ unsigned int i;
57199397 59389+
fe2de317 59390+ sg_init_table(&sg, 1);
57199397 59391+
59392+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
59393+ if (IS_ERR(tfm)) {
59394+ /* should never happen, since sha256 should be built in */
59395+ return 1;
59396+ }
57199397 59397+
59398+ desc.tfm = tfm;
59399+ desc.flags = 0;
57199397 59400+
fe2de317 59401+ crypto_hash_init(&desc);
57199397 59402+
59403+ p = salt;
59404+ sg_set_buf(&sg, p, GR_SALT_LEN);
59405+ crypto_hash_update(&desc, &sg, sg.length);
57199397 59406+
59407+ p = entry->pw;
59408+ sg_set_buf(&sg, p, strlen(p));
59409+
59410+ crypto_hash_update(&desc, &sg, sg.length);
57199397 59411+
fe2de317 59412+ crypto_hash_final(&desc, temp_sum);
57199397 59413+
fe2de317 59414+ memset(entry->pw, 0, GR_PW_LEN);
57199397 59415+
59416+ for (i = 0; i < GR_SHA_LEN; i++)
59417+ if (sum[i] != temp_sum[i])
59418+ retval = 1;
59419+ else
59420+ dummy = 1; // waste a cycle
15a11c5b 59421+
fe2de317 59422+ crypto_free_hash(tfm);
57199397 59423+
59424+ return retval;
59425+}
59426diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
c6e2a6c8 59427index f1c8ca6..b5c1cc7 100644
59428--- a/include/acpi/acpi_bus.h
59429+++ b/include/acpi/acpi_bus.h
59430@@ -107,7 +107,7 @@ struct acpi_device_ops {
59431 acpi_op_bind bind;
59432 acpi_op_unbind unbind;
59433 acpi_op_notify notify;
59434-};
59435+} __no_const;
59436
59437 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
59438
fe2de317 59439diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
572b4308 59440index b7babf0..3ba8aee 100644
59441--- a/include/asm-generic/atomic-long.h
59442+++ b/include/asm-generic/atomic-long.h
59443@@ -22,6 +22,12 @@
59444
59445 typedef atomic64_t atomic_long_t;
59446
59447+#ifdef CONFIG_PAX_REFCOUNT
59448+typedef atomic64_unchecked_t atomic_long_unchecked_t;
59449+#else
59450+typedef atomic64_t atomic_long_unchecked_t;
59451+#endif
58c5fc13 59452+
ae4e228f 59453 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
58c5fc13 59454
ae4e228f 59455 static inline long atomic_long_read(atomic_long_t *l)
fe2de317 59456@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59457 return (long)atomic64_read(v);
59458 }
59459
59460+#ifdef CONFIG_PAX_REFCOUNT
59461+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59462+{
59463+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
58c5fc13 59464+
59465+ return (long)atomic64_read_unchecked(v);
59466+}
59467+#endif
59468+
59469 static inline void atomic_long_set(atomic_long_t *l, long i)
59470 {
59471 atomic64_t *v = (atomic64_t *)l;
fe2de317 59472@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59473 atomic64_set(v, i);
59474 }
58c5fc13 59475
59476+#ifdef CONFIG_PAX_REFCOUNT
59477+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59478+{
59479+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59480+
59481+ atomic64_set_unchecked(v, i);
59482+}
59483+#endif
59484+
59485 static inline void atomic_long_inc(atomic_long_t *l)
59486 {
59487 atomic64_t *v = (atomic64_t *)l;
fe2de317 59488@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
ae4e228f 59489 atomic64_inc(v);
59490 }
59491
59492+#ifdef CONFIG_PAX_REFCOUNT
59493+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
58c5fc13 59494+{
59495+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59496+
59497+ atomic64_inc_unchecked(v);
58c5fc13 59498+}
ae4e228f 59499+#endif
58c5fc13 59500+
ae4e228f 59501 static inline void atomic_long_dec(atomic_long_t *l)
58c5fc13 59502 {
ae4e228f 59503 atomic64_t *v = (atomic64_t *)l;
fe2de317 59504@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59505 atomic64_dec(v);
59506 }
59507
59508+#ifdef CONFIG_PAX_REFCOUNT
59509+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59510+{
59511+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59512+
59513+ atomic64_dec_unchecked(v);
59514+}
59515+#endif
59516+
59517 static inline void atomic_long_add(long i, atomic_long_t *l)
59518 {
59519 atomic64_t *v = (atomic64_t *)l;
fe2de317 59520@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
ae4e228f 59521 atomic64_add(i, v);
59522 }
59523
59524+#ifdef CONFIG_PAX_REFCOUNT
59525+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
58c5fc13 59526+{
59527+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59528+
59529+ atomic64_add_unchecked(i, v);
58c5fc13 59530+}
ae4e228f 59531+#endif
58c5fc13 59532+
ae4e228f 59533 static inline void atomic_long_sub(long i, atomic_long_t *l)
58c5fc13 59534 {
ae4e228f 59535 atomic64_t *v = (atomic64_t *)l;
fe2de317 59536@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59537 atomic64_sub(i, v);
59538 }
59539
59540+#ifdef CONFIG_PAX_REFCOUNT
59541+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59542+{
59543+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59544+
59545+ atomic64_sub_unchecked(i, v);
59546+}
59547+#endif
59548+
59549 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59550 {
59551 atomic64_t *v = (atomic64_t *)l;
fe2de317 59552@@ -115,6 +175,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
ae4e228f 59553 return (long)atomic64_inc_return(v);
59554 }
59555
59556+#ifdef CONFIG_PAX_REFCOUNT
59557+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
58c5fc13 59558+{
59559+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
59560+
59561+ return (long)atomic64_inc_return_unchecked(v);
58c5fc13 59562+}
ae4e228f 59563+#endif
58c5fc13 59564+
59565 static inline long atomic_long_dec_return(atomic_long_t *l)
59566 {
59567 atomic64_t *v = (atomic64_t *)l;
fe2de317 59568@@ -140,6 +209,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59569
59570 typedef atomic_t atomic_long_t;
59571
59572+#ifdef CONFIG_PAX_REFCOUNT
59573+typedef atomic_unchecked_t atomic_long_unchecked_t;
59574+#else
59575+typedef atomic_t atomic_long_unchecked_t;
59576+#endif
59577+
59578 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
59579 static inline long atomic_long_read(atomic_long_t *l)
59580 {
fe2de317 59581@@ -148,6 +223,15 @@ static inline long atomic_long_read(atomic_long_t *l)
59582 return (long)atomic_read(v);
59583 }
59584
59585+#ifdef CONFIG_PAX_REFCOUNT
59586+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
59587+{
59588+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59589+
59590+ return (long)atomic_read_unchecked(v);
59591+}
59592+#endif
59593+
59594 static inline void atomic_long_set(atomic_long_t *l, long i)
59595 {
59596 atomic_t *v = (atomic_t *)l;
fe2de317 59597@@ -155,6 +239,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
59598 atomic_set(v, i);
59599 }
59600
59601+#ifdef CONFIG_PAX_REFCOUNT
59602+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
59603+{
59604+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59605+
59606+ atomic_set_unchecked(v, i);
59607+}
59608+#endif
59609+
59610 static inline void atomic_long_inc(atomic_long_t *l)
59611 {
59612 atomic_t *v = (atomic_t *)l;
fe2de317 59613@@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
59614 atomic_inc(v);
59615 }
59616
59617+#ifdef CONFIG_PAX_REFCOUNT
59618+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
59619+{
59620+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59621+
59622+ atomic_inc_unchecked(v);
59623+}
59624+#endif
59625+
59626 static inline void atomic_long_dec(atomic_long_t *l)
59627 {
59628 atomic_t *v = (atomic_t *)l;
fe2de317 59629@@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
59630 atomic_dec(v);
59631 }
59632
59633+#ifdef CONFIG_PAX_REFCOUNT
59634+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
59635+{
59636+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59637+
59638+ atomic_dec_unchecked(v);
59639+}
59640+#endif
59641+
59642 static inline void atomic_long_add(long i, atomic_long_t *l)
59643 {
59644 atomic_t *v = (atomic_t *)l;
fe2de317 59645@@ -176,6 +287,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
59646 atomic_add(i, v);
59647 }
59648
59649+#ifdef CONFIG_PAX_REFCOUNT
59650+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
59651+{
59652+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59653+
59654+ atomic_add_unchecked(i, v);
59655+}
59656+#endif
59657+
59658 static inline void atomic_long_sub(long i, atomic_long_t *l)
58c5fc13 59659 {
ae4e228f 59660 atomic_t *v = (atomic_t *)l;
fe2de317 59661@@ -183,6 +303,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
59662 atomic_sub(i, v);
59663 }
59664
59665+#ifdef CONFIG_PAX_REFCOUNT
59666+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
59667+{
59668+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59669+
59670+ atomic_sub_unchecked(i, v);
59671+}
59672+#endif
59673+
59674 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
59675 {
59676 atomic_t *v = (atomic_t *)l;
fe2de317 59677@@ -232,6 +361,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
59678 return (long)atomic_inc_return(v);
59679 }
59680
59681+#ifdef CONFIG_PAX_REFCOUNT
59682+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
59683+{
59684+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
59685+
59686+ return (long)atomic_inc_return_unchecked(v);
59687+}
59688+#endif
59689+
59690 static inline long atomic_long_dec_return(atomic_long_t *l)
59691 {
59692 atomic_t *v = (atomic_t *)l;
572b4308 59693@@ -255,4 +393,55 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
59694
59695 #endif /* BITS_PER_LONG == 64 */
59696
59697+#ifdef CONFIG_PAX_REFCOUNT
59698+static inline void pax_refcount_needs_these_functions(void)
59699+{
59700+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
59701+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
59702+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
59703+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
59704+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
15a11c5b 59705+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57199397 59706+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
6892158b 59707+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
59708+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
59709+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
15a11c5b 59710+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
59711+#ifdef CONFIG_X86
59712+ atomic_clear_mask_unchecked(0, NULL);
59713+ atomic_set_mask_unchecked(0, NULL);
59714+#endif
59715+
59716+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
59717+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
59718+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
6892158b 59719+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
59720+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
59721+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
df50ba0c 59722+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
59723+}
59724+#else
59725+#define atomic_read_unchecked(v) atomic_read(v)
59726+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
59727+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
59728+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
59729+#define atomic_inc_unchecked(v) atomic_inc(v)
66a7e928 59730+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57199397 59731+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
6892158b 59732+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
59733+#define atomic_dec_unchecked(v) atomic_dec(v)
59734+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
59735+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
59736+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
59737+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
59738+
59739+#define atomic_long_read_unchecked(v) atomic_long_read(v)
59740+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
59741+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
6892158b 59742+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
59743+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
59744+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
df50ba0c 59745+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
59746+#endif
59747+
59748 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
59749diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
59750index 1ced641..c896ee8 100644
59751--- a/include/asm-generic/atomic.h
59752+++ b/include/asm-generic/atomic.h
59753@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
59754 * Atomically clears the bits set in @mask from @v
59755 */
59756 #ifndef atomic_clear_mask
59757-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
59758+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
59759 {
59760 unsigned long flags;
59761
59762diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
59763index b18ce4f..2ee2843 100644
59764--- a/include/asm-generic/atomic64.h
59765+++ b/include/asm-generic/atomic64.h
59766@@ -16,6 +16,8 @@ typedef struct {
59767 long long counter;
59768 } atomic64_t;
59769
59770+typedef atomic64_t atomic64_unchecked_t;
59771+
59772 #define ATOMIC64_INIT(i) { (i) }
59773
59774 extern long long atomic64_read(const atomic64_t *v);
59775@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
59776 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
59777 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
59778
59779+#define atomic64_read_unchecked(v) atomic64_read(v)
59780+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
59781+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
59782+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
59783+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
59784+#define atomic64_inc_unchecked(v) atomic64_inc(v)
59785+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
59786+#define atomic64_dec_unchecked(v) atomic64_dec(v)
59787+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
59788+
59789 #endif /* _ASM_GENERIC_ATOMIC64_H */
59790diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
59791index 1bfcfe5..e04c5c9 100644
59792--- a/include/asm-generic/cache.h
59793+++ b/include/asm-generic/cache.h
59794@@ -6,7 +6,7 @@
59795 * cache lines need to provide their own cache.h.
59796 */
59797
59798-#define L1_CACHE_SHIFT 5
59799-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
59800+#define L1_CACHE_SHIFT 5UL
59801+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
59802
59803 #endif /* __ASM_GENERIC_CACHE_H */
59804diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
59805index 0d68a1e..b74a761 100644
59806--- a/include/asm-generic/emergency-restart.h
59807+++ b/include/asm-generic/emergency-restart.h
59808@@ -1,7 +1,7 @@
59809 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
59810 #define _ASM_GENERIC_EMERGENCY_RESTART_H
59811
59812-static inline void machine_emergency_restart(void)
59813+static inline __noreturn void machine_emergency_restart(void)
59814 {
59815 machine_restart(NULL);
59816 }
59817diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
59818index 0232ccb..13d9165 100644
59819--- a/include/asm-generic/kmap_types.h
59820+++ b/include/asm-generic/kmap_types.h
57199397 59821@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
59822 KMAP_D(17) KM_NMI,
59823 KMAP_D(18) KM_NMI_PTE,
59824 KMAP_D(19) KM_KDB,
59825+KMAP_D(20) KM_CLEARPAGE,
59826 /*
59827 * Remember to update debug_kmap_atomic() when adding new kmap types!
59828 */
59829-KMAP_D(20) KM_TYPE_NR
59830+KMAP_D(21) KM_TYPE_NR
59831 };
59832
ae4e228f 59833 #undef KMAP_D
59834diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
59835index 9ceb03b..2efbcbd 100644
59836--- a/include/asm-generic/local.h
59837+++ b/include/asm-generic/local.h
59838@@ -39,6 +39,7 @@ typedef struct
59839 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
59840 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
59841 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
59842+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
59843
59844 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
59845 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
59846diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
59847index 725612b..9cc513a 100644
59848--- a/include/asm-generic/pgtable-nopmd.h
59849+++ b/include/asm-generic/pgtable-nopmd.h
59850@@ -1,14 +1,19 @@
59851 #ifndef _PGTABLE_NOPMD_H
59852 #define _PGTABLE_NOPMD_H
59853
59854-#ifndef __ASSEMBLY__
59855-
59856 #include <asm-generic/pgtable-nopud.h>
59857
59858-struct mm_struct;
59859-
59860 #define __PAGETABLE_PMD_FOLDED
59861
59862+#define PMD_SHIFT PUD_SHIFT
59863+#define PTRS_PER_PMD 1
59864+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
59865+#define PMD_MASK (~(PMD_SIZE-1))
59866+
59867+#ifndef __ASSEMBLY__
59868+
59869+struct mm_struct;
59870+
59871 /*
59872 * Having the pmd type consist of a pud gets the size right, and allows
59873 * us to conceptually access the pud entry that this pmd is folded into
59874@@ -16,11 +21,6 @@ struct mm_struct;
59875 */
59876 typedef struct { pud_t pud; } pmd_t;
59877
59878-#define PMD_SHIFT PUD_SHIFT
59879-#define PTRS_PER_PMD 1
59880-#define PMD_SIZE (1UL << PMD_SHIFT)
59881-#define PMD_MASK (~(PMD_SIZE-1))
59882-
59883 /*
59884 * The "pud_xxx()" functions here are trivial for a folded two-level
59885 * setup: the pmd is never bad, and a pmd always exists (as it's folded
fe2de317 59886diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
c6e2a6c8 59887index 810431d..0ec4804f 100644
59888--- a/include/asm-generic/pgtable-nopud.h
59889+++ b/include/asm-generic/pgtable-nopud.h
59890@@ -1,10 +1,15 @@
59891 #ifndef _PGTABLE_NOPUD_H
59892 #define _PGTABLE_NOPUD_H
59893
59894-#ifndef __ASSEMBLY__
59895-
59896 #define __PAGETABLE_PUD_FOLDED
59897
59898+#define PUD_SHIFT PGDIR_SHIFT
59899+#define PTRS_PER_PUD 1
59900+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
59901+#define PUD_MASK (~(PUD_SIZE-1))
59902+
59903+#ifndef __ASSEMBLY__
59904+
59905 /*
59906 * Having the pud type consist of a pgd gets the size right, and allows
59907 * us to conceptually access the pgd entry that this pud is folded into
59908@@ -12,11 +17,6 @@
59909 */
59910 typedef struct { pgd_t pgd; } pud_t;
59911
59912-#define PUD_SHIFT PGDIR_SHIFT
59913-#define PTRS_PER_PUD 1
59914-#define PUD_SIZE (1UL << PUD_SHIFT)
59915-#define PUD_MASK (~(PUD_SIZE-1))
59916-
59917 /*
59918 * The "pgd_xxx()" functions here are trivial for a folded two-level
59919 * setup: the pud is never bad, and a pud always exists (as it's folded
59920@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
59921 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
59922
59923 #define pgd_populate(mm, pgd, pud) do { } while (0)
59924+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
59925 /*
59926 * (puds are folded into pgds so this doesn't get actually called,
59927 * but the define is needed for a generic inline function.)
fe2de317 59928diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
572b4308 59929index c7ec2cd..909d125 100644
59930--- a/include/asm-generic/pgtable.h
59931+++ b/include/asm-generic/pgtable.h
572b4308 59932@@ -531,6 +531,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
fe2de317 59933 #endif
5e856224 59934 }
59935
59936+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
59937+static inline unsigned long pax_open_kernel(void) { return 0; }
59938+#endif
59939+
59940+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
59941+static inline unsigned long pax_close_kernel(void) { return 0; }
59942+#endif
59943+
5e856224 59944 #endif /* CONFIG_MMU */
fe2de317 59945
5e856224 59946 #endif /* !__ASSEMBLY__ */
fe2de317 59947diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
c6e2a6c8 59948index 8aeadf6..f1dc019 100644
59949--- a/include/asm-generic/vmlinux.lds.h
59950+++ b/include/asm-generic/vmlinux.lds.h
c6e2a6c8 59951@@ -218,6 +218,7 @@
59952 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
59953 VMLINUX_SYMBOL(__start_rodata) = .; \
59954 *(.rodata) *(.rodata.*) \
57199397 59955+ *(.data..read_only) \
58c5fc13 59956 *(__vermagic) /* Kernel version magic */ \
59957 . = ALIGN(8); \
59958 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
c6e2a6c8 59959@@ -716,17 +717,18 @@
59960 * section in the linker script will go there too. @phdr should have
59961 * a leading colon.
59962 *
59963- * Note that this macros defines __per_cpu_load as an absolute symbol.
59964+ * Note that this macros defines per_cpu_load as an absolute symbol.
59965 * If there is no need to put the percpu section at a predetermined
15a11c5b 59966 * address, use PERCPU_SECTION.
58c5fc13 59967 */
66a7e928 59968 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
58c5fc13 59969- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57199397 59970- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
58c5fc13 59971+ per_cpu_load = .; \
57199397 59972+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
59973 - LOAD_OFFSET) { \
59974+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
15a11c5b 59975 PERCPU_INPUT(cacheline) \
58c5fc13 59976 } phdr \
59977- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
59978+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
59979
59980 /**
15a11c5b 59981 * PERCPU_SECTION - define output section for percpu area, simple version
fe2de317 59982diff --git a/include/drm/drmP.h b/include/drm/drmP.h
c6e2a6c8 59983index dd73104..fde86bd 100644
59984--- a/include/drm/drmP.h
59985+++ b/include/drm/drmP.h
4c928ab7 59986@@ -72,6 +72,7 @@
59987 #include <linux/workqueue.h>
59988 #include <linux/poll.h>
59989 #include <asm/pgalloc.h>
59990+#include <asm/local.h>
59991 #include "drm.h"
59992
59993 #include <linux/idr.h>
c6e2a6c8 59994@@ -1074,7 +1075,7 @@ struct drm_device {
59995
59996 /** \name Usage Counters */
59997 /*@{ */
59998- int open_count; /**< Outstanding files open */
c52201e0 59999+ local_t open_count; /**< Outstanding files open */
60000 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
60001 atomic_t vma_count; /**< Outstanding vma areas open */
60002 int buf_use; /**< Buffers in use -- cannot alloc */
c6e2a6c8 60003@@ -1085,7 +1086,7 @@ struct drm_device {
60004 /*@{ */
60005 unsigned long counters;
60006 enum drm_stat_type types[15];
60007- atomic_t counts[15];
60008+ atomic_unchecked_t counts[15];
60009 /*@} */
60010
60011 struct list_head filelist;
fe2de317 60012diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
5e856224 60013index 37515d1..34fa8b0 100644
60014--- a/include/drm/drm_crtc_helper.h
60015+++ b/include/drm/drm_crtc_helper.h
60016@@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
60017
60018 /* disable crtc when not in use - more explicit than dpms off */
60019 void (*disable)(struct drm_crtc *crtc);
60020-};
60021+} __no_const;
60022
60023 struct drm_encoder_helper_funcs {
60024 void (*dpms)(struct drm_encoder *encoder, int mode);
60025@@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
60026 struct drm_connector *connector);
60027 /* disable encoder when not in use - more explicit than dpms off */
60028 void (*disable)(struct drm_encoder *encoder);
60029-};
60030+} __no_const;
60031
60032 struct drm_connector_helper_funcs {
60033 int (*get_modes)(struct drm_connector *connector);
60034diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
c6e2a6c8 60035index d6d1da4..fdd1ac5 100644
60036--- a/include/drm/ttm/ttm_memory.h
60037+++ b/include/drm/ttm/ttm_memory.h
c6e2a6c8 60038@@ -48,7 +48,7 @@
60039
60040 struct ttm_mem_shrink {
60041 int (*do_shrink) (struct ttm_mem_shrink *);
60042-};
60043+} __no_const;
60044
60045 /**
60046 * struct ttm_mem_global - Global memory accounting structure.
60047diff --git a/include/linux/a.out.h b/include/linux/a.out.h
60048index e86dfca..40cc55f 100644
60049--- a/include/linux/a.out.h
60050+++ b/include/linux/a.out.h
60051@@ -39,6 +39,14 @@ enum machine_type {
60052 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
60053 };
60054
60055+/* Constants for the N_FLAGS field */
60056+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60057+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
60058+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
60059+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
60060+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60061+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60062+
60063 #if !defined (N_MAGIC)
60064 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
60065 #endif
fe2de317 60066diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
c6e2a6c8 60067index 06fd4bb..1caec0d 100644
60068--- a/include/linux/atmdev.h
60069+++ b/include/linux/atmdev.h
60070@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
60071 #endif
60072
60073 struct k_atm_aal_stats {
60074-#define __HANDLE_ITEM(i) atomic_t i
60075+#define __HANDLE_ITEM(i) atomic_unchecked_t i
60076 __AAL_STAT_ITEMS
60077 #undef __HANDLE_ITEM
60078 };
fe2de317 60079diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
c6e2a6c8 60080index 366422b..1fa7f84 100644
60081--- a/include/linux/binfmts.h
60082+++ b/include/linux/binfmts.h
5e856224 60083@@ -89,6 +89,7 @@ struct linux_binfmt {
60084 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
60085 int (*load_shlib)(struct file *);
ae4e228f 60086 int (*core_dump)(struct coredump_params *cprm);
60087+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
60088 unsigned long min_coredump; /* minimal dump size */
58c5fc13 60089 };
16454cff 60090
fe2de317 60091diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
c6e2a6c8 60092index 4d4ac24..2c3ccce 100644
60093--- a/include/linux/blkdev.h
60094+++ b/include/linux/blkdev.h
c6e2a6c8 60095@@ -1376,7 +1376,7 @@ struct block_device_operations {
57199397 60096 /* this callback is with swap_lock and sometimes page table lock held */
60097 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
60098 struct module *owner;
60099-};
60100+} __do_const;
60101
60102 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
15a11c5b 60103 unsigned long);
fe2de317 60104diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
4c928ab7 60105index 4d1a074..88f929a 100644
60106--- a/include/linux/blktrace_api.h
60107+++ b/include/linux/blktrace_api.h
6e9df6a3 60108@@ -162,7 +162,7 @@ struct blk_trace {
60109 struct dentry *dir;
60110 struct dentry *dropped_file;
60111 struct dentry *msg_file;
60112- atomic_t dropped;
60113+ atomic_unchecked_t dropped;
60114 };
60115
60116 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
60117diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
60118index 83195fb..0b0f77d 100644
60119--- a/include/linux/byteorder/little_endian.h
60120+++ b/include/linux/byteorder/little_endian.h
60121@@ -42,51 +42,51 @@
60122
60123 static inline __le64 __cpu_to_le64p(const __u64 *p)
60124 {
60125- return (__force __le64)*p;
60126+ return (__force const __le64)*p;
60127 }
60128 static inline __u64 __le64_to_cpup(const __le64 *p)
60129 {
60130- return (__force __u64)*p;
60131+ return (__force const __u64)*p;
60132 }
60133 static inline __le32 __cpu_to_le32p(const __u32 *p)
60134 {
60135- return (__force __le32)*p;
60136+ return (__force const __le32)*p;
60137 }
60138 static inline __u32 __le32_to_cpup(const __le32 *p)
60139 {
60140- return (__force __u32)*p;
60141+ return (__force const __u32)*p;
60142 }
60143 static inline __le16 __cpu_to_le16p(const __u16 *p)
60144 {
60145- return (__force __le16)*p;
60146+ return (__force const __le16)*p;
60147 }
60148 static inline __u16 __le16_to_cpup(const __le16 *p)
60149 {
60150- return (__force __u16)*p;
60151+ return (__force const __u16)*p;
60152 }
60153 static inline __be64 __cpu_to_be64p(const __u64 *p)
60154 {
60155- return (__force __be64)__swab64p(p);
60156+ return (__force const __be64)__swab64p(p);
60157 }
60158 static inline __u64 __be64_to_cpup(const __be64 *p)
60159 {
60160- return __swab64p((__u64 *)p);
60161+ return __swab64p((const __u64 *)p);
60162 }
60163 static inline __be32 __cpu_to_be32p(const __u32 *p)
60164 {
60165- return (__force __be32)__swab32p(p);
60166+ return (__force const __be32)__swab32p(p);
60167 }
60168 static inline __u32 __be32_to_cpup(const __be32 *p)
60169 {
60170- return __swab32p((__u32 *)p);
60171+ return __swab32p((const __u32 *)p);
60172 }
60173 static inline __be16 __cpu_to_be16p(const __u16 *p)
60174 {
60175- return (__force __be16)__swab16p(p);
60176+ return (__force const __be16)__swab16p(p);
60177 }
60178 static inline __u16 __be16_to_cpup(const __be16 *p)
60179 {
60180- return __swab16p((__u16 *)p);
60181+ return __swab16p((const __u16 *)p);
60182 }
60183 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
60184 #define __le64_to_cpus(x) do { (void)(x); } while (0)
60185diff --git a/include/linux/cache.h b/include/linux/cache.h
60186index 4c57065..4307975 100644
60187--- a/include/linux/cache.h
60188+++ b/include/linux/cache.h
58c5fc13
MT
60189@@ -16,6 +16,10 @@
60190 #define __read_mostly
60191 #endif
60192
60193+#ifndef __read_only
60194+#define __read_only __read_mostly
60195+#endif
60196+
60197 #ifndef ____cacheline_aligned
60198 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
60199 #endif
fe2de317 60200diff --git a/include/linux/capability.h b/include/linux/capability.h
5e856224 60201index 12d52de..b5f7fa7 100644
60202--- a/include/linux/capability.h
60203+++ b/include/linux/capability.h
60204@@ -548,6 +548,8 @@ extern bool has_ns_capability_noaudit(struct task_struct *t,
60205 extern bool capable(int cap);
66a7e928 60206 extern bool ns_capable(struct user_namespace *ns, int cap);
66a7e928 60207 extern bool nsown_capable(int cap);
66a7e928 60208+extern bool capable_nolog(int cap);
5e856224 60209+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
60210
60211 /* audit system wants to get cap info from files as well */
66a7e928 60212 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
fe2de317 60213diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
c6e2a6c8 60214index 42e55de..1cd0e66 100644
60215--- a/include/linux/cleancache.h
60216+++ b/include/linux/cleancache.h
15a11c5b 60217@@ -31,7 +31,7 @@ struct cleancache_ops {
60218 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
60219 void (*invalidate_inode)(int, struct cleancache_filekey);
60220 void (*invalidate_fs)(int);
60221-};
60222+} __no_const;
60223
60224 extern struct cleancache_ops
60225 cleancache_register_ops(struct cleancache_ops *ops);
fe2de317 60226diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
572b4308 60227index 2f40791..9c9e13c 100644
60228--- a/include/linux/compiler-gcc4.h
60229+++ b/include/linux/compiler-gcc4.h
572b4308 60230@@ -32,6 +32,20 @@
5e856224 60231 #define __linktime_error(message) __attribute__((__error__(message)))
60232
60233 #if __GNUC_MINOR__ >= 5
60234+
60235+#ifdef CONSTIFY_PLUGIN
60236+#define __no_const __attribute__((no_const))
60237+#define __do_const __attribute__((do_const))
60238+#endif
60239+
60240+#ifdef SIZE_OVERFLOW_PLUGIN
60241+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
60242+#endif
60243+
60244+#ifdef LATENT_ENTROPY_PLUGIN
60245+#define __latent_entropy __attribute__((latent_entropy))
60246+#endif
c6e2a6c8 60247+
60248 /*
60249 * Mark a position in code as unreachable. This can be used to
60250 * suppress control flow warnings after asm blocks that transfer
572b4308 60251@@ -47,6 +61,11 @@
66a7e928 60252 #define __noclone __attribute__((__noclone__))
57199397 60253
ae4e228f 60254 #endif
66a7e928 60255+
60256+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
60257+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
60258+#define __bos0(ptr) __bos((ptr), 0)
60259+#define __bos1(ptr) __bos((ptr), 1)
60260 #endif
60261
60262 #if __GNUC_MINOR__ > 0
fe2de317 60263diff --git a/include/linux/compiler.h b/include/linux/compiler.h
572b4308 60264index 923d093..1fef491 100644
60265--- a/include/linux/compiler.h
60266+++ b/include/linux/compiler.h
60267@@ -5,31 +5,62 @@
60268
60269 #ifdef __CHECKER__
60270 # define __user __attribute__((noderef, address_space(1)))
60271+# define __force_user __force __user
60272 # define __kernel __attribute__((address_space(0)))
60273+# define __force_kernel __force __kernel
60274 # define __safe __attribute__((safe))
60275 # define __force __attribute__((force))
60276 # define __nocast __attribute__((nocast))
60277 # define __iomem __attribute__((noderef, address_space(2)))
60278+# define __force_iomem __force __iomem
60279 # define __acquires(x) __attribute__((context(x,0,1)))
60280 # define __releases(x) __attribute__((context(x,1,0)))
60281 # define __acquire(x) __context__(x,1)
60282 # define __release(x) __context__(x,-1)
60283 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
60284 # define __percpu __attribute__((noderef, address_space(3)))
60285+# define __force_percpu __force __percpu
60286 #ifdef CONFIG_SPARSE_RCU_POINTER
60287 # define __rcu __attribute__((noderef, address_space(4)))
60288+# define __force_rcu __force __rcu
60289 #else
60290 # define __rcu
60291+# define __force_rcu
60292 #endif
60293 extern void __chk_user_ptr(const volatile void __user *);
60294 extern void __chk_io_ptr(const volatile void __iomem *);
60295+#elif defined(CHECKER_PLUGIN)
60296+//# define __user
60297+//# define __force_user
60298+//# define __kernel
60299+//# define __force_kernel
60300+# define __safe
60301+# define __force
60302+# define __nocast
60303+# define __iomem
60304+# define __force_iomem
60305+# define __chk_user_ptr(x) (void)0
60306+# define __chk_io_ptr(x) (void)0
60307+# define __builtin_warning(x, y...) (1)
60308+# define __acquires(x)
60309+# define __releases(x)
60310+# define __acquire(x) (void)0
60311+# define __release(x) (void)0
60312+# define __cond_lock(x,c) (c)
60313+# define __percpu
60314+# define __force_percpu
60315+# define __rcu
60316+# define __force_rcu
60317 #else
60318 # define __user
60319+# define __force_user
60320 # define __kernel
60321+# define __force_kernel
60322 # define __safe
60323 # define __force
60324 # define __nocast
60325 # define __iomem
60326+# define __force_iomem
60327 # define __chk_user_ptr(x) (void)0
60328 # define __chk_io_ptr(x) (void)0
60329 # define __builtin_warning(x, y...) (1)
fe2de317 60330@@ -39,7 +70,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
60331 # define __release(x) (void)0
60332 # define __cond_lock(x,c) (c)
60333 # define __percpu
60334+# define __force_percpu
60335 # define __rcu
60336+# define __force_rcu
60337 #endif
60338
60339 #ifdef __KERNEL__
572b4308 60340@@ -264,6 +297,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60341 # define __attribute_const__ /* unimplemented */
60342 #endif
60343
60344+#ifndef __no_const
60345+# define __no_const
60346+#endif
60347+
60348+#ifndef __do_const
60349+# define __do_const
60350+#endif
60351+
60352+#ifndef __size_overflow
60353+# define __size_overflow(...)
60354+#endif
60355+
60356+#ifndef __latent_entropy
60357+# define __latent_entropy
60358+#endif
c6e2a6c8 60359+
60360 /*
60361 * Tell gcc if a function is cold. The compiler will assume any path
60362 * directly leading to the call is unlikely.
572b4308 60363@@ -273,6 +322,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60364 #define __cold
60365 #endif
60366
60367+#ifndef __alloc_size
15a11c5b 60368+#define __alloc_size(...)
60369+#endif
60370+
60371+#ifndef __bos
15a11c5b 60372+#define __bos(ptr, arg)
60373+#endif
60374+
60375+#ifndef __bos0
15a11c5b 60376+#define __bos0(ptr)
60377+#endif
60378+
60379+#ifndef __bos1
15a11c5b 60380+#define __bos1(ptr)
60381+#endif
60382+
60383 /* Simple shorthand for a section definition */
60384 #ifndef __section
60385 # define __section(S) __attribute__ ((__section__(#S)))
572b4308 60386@@ -308,6 +373,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
60387 * use is to mediate communication between process-level code and irq/NMI
60388 * handlers, all running on the same CPU.
60389 */
60390-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
60391+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
60392+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
60393
60394 #endif /* __LINUX_COMPILER_H */
4c928ab7 60395diff --git a/include/linux/cred.h b/include/linux/cred.h
5e856224 60396index adadf71..6af5560 100644
60397--- a/include/linux/cred.h
60398+++ b/include/linux/cred.h
60399@@ -207,6 +207,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
60400 static inline void validate_process_creds(void)
60401 {
60402 }
60403+static inline void validate_task_creds(struct task_struct *task)
60404+{
60405+}
60406 #endif
60407
60408 /**
fe2de317 60409diff --git a/include/linux/crypto.h b/include/linux/crypto.h
c6e2a6c8 60410index b92eadf..b4ecdc1 100644
60411--- a/include/linux/crypto.h
60412+++ b/include/linux/crypto.h
c6e2a6c8 60413@@ -373,7 +373,7 @@ struct cipher_tfm {
60414 const u8 *key, unsigned int keylen);
60415 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60416 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
60417-};
60418+} __no_const;
60419
60420 struct hash_tfm {
60421 int (*init)(struct hash_desc *desc);
c6e2a6c8 60422@@ -394,13 +394,13 @@ struct compress_tfm {
60423 int (*cot_decompress)(struct crypto_tfm *tfm,
60424 const u8 *src, unsigned int slen,
60425 u8 *dst, unsigned int *dlen);
60426-};
60427+} __no_const;
60428
60429 struct rng_tfm {
60430 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
60431 unsigned int dlen);
60432 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
60433-};
60434+} __no_const;
60435
60436 #define crt_ablkcipher crt_u.ablkcipher
60437 #define crt_aead crt_u.aead
60438diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
60439index 7925bf0..d5143d2 100644
60440--- a/include/linux/decompress/mm.h
60441+++ b/include/linux/decompress/mm.h
16454cff 60442@@ -77,7 +77,7 @@ static void free(void *where)
60443 * warnings when not needed (indeed large_malloc / large_free are not
60444 * needed by inflate */
60445
60446-#define malloc(a) kmalloc(a, GFP_KERNEL)
60447+#define malloc(a) kmalloc((a), GFP_KERNEL)
60448 #define free(a) kfree(a)
60449
60450 #define large_malloc(a) vmalloc(a)
fe2de317 60451diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
c6e2a6c8 60452index dfc099e..e583e66 100644
60453--- a/include/linux/dma-mapping.h
60454+++ b/include/linux/dma-mapping.h
c6e2a6c8 60455@@ -51,7 +51,7 @@ struct dma_map_ops {
60456 u64 (*get_required_mask)(struct device *dev);
60457 #endif
60458 int is_phys;
60459-};
60460+} __do_const;
60461
60462 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
15a11c5b 60463
fe2de317 60464diff --git a/include/linux/efi.h b/include/linux/efi.h
c6e2a6c8 60465index ec45ccd..9923c32 100644
60466--- a/include/linux/efi.h
60467+++ b/include/linux/efi.h
c6e2a6c8 60468@@ -635,7 +635,7 @@ struct efivar_operations {
60469 efi_get_variable_t *get_variable;
60470 efi_get_next_variable_t *get_next_variable;
60471 efi_set_variable_t *set_variable;
60472-};
60473+} __no_const;
60474
60475 struct efivars {
60476 /*
fe2de317 60477diff --git a/include/linux/elf.h b/include/linux/elf.h
5e856224 60478index 999b4f5..57753b4 100644
60479--- a/include/linux/elf.h
60480+++ b/include/linux/elf.h
5e856224 60481@@ -40,6 +40,17 @@ typedef __s64 Elf64_Sxword;
60482 #define PT_GNU_EH_FRAME 0x6474e550
60483
60484 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
60485+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
60486+
60487+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
60488+
60489+/* Constants for the e_flags field */
60490+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
60491+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
60492+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
60493+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
60494+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
60495+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
60496
60497 /*
60498 * Extended Numbering
5e856224 60499@@ -97,6 +108,8 @@ typedef __s64 Elf64_Sxword;
60500 #define DT_DEBUG 21
60501 #define DT_TEXTREL 22
60502 #define DT_JMPREL 23
60503+#define DT_FLAGS 30
60504+ #define DF_TEXTREL 0x00000004
60505 #define DT_ENCODING 32
60506 #define OLD_DT_LOOS 0x60000000
60507 #define DT_LOOS 0x6000000d
5e856224 60508@@ -243,6 +256,19 @@ typedef struct elf64_hdr {
60509 #define PF_W 0x2
60510 #define PF_X 0x1
60511
60512+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
60513+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
60514+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
60515+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
60516+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
60517+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
60518+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
60519+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
60520+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
60521+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
60522+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
60523+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
60524+
60525 typedef struct elf32_phdr{
60526 Elf32_Word p_type;
60527 Elf32_Off p_offset;
5e856224 60528@@ -335,6 +361,8 @@ typedef struct elf64_shdr {
60529 #define EI_OSABI 7
60530 #define EI_PAD 8
60531
60532+#define EI_PAX 14
60533+
60534 #define ELFMAG0 0x7f /* EI_MAG */
60535 #define ELFMAG1 'E'
60536 #define ELFMAG2 'L'
5e856224 60537@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
60538 #define elf_note elf32_note
60539 #define elf_addr_t Elf32_Off
df50ba0c 60540 #define Elf_Half Elf32_Half
60541+#define elf_dyn Elf32_Dyn
60542
60543 #else
60544
5e856224 60545@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
60546 #define elf_note elf64_note
60547 #define elf_addr_t Elf64_Off
df50ba0c 60548 #define Elf_Half Elf64_Half
60549+#define elf_dyn Elf64_Dyn
60550
60551 #endif
60552
fe2de317 60553diff --git a/include/linux/filter.h b/include/linux/filter.h
4c928ab7 60554index 8eeb205..d59bfa2 100644
60555--- a/include/linux/filter.h
60556+++ b/include/linux/filter.h
60557@@ -134,6 +134,7 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
60558
60559 struct sk_buff;
60560 struct sock;
60561+struct bpf_jit_work;
60562
60563 struct sk_filter
60564 {
60565@@ -141,6 +142,9 @@ struct sk_filter
60566 unsigned int len; /* Number of filter blocks */
60567 unsigned int (*bpf_func)(const struct sk_buff *skb,
60568 const struct sock_filter *filter);
60569+#ifdef CONFIG_BPF_JIT
60570+ struct bpf_jit_work *work;
60571+#endif
60572 struct rcu_head rcu;
60573 struct sock_filter insns[0];
60574 };
fe2de317 60575diff --git a/include/linux/firewire.h b/include/linux/firewire.h
c6e2a6c8 60576index cdc9b71..ce69fb5 100644
60577--- a/include/linux/firewire.h
60578+++ b/include/linux/firewire.h
c6e2a6c8 60579@@ -413,7 +413,7 @@ struct fw_iso_context {
60580 union {
60581 fw_iso_callback_t sc;
60582 fw_iso_mc_callback_t mc;
60583- } callback;
60584+ } __no_const callback;
60585 void *callback_data;
66a7e928 60586 };
15a11c5b 60587
fe2de317 60588diff --git a/include/linux/fs.h b/include/linux/fs.h
c6e2a6c8 60589index 25c40b9..1bfd4f4 100644
60590--- a/include/linux/fs.h
60591+++ b/include/linux/fs.h
c6e2a6c8 60592@@ -1634,7 +1634,8 @@ struct file_operations {
60593 int (*setlease)(struct file *, long, struct file_lock **);
60594 long (*fallocate)(struct file *file, int mode, loff_t offset,
60595 loff_t len);
60596-};
60597+} __do_const;
60598+typedef struct file_operations __no_const file_operations_no_const;
60599
60600 struct inode_operations {
60601 struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
60602diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
60603index 003dc0f..3c4ea97 100644
60604--- a/include/linux/fs_struct.h
60605+++ b/include/linux/fs_struct.h
60606@@ -6,7 +6,7 @@
60607 #include <linux/seqlock.h>
60608
60609 struct fs_struct {
60610- int users;
60611+ atomic_t users;
60612 spinlock_t lock;
60613 seqcount_t seq;
60614 int umask;
60615diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
4c928ab7 60616index ce31408..b1ad003 100644
60617--- a/include/linux/fscache-cache.h
60618+++ b/include/linux/fscache-cache.h
60619@@ -102,7 +102,7 @@ struct fscache_operation {
60620 fscache_operation_release_t release;
60621 };
60622
60623-extern atomic_t fscache_op_debug_id;
60624+extern atomic_unchecked_t fscache_op_debug_id;
60625 extern void fscache_op_work_func(struct work_struct *work);
60626
60627 extern void fscache_enqueue_operation(struct fscache_operation *);
fe2de317 60628@@ -122,7 +122,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
60629 {
60630 INIT_WORK(&op->work, fscache_op_work_func);
60631 atomic_set(&op->usage, 1);
60632- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
60633+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
60634 op->processor = processor;
60635 op->release = release;
60636 INIT_LIST_HEAD(&op->pend_link);
fe2de317 60637diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
c6e2a6c8 60638index a6dfe69..569586df 100644
60639--- a/include/linux/fsnotify.h
60640+++ b/include/linux/fsnotify.h
c6e2a6c8 60641@@ -315,7 +315,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
60642 */
60643 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
60644 {
60645- return kstrdup(name, GFP_KERNEL);
60646+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
60647 }
60648
60649 /*
60650diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
60651index 91d0e0a3..035666b 100644
60652--- a/include/linux/fsnotify_backend.h
60653+++ b/include/linux/fsnotify_backend.h
60654@@ -105,6 +105,7 @@ struct fsnotify_ops {
60655 void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
60656 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
60657 };
60658+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
60659
60660 /*
60661 * A group is a "thing" that wants to receive notification about filesystem
fe2de317 60662diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
c6e2a6c8 60663index 176a939..1462211 100644
60664--- a/include/linux/ftrace_event.h
60665+++ b/include/linux/ftrace_event.h
6e9df6a3 60666@@ -97,7 +97,7 @@ struct trace_event_functions {
60667 trace_print_func raw;
60668 trace_print_func hex;
60669 trace_print_func binary;
60670-};
60671+} __no_const;
60672
60673 struct trace_event {
60674 struct hlist_node node;
c6e2a6c8 60675@@ -263,7 +263,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
60676 extern int trace_add_event_call(struct ftrace_event_call *call);
60677 extern void trace_remove_event_call(struct ftrace_event_call *call);
60678
60679-#define is_signed_type(type) (((type)(-1)) < 0)
60680+#define is_signed_type(type) (((type)(-1)) < (type)1)
60681
60682 int trace_set_clr_event(const char *system, const char *event, int set);
60683
fe2de317 60684diff --git a/include/linux/genhd.h b/include/linux/genhd.h
c6e2a6c8 60685index 017a7fb..33a8507 100644
60686--- a/include/linux/genhd.h
60687+++ b/include/linux/genhd.h
4c928ab7 60688@@ -185,7 +185,7 @@ struct gendisk {
16454cff 60689 struct kobject *slave_dir;
60690
60691 struct timer_rand_state *random;
60692- atomic_t sync_io; /* RAID */
60693+ atomic_unchecked_t sync_io; /* RAID */
16454cff 60694 struct disk_events *ev;
60695 #ifdef CONFIG_BLK_DEV_INTEGRITY
60696 struct blk_integrity *integrity;
60697diff --git a/include/linux/gfp.h b/include/linux/gfp.h
60698index 581e74b..8c34a24 100644
60699--- a/include/linux/gfp.h
60700+++ b/include/linux/gfp.h
60701@@ -38,6 +38,12 @@ struct vm_area_struct;
60702 #define ___GFP_OTHER_NODE 0x800000u
60703 #define ___GFP_WRITE 0x1000000u
60704
60705+#ifdef CONFIG_PAX_USERCOPY_SLABS
60706+#define ___GFP_USERCOPY 0x2000000u
60707+#else
60708+#define ___GFP_USERCOPY 0
60709+#endif
60710+
60711 /*
60712 * GFP bitmasks..
60713 *
60714@@ -87,6 +93,7 @@ struct vm_area_struct;
60715 #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD)
60716 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
60717 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
60718+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
60719
60720 /*
60721 * This may seem redundant, but it's a way of annotating false positives vs.
60722@@ -94,7 +101,7 @@ struct vm_area_struct;
60723 */
60724 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
60725
60726-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
60727+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
60728 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
60729
60730 /* This equals 0, but use constants in case they ever change */
60731@@ -148,6 +155,8 @@ struct vm_area_struct;
60732 /* 4GB DMA on some platforms */
60733 #define GFP_DMA32 __GFP_DMA32
60734
60735+#define GFP_USERCOPY __GFP_USERCOPY
60736+
60737 /* Convert GFP flags to their corresponding migrate type */
60738 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
60739 {
60740diff --git a/include/linux/gracl.h b/include/linux/gracl.h
60741new file mode 100644
c6e2a6c8 60742index 0000000..c938b1f
60743--- /dev/null
60744+++ b/include/linux/gracl.h
4c928ab7 60745@@ -0,0 +1,319 @@
60746+#ifndef GR_ACL_H
60747+#define GR_ACL_H
60748+
60749+#include <linux/grdefs.h>
60750+#include <linux/resource.h>
60751+#include <linux/capability.h>
60752+#include <linux/dcache.h>
60753+#include <asm/resource.h>
60754+
60755+/* Major status information */
60756+
60757+#define GR_VERSION "grsecurity 2.9.1"
60758+#define GRSECURITY_VERSION 0x2901
60759+
60760+enum {
60761+ GR_SHUTDOWN = 0,
60762+ GR_ENABLE = 1,
60763+ GR_SPROLE = 2,
60764+ GR_RELOAD = 3,
60765+ GR_SEGVMOD = 4,
60766+ GR_STATUS = 5,
60767+ GR_UNSPROLE = 6,
60768+ GR_PASSSET = 7,
60769+ GR_SPROLEPAM = 8,
60770+};
60771+
60772+/* Password setup definitions
60773+ * kernel/grhash.c */
60774+enum {
60775+ GR_PW_LEN = 128,
60776+ GR_SALT_LEN = 16,
60777+ GR_SHA_LEN = 32,
60778+};
60779+
60780+enum {
60781+ GR_SPROLE_LEN = 64,
60782+};
60783+
60784+enum {
60785+ GR_NO_GLOB = 0,
60786+ GR_REG_GLOB,
60787+ GR_CREATE_GLOB
60788+};
60789+
60790+#define GR_NLIMITS 32
60791+
60792+/* Begin Data Structures */
60793+
60794+struct sprole_pw {
60795+ unsigned char *rolename;
60796+ unsigned char salt[GR_SALT_LEN];
60797+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
60798+};
60799+
60800+struct name_entry {
60801+ __u32 key;
60802+ ino_t inode;
60803+ dev_t device;
60804+ char *name;
60805+ __u16 len;
60806+ __u8 deleted;
60807+ struct name_entry *prev;
60808+ struct name_entry *next;
60809+};
60810+
60811+struct inodev_entry {
60812+ struct name_entry *nentry;
60813+ struct inodev_entry *prev;
60814+ struct inodev_entry *next;
60815+};
60816+
60817+struct acl_role_db {
60818+ struct acl_role_label **r_hash;
60819+ __u32 r_size;
60820+};
60821+
60822+struct inodev_db {
60823+ struct inodev_entry **i_hash;
60824+ __u32 i_size;
60825+};
60826+
60827+struct name_db {
60828+ struct name_entry **n_hash;
60829+ __u32 n_size;
60830+};
60831+
60832+struct crash_uid {
60833+ uid_t uid;
60834+ unsigned long expires;
60835+};
60836+
60837+struct gr_hash_struct {
60838+ void **table;
60839+ void **nametable;
60840+ void *first;
60841+ __u32 table_size;
60842+ __u32 used_size;
60843+ int type;
60844+};
60845+
60846+/* Userspace Grsecurity ACL data structures */
60847+
60848+struct acl_subject_label {
60849+ char *filename;
60850+ ino_t inode;
60851+ dev_t device;
60852+ __u32 mode;
60853+ kernel_cap_t cap_mask;
60854+ kernel_cap_t cap_lower;
df50ba0c 60855+ kernel_cap_t cap_invert_audit;
58c5fc13
MT
60856+
60857+ struct rlimit res[GR_NLIMITS];
60858+ __u32 resmask;
60859+
60860+ __u8 user_trans_type;
60861+ __u8 group_trans_type;
60862+ uid_t *user_transitions;
60863+ gid_t *group_transitions;
60864+ __u16 user_trans_num;
60865+ __u16 group_trans_num;
60866+
bc901d79 60867+ __u32 sock_families[2];
58c5fc13
MT
60868+ __u32 ip_proto[8];
60869+ __u32 ip_type;
60870+ struct acl_ip_label **ips;
60871+ __u32 ip_num;
60872+ __u32 inaddr_any_override;
60873+
60874+ __u32 crashes;
60875+ unsigned long expires;
60876+
60877+ struct acl_subject_label *parent_subject;
60878+ struct gr_hash_struct *hash;
60879+ struct acl_subject_label *prev;
60880+ struct acl_subject_label *next;
60881+
60882+ struct acl_object_label **obj_hash;
60883+ __u32 obj_hash_size;
60884+ __u16 pax_flags;
60885+};
60886+
60887+struct role_allowed_ip {
60888+ __u32 addr;
60889+ __u32 netmask;
60890+
60891+ struct role_allowed_ip *prev;
60892+ struct role_allowed_ip *next;
60893+};
60894+
60895+struct role_transition {
60896+ char *rolename;
60897+
60898+ struct role_transition *prev;
60899+ struct role_transition *next;
60900+};
60901+
60902+struct acl_role_label {
60903+ char *rolename;
60904+ uid_t uidgid;
60905+ __u16 roletype;
60906+
60907+ __u16 auth_attempts;
60908+ unsigned long expires;
60909+
60910+ struct acl_subject_label *root_label;
60911+ struct gr_hash_struct *hash;
60912+
60913+ struct acl_role_label *prev;
60914+ struct acl_role_label *next;
60915+
60916+ struct role_transition *transitions;
60917+ struct role_allowed_ip *allowed_ips;
60918+ uid_t *domain_children;
60919+ __u16 domain_child_num;
60920+
4c928ab7
MT
60921+ umode_t umask;
60922+
58c5fc13
MT
60923+ struct acl_subject_label **subj_hash;
60924+ __u32 subj_hash_size;
60925+};
60926+
60927+struct user_acl_role_db {
60928+ struct acl_role_label **r_table;
60929+ __u32 num_pointers; /* Number of allocations to track */
60930+ __u32 num_roles; /* Number of roles */
60931+ __u32 num_domain_children; /* Number of domain children */
60932+ __u32 num_subjects; /* Number of subjects */
60933+ __u32 num_objects; /* Number of objects */
60934+};
60935+
60936+struct acl_object_label {
60937+ char *filename;
60938+ ino_t inode;
60939+ dev_t device;
60940+ __u32 mode;
60941+
60942+ struct acl_subject_label *nested;
60943+ struct acl_object_label *globbed;
60944+
60945+ /* next two structures not used */
60946+
60947+ struct acl_object_label *prev;
60948+ struct acl_object_label *next;
60949+};
60950+
60951+struct acl_ip_label {
60952+ char *iface;
60953+ __u32 addr;
60954+ __u32 netmask;
60955+ __u16 low, high;
60956+ __u8 mode;
60957+ __u32 type;
60958+ __u32 proto[8];
60959+
60960+ /* next two structures not used */
60961+
60962+ struct acl_ip_label *prev;
60963+ struct acl_ip_label *next;
60964+};
60965+
60966+struct gr_arg {
60967+ struct user_acl_role_db role_db;
60968+ unsigned char pw[GR_PW_LEN];
60969+ unsigned char salt[GR_SALT_LEN];
60970+ unsigned char sum[GR_SHA_LEN];
60971+ unsigned char sp_role[GR_SPROLE_LEN];
60972+ struct sprole_pw *sprole_pws;
60973+ dev_t segv_device;
60974+ ino_t segv_inode;
60975+ uid_t segv_uid;
60976+ __u16 num_sprole_pws;
60977+ __u16 mode;
60978+};
60979+
60980+struct gr_arg_wrapper {
60981+ struct gr_arg *arg;
60982+ __u32 version;
60983+ __u32 size;
60984+};
60985+
60986+struct subject_map {
60987+ struct acl_subject_label *user;
60988+ struct acl_subject_label *kernel;
60989+ struct subject_map *prev;
60990+ struct subject_map *next;
60991+};
60992+
60993+struct acl_subj_map_db {
60994+ struct subject_map **s_hash;
60995+ __u32 s_size;
60996+};
60997+
60998+/* End Data Structures Section */
60999+
61000+/* Hash functions generated by empirical testing by Brad Spengler.
61001+   Makes good use of the low bits of the inode. Generally 0-1 times
61002+   in loop for successful match, 0-3 for unsuccessful match.
61003+   Shift/add algorithm with modulus of table size and an XOR. */
61004+
61005+static __inline__ unsigned int
61006+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
61007+{
ae4e228f 61008+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58c5fc13
MT
61009+}
61010+
61011+static __inline__ unsigned int
61012+shash(const struct acl_subject_label *userp, const unsigned int sz)
61013+{
61014+ return ((const unsigned long)userp % sz);
61015+}
61016+
61017+static __inline__ unsigned int
61018+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
61019+{
61020+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
61021+}
61022+
61023+static __inline__ unsigned int
61024+nhash(const char *name, const __u16 len, const unsigned int sz)
61025+{
61026+ return full_name_hash((const unsigned char *)name, len) % sz;
61027+}
61028+
ae4e228f
MT
61029+#define FOR_EACH_ROLE_START(role) \
61030+ role = role_list; \
61031+ while (role) {
58c5fc13 61032+
ae4e228f
MT
61033+#define FOR_EACH_ROLE_END(role) \
61034+ role = role->prev; \
58c5fc13
MT
61035+ }
61036+
61037+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
61038+ subj = NULL; \
61039+ iter = 0; \
61040+ while (iter < role->subj_hash_size) { \
61041+ if (subj == NULL) \
61042+ subj = role->subj_hash[iter]; \
61043+ if (subj == NULL) { \
61044+ iter++; \
61045+ continue; \
61046+ }
61047+
61048+#define FOR_EACH_SUBJECT_END(subj,iter) \
61049+ subj = subj->next; \
61050+ if (subj == NULL) \
61051+ iter++; \
61052+ }
61053+
61054+
61055+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
61056+ subj = role->hash->first; \
61057+ while (subj != NULL) {
61058+
61059+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
61060+ subj = subj->next; \
61061+ }
61062+
61063+#endif
61064+
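gracl.h ends with the empirically tuned hash helpers described in the comment above; fhash() mixes an inode/device pair into one of the fixed-size tables declared earlier in the header. A standalone sketch that reproduces the fhash() formula outside the kernel so it can be compiled and run directly (the typedefs and table size are stand-ins):

/* Standalone sketch of the fhash() inode/device mixer defined above.
 * ino_t and dev_t are modelled as unsigned long; the table size is
 * arbitrary. */
#include <stdio.h>

typedef unsigned long ino_no;
typedef unsigned long dev_no;

static unsigned int fhash(ino_no ino, dev_no dev, unsigned int sz)
{
    /* formula copied from the patch */
    return (unsigned int)(((ino + dev) ^
            ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
    unsigned int sz = 256;                 /* e.g. an obj_hash_size */
    ino_no inodes[] = { 2, 3, 4, 1024, 1025 };
    dev_no dev = 0x801;                    /* e.g. /dev/sda1 */

    for (unsigned int i = 0; i < sizeof(inodes) / sizeof(inodes[0]); i++)
        printf("ino %lu dev %#lx -> bucket %u\n",
               inodes[i], dev, fhash(inodes[i], dev, sz));
    return 0;
}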
fe2de317
MT
61065diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
61066new file mode 100644
61067index 0000000..323ecf2
61068--- /dev/null
61069+++ b/include/linux/gralloc.h
58c5fc13
MT
61070@@ -0,0 +1,9 @@
61071+#ifndef __GRALLOC_H
61072+#define __GRALLOC_H
61073+
61074+void acl_free_all(void);
61075+int acl_alloc_stack_init(unsigned long size);
61076+void *acl_alloc(unsigned long len);
61077+void *acl_alloc_num(unsigned long num, unsigned long len);
61078+
61079+#endif
fe2de317
MT
61080diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
61081new file mode 100644
61082index 0000000..b30e9bc
61083--- /dev/null
61084+++ b/include/linux/grdefs.h
15a11c5b 61085@@ -0,0 +1,140 @@
58c5fc13
MT
61086+#ifndef GRDEFS_H
61087+#define GRDEFS_H
61088+
61089+/* Begin grsecurity status declarations */
61090+
61091+enum {
61092+ GR_READY = 0x01,
61093+ GR_STATUS_INIT = 0x00 // disabled state
61094+};
61095+
61096+/* Begin ACL declarations */
61097+
61098+/* Role flags */
61099+
61100+enum {
61101+ GR_ROLE_USER = 0x0001,
61102+ GR_ROLE_GROUP = 0x0002,
61103+ GR_ROLE_DEFAULT = 0x0004,
61104+ GR_ROLE_SPECIAL = 0x0008,
61105+ GR_ROLE_AUTH = 0x0010,
61106+ GR_ROLE_NOPW = 0x0020,
61107+ GR_ROLE_GOD = 0x0040,
61108+ GR_ROLE_LEARN = 0x0080,
61109+ GR_ROLE_TPE = 0x0100,
61110+ GR_ROLE_DOMAIN = 0x0200,
16454cff
MT
61111+ GR_ROLE_PAM = 0x0400,
61112+ GR_ROLE_PERSIST = 0x0800
58c5fc13
MT
61113+};
61114+
61115+/* ACL Subject and Object mode flags */
61116+enum {
61117+ GR_DELETED = 0x80000000
61118+};
61119+
61120+/* ACL Object-only mode flags */
61121+enum {
61122+ GR_READ = 0x00000001,
61123+ GR_APPEND = 0x00000002,
61124+ GR_WRITE = 0x00000004,
61125+ GR_EXEC = 0x00000008,
61126+ GR_FIND = 0x00000010,
61127+ GR_INHERIT = 0x00000020,
61128+ GR_SETID = 0x00000040,
61129+ GR_CREATE = 0x00000080,
61130+ GR_DELETE = 0x00000100,
61131+ GR_LINK = 0x00000200,
61132+ GR_AUDIT_READ = 0x00000400,
61133+ GR_AUDIT_APPEND = 0x00000800,
61134+ GR_AUDIT_WRITE = 0x00001000,
61135+ GR_AUDIT_EXEC = 0x00002000,
61136+ GR_AUDIT_FIND = 0x00004000,
61137+ GR_AUDIT_INHERIT= 0x00008000,
61138+ GR_AUDIT_SETID = 0x00010000,
61139+ GR_AUDIT_CREATE = 0x00020000,
61140+ GR_AUDIT_DELETE = 0x00040000,
61141+ GR_AUDIT_LINK = 0x00080000,
61142+ GR_PTRACERD = 0x00100000,
61143+ GR_NOPTRACE = 0x00200000,
61144+ GR_SUPPRESS = 0x00400000,
16454cff
MT
61145+ GR_NOLEARN = 0x00800000,
61146+ GR_INIT_TRANSFER= 0x01000000
58c5fc13
MT
61147+};
61148+
61149+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
61150+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
61151+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
61152+
61153+/* ACL subject-only mode flags */
61154+enum {
61155+ GR_KILL = 0x00000001,
61156+ GR_VIEW = 0x00000002,
61157+ GR_PROTECTED = 0x00000004,
61158+ GR_LEARN = 0x00000008,
61159+ GR_OVERRIDE = 0x00000010,
61160+ /* just a placeholder, this mode is only used in userspace */
61161+ GR_DUMMY = 0x00000020,
61162+ GR_PROTSHM = 0x00000040,
61163+ GR_KILLPROC = 0x00000080,
61164+ GR_KILLIPPROC = 0x00000100,
61165+ /* just a placeholder, this mode is only used in userspace */
61166+ GR_NOTROJAN = 0x00000200,
61167+ GR_PROTPROCFD = 0x00000400,
61168+ GR_PROCACCT = 0x00000800,
61169+ GR_RELAXPTRACE = 0x00001000,
61170+ GR_NESTED = 0x00002000,
61171+ GR_INHERITLEARN = 0x00004000,
61172+ GR_PROCFIND = 0x00008000,
61173+ GR_POVERRIDE = 0x00010000,
61174+ GR_KERNELAUTH = 0x00020000,
15a11c5b
MT
61175+ GR_ATSECURE = 0x00040000,
61176+ GR_SHMEXEC = 0x00080000
58c5fc13
MT
61177+};
61178+
61179+enum {
61180+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
61181+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
61182+ GR_PAX_ENABLE_MPROTECT = 0x0004,
61183+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
61184+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
61185+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
61186+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
61187+ GR_PAX_DISABLE_MPROTECT = 0x0400,
61188+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
61189+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
61190+};
61191+
61192+enum {
61193+ GR_ID_USER = 0x01,
61194+ GR_ID_GROUP = 0x02,
61195+};
61196+
61197+enum {
61198+ GR_ID_ALLOW = 0x01,
61199+ GR_ID_DENY = 0x02,
61200+};
61201+
61202+#define GR_CRASH_RES 31
61203+#define GR_UIDTABLE_MAX 500
61204+
61205+/* begin resource learning section */
61206+enum {
61207+ GR_RLIM_CPU_BUMP = 60,
61208+ GR_RLIM_FSIZE_BUMP = 50000,
61209+ GR_RLIM_DATA_BUMP = 10000,
61210+ GR_RLIM_STACK_BUMP = 1000,
61211+ GR_RLIM_CORE_BUMP = 10000,
61212+ GR_RLIM_RSS_BUMP = 500000,
61213+ GR_RLIM_NPROC_BUMP = 1,
61214+ GR_RLIM_NOFILE_BUMP = 5,
61215+ GR_RLIM_MEMLOCK_BUMP = 50000,
61216+ GR_RLIM_AS_BUMP = 500000,
61217+ GR_RLIM_LOCKS_BUMP = 2,
61218+ GR_RLIM_SIGPENDING_BUMP = 5,
61219+ GR_RLIM_MSGQUEUE_BUMP = 10000,
61220+ GR_RLIM_NICE_BUMP = 1,
61221+ GR_RLIM_RTPRIO_BUMP = 1,
61222+ GR_RLIM_RTTIME_BUMP = 1000000
61223+};
61224+
61225+#endif
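grdefs.h places each object audit bit exactly 10 positions above the matching access bit (GR_READ 0x1 vs GR_AUDIT_READ 0x400, GR_WRITE 0x4 vs GR_AUDIT_WRITE 0x1000, and so on), so a requested mode can be mapped to its audit counterpart with a simple shift. An illustrative userspace sketch of that pairing; this is not the real RBAC decision logic, only a demonstration of how the bits line up:

/* Illustrative use of the object mode bits above (values copied from
 * the hunk). */
#include <stdio.h>

#define GR_READ        0x00000001
#define GR_WRITE       0x00000004
#define GR_AUDIT_READ  0x00000400
#define GR_AUDIT_WRITE 0x00001000

static const char *decide(unsigned int obj_mode, unsigned int reqmode)
{
    if ((obj_mode & reqmode) != reqmode)
        return "denied";
    if (obj_mode & (reqmode << 10))   /* audit bits sit 10 bits above */
        return "granted (audited)";
    return "granted";
}

int main(void)
{
    unsigned int obj = GR_READ | GR_AUDIT_READ; /* readable, reads audited */
    printf("read : %s\n", decide(obj, GR_READ));
    printf("write: %s\n", decide(obj, GR_WRITE));
    return 0;
}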
fe2de317
MT
61226diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
61227new file mode 100644
572b4308 61228index 0000000..c9292f7
fe2de317
MT
61229--- /dev/null
61230+++ b/include/linux/grinternal.h
572b4308 61231@@ -0,0 +1,223 @@
58c5fc13
MT
61232+#ifndef __GRINTERNAL_H
61233+#define __GRINTERNAL_H
61234+
61235+#ifdef CONFIG_GRKERNSEC
61236+
61237+#include <linux/fs.h>
61238+#include <linux/mnt_namespace.h>
61239+#include <linux/nsproxy.h>
61240+#include <linux/gracl.h>
61241+#include <linux/grdefs.h>
61242+#include <linux/grmsg.h>
61243+
61244+void gr_add_learn_entry(const char *fmt, ...)
61245+ __attribute__ ((format (printf, 1, 2)));
61246+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
61247+ const struct vfsmount *mnt);
61248+__u32 gr_check_create(const struct dentry *new_dentry,
61249+ const struct dentry *parent,
61250+ const struct vfsmount *mnt, const __u32 mode);
61251+int gr_check_protected_task(const struct task_struct *task);
61252+__u32 to_gr_audit(const __u32 reqmode);
61253+int gr_set_acls(const int type);
16454cff 61254+int gr_apply_subject_to_task(struct task_struct *task);
58c5fc13
MT
61255+int gr_acl_is_enabled(void);
61256+char gr_roletype_to_char(void);
61257+
61258+void gr_handle_alertkill(struct task_struct *task);
61259+char *gr_to_filename(const struct dentry *dentry,
61260+ const struct vfsmount *mnt);
61261+char *gr_to_filename1(const struct dentry *dentry,
61262+ const struct vfsmount *mnt);
61263+char *gr_to_filename2(const struct dentry *dentry,
61264+ const struct vfsmount *mnt);
61265+char *gr_to_filename3(const struct dentry *dentry,
61266+ const struct vfsmount *mnt);
61267+
4c928ab7 61268+extern int grsec_enable_ptrace_readexec;
58c5fc13
MT
61269+extern int grsec_enable_harden_ptrace;
61270+extern int grsec_enable_link;
61271+extern int grsec_enable_fifo;
61272+extern int grsec_enable_execve;
61273+extern int grsec_enable_shm;
61274+extern int grsec_enable_execlog;
61275+extern int grsec_enable_signal;
ae4e228f 61276+extern int grsec_enable_audit_ptrace;
58c5fc13
MT
61277+extern int grsec_enable_forkfail;
61278+extern int grsec_enable_time;
ae4e228f 61279+extern int grsec_enable_rofs;
58c5fc13 61280+extern int grsec_enable_chroot_shmat;
58c5fc13
MT
61281+extern int grsec_enable_chroot_mount;
61282+extern int grsec_enable_chroot_double;
61283+extern int grsec_enable_chroot_pivot;
61284+extern int grsec_enable_chroot_chdir;
61285+extern int grsec_enable_chroot_chmod;
61286+extern int grsec_enable_chroot_mknod;
61287+extern int grsec_enable_chroot_fchdir;
61288+extern int grsec_enable_chroot_nice;
61289+extern int grsec_enable_chroot_execlog;
61290+extern int grsec_enable_chroot_caps;
61291+extern int grsec_enable_chroot_sysctl;
61292+extern int grsec_enable_chroot_unix;
572b4308
MT
61293+extern int grsec_enable_symlinkown;
61294+extern int grsec_symlinkown_gid;
58c5fc13
MT
61295+extern int grsec_enable_tpe;
61296+extern int grsec_tpe_gid;
61297+extern int grsec_enable_tpe_all;
57199397 61298+extern int grsec_enable_tpe_invert;
58c5fc13
MT
61299+extern int grsec_enable_socket_all;
61300+extern int grsec_socket_all_gid;
61301+extern int grsec_enable_socket_client;
61302+extern int grsec_socket_client_gid;
61303+extern int grsec_enable_socket_server;
61304+extern int grsec_socket_server_gid;
61305+extern int grsec_audit_gid;
61306+extern int grsec_enable_group;
61307+extern int grsec_enable_audit_textrel;
6892158b 61308+extern int grsec_enable_log_rwxmaps;
58c5fc13
MT
61309+extern int grsec_enable_mount;
61310+extern int grsec_enable_chdir;
61311+extern int grsec_resource_logging;
ae4e228f
MT
61312+extern int grsec_enable_blackhole;
61313+extern int grsec_lastack_retries;
15a11c5b 61314+extern int grsec_enable_brute;
58c5fc13
MT
61315+extern int grsec_lock;
61316+
61317+extern spinlock_t grsec_alert_lock;
61318+extern unsigned long grsec_alert_wtime;
61319+extern unsigned long grsec_alert_fyet;
61320+
61321+extern spinlock_t grsec_audit_lock;
61322+
61323+extern rwlock_t grsec_exec_file_lock;
61324+
6892158b
MT
61325+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
61326+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
61327+ (tsk)->exec_file->f_vfsmnt) : "/")
58c5fc13 61328+
6892158b
MT
61329+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
61330+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
61331+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58c5fc13 61332+
6892158b
MT
61333+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
61334+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
61335+ (tsk)->exec_file->f_vfsmnt) : "/")
58c5fc13 61336+
6892158b
MT
61337+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
61338+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
61339+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
58c5fc13 61340+
6892158b 61341+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
58c5fc13 61342+
6892158b 61343+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
58c5fc13 61344+
6892158b
MT
61345+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
61346+ (task)->pid, (cred)->uid, \
61347+ (cred)->euid, (cred)->gid, (cred)->egid, \
58c5fc13 61348+ gr_parent_task_fullpath(task), \
6892158b
MT
61349+ (task)->real_parent->comm, (task)->real_parent->pid, \
61350+ (pcred)->uid, (pcred)->euid, \
61351+ (pcred)->gid, (pcred)->egid
58c5fc13
MT
61352+
61353+#define GR_CHROOT_CAPS {{ \
61354+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
61355+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
61356+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
61357+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
61358+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
6e9df6a3
MT
61359+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
61360+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
58c5fc13
MT
61361+
61362+#define security_learn(normal_msg,args...) \
61363+({ \
61364+ read_lock(&grsec_exec_file_lock); \
61365+ gr_add_learn_entry(normal_msg "\n", ## args); \
61366+ read_unlock(&grsec_exec_file_lock); \
61367+})
61368+
61369+enum {
61370+ GR_DO_AUDIT,
61371+ GR_DONT_AUDIT,
16454cff 61372+ /* used for non-audit messages that we shouldn't kill the task on */
58c5fc13
MT
61373+ GR_DONT_AUDIT_GOOD
61374+};
61375+
61376+enum {
61377+ GR_TTYSNIFF,
61378+ GR_RBAC,
61379+ GR_RBAC_STR,
61380+ GR_STR_RBAC,
61381+ GR_RBAC_MODE2,
61382+ GR_RBAC_MODE3,
61383+ GR_FILENAME,
61384+ GR_SYSCTL_HIDDEN,
61385+ GR_NOARGS,
61386+ GR_ONE_INT,
61387+ GR_ONE_INT_TWO_STR,
61388+ GR_ONE_STR,
61389+ GR_STR_INT,
bc901d79 61390+ GR_TWO_STR_INT,
58c5fc13 61391+ GR_TWO_INT,
71d190be 61392+ GR_TWO_U64,
58c5fc13
MT
61393+ GR_THREE_INT,
61394+ GR_FIVE_INT_TWO_STR,
61395+ GR_TWO_STR,
61396+ GR_THREE_STR,
61397+ GR_FOUR_STR,
61398+ GR_STR_FILENAME,
61399+ GR_FILENAME_STR,
61400+ GR_FILENAME_TWO_INT,
61401+ GR_FILENAME_TWO_INT_STR,
61402+ GR_TEXTREL,
61403+ GR_PTRACE,
61404+ GR_RESOURCE,
61405+ GR_CAP,
61406+ GR_SIG,
61407+ GR_SIG2,
61408+ GR_CRASH1,
61409+ GR_CRASH2,
6892158b
MT
61410+ GR_PSACCT,
61411+ GR_RWXMAP
58c5fc13
MT
61412+};
61413+
61414+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
61415+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
61416+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
61417+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
61418+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
61419+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
61420+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
61421+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
61422+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
61423+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
61424+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
61425+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
61426+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
61427+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
71d190be 61428+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
58c5fc13
MT
61429+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
61430+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
61431+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
bc901d79 61432+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
58c5fc13
MT
61433+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
61434+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
61435+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
61436+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
61437+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
61438+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
61439+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
61440+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
61441+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
61442+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
61443+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
61444+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
61445+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
61446+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
61447+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
6892158b 61448+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
58c5fc13
MT
61449+
61450+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
61451+
61452+#endif
61453+
61454+#endif
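grinternal.h funnels every gr_log_* macro into gr_log_varargs() with an argtypes selector drawn from the enum above, which keeps the format strings in grmsg.h and the argument unpacking in one place. A much-reduced userspace sketch of that selector-driven dispatch (two selectors only; the real function emits grsecurity alerts rather than printing to stdout):

/* Minimal sketch of a selector-driven varargs logger in the style of
 * gr_log_varargs() above. */
#include <stdarg.h>
#include <stdio.h>

enum { LOG_ONE_STR, LOG_TWO_INT };

static void log_varargs(const char *msg, int argtypes, ...)
{
    va_list ap;
    va_start(ap, argtypes);

    switch (argtypes) {
    case LOG_ONE_STR: {
        const char *s = va_arg(ap, const char *);
        printf(msg, s);
        break;
    }
    case LOG_TWO_INT: {
        int a = va_arg(ap, int);
        int b = va_arg(ap, int);
        printf(msg, a, b);
        break;
    }
    }
    va_end(ap);
    printf("\n");
}

/* wrappers mirror the gr_log_str()/gr_log_int_int() macros above */
#define log_str(msg, s)         log_varargs(msg, LOG_ONE_STR, s)
#define log_int_int(msg, a, b)  log_varargs(msg, LOG_TWO_INT, a, b)

int main(void)
{
    log_str("denied mount of %s", "/dev/sda1");
    log_int_int("signal %d sent to pid %d", 9, 4242);
    return 0;
}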
fe2de317
MT
61455diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
61456new file mode 100644
572b4308 61457index 0000000..54f4e85
fe2de317
MT
61458--- /dev/null
61459+++ b/include/linux/grmsg.h
572b4308 61460@@ -0,0 +1,110 @@
58c5fc13 61461+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
ae4e228f 61462+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
58c5fc13
MT
61463+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
61464+#define GR_STOPMOD_MSG "denied modification of module state by "
ae4e228f
MT
61465+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
61466+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
58c5fc13
MT
61467+#define GR_IOPERM_MSG "denied use of ioperm() by "
61468+#define GR_IOPL_MSG "denied use of iopl() by "
61469+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
61470+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
61471+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
71d190be 61472+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
58c5fc13 61473+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
ae4e228f
MT
61474+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
61475+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
58c5fc13
MT
61476+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
61477+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
61478+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
61479+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
61480+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
61481+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
61482+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
ae4e228f 61483+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
58c5fc13
MT
61484+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
61485+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
61486+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
61487+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
61488+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
61489+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
61490+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
61491+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
ae4e228f 61492+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
58c5fc13 61493+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
58c5fc13 61494+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
4c928ab7 61495+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
58c5fc13
MT
61496+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
61497+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
61498+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
61499+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
61500+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
61501+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
61502+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
61503+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
58c5fc13
MT
61504+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
61505+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
61506+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
61507+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
bc901d79 61508+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
58c5fc13
MT
61509+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
61510+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
61511+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
4c928ab7 61512+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
58c5fc13
MT
61513+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
61514+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
61515+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
61516+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
61517+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
61518+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
61519+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
61520+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
61521+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
61522+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
61523+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
61524+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
61525+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
61526+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
61527+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
61528+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
61529+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
58c5fc13
MT
61530+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
61531+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
6892158b 61532+#define GR_FAILFORK_MSG "failed fork with errno %s by "
58c5fc13
MT
61533+#define GR_NICE_CHROOT_MSG "denied priority change by "
61534+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
61535+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
61536+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
61537+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
61538+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
61539+#define GR_TIME_MSG "time set by "
61540+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
61541+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
61542+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
61543+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
bc901d79 61544+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
58c5fc13
MT
61545+#define GR_BIND_MSG "denied bind() by "
61546+#define GR_CONNECT_MSG "denied connect() by "
ae4e228f
MT
61547+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
61548+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
61549+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
58c5fc13
MT
61550+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
61551+#define GR_CAP_ACL_MSG "use of %s denied for "
15a11c5b 61552+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
df50ba0c 61553+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
58c5fc13
MT
61554+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
61555+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
61556+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
61557+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
61558+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
61559+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
61560+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
61561+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
6892158b
MT
61562+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
61563+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
58c5fc13 61564+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
ae4e228f
MT
61565+#define GR_VM86_MSG "denied use of vm86 by "
61566+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
4c928ab7 61567+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
16454cff 61568+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
4c928ab7 61569+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
572b4308 61570+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
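Most grmsg.h strings end in "by " because the caller appends the shared DEFAULTSECMSG tail, filled from DEFAULTSECARGS() in grinternal.h. A standalone illustration of that composition; every field value below is invented:

/* How a grmsg.h prefix composes with the shared DEFAULTSECMSG tail.
 * In the kernel the arguments come from DEFAULTSECARGS(); here they
 * are hard-coded so the example runs on its own. */
#include <stdio.h>

/* copied from the hunk above */
#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
#define GR_TIME_MSG   "time set by "

int main(void)
{
    printf(GR_TIME_MSG DEFAULTSECMSG "\n",
           "/usr/bin/date", "date", 1234, 1000u, 1000u, 1000u, 1000u,
           "/bin/bash", "bash", 1000, 1000u, 1000u, 1000u, 1000u);
    return 0;
}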
fe2de317
MT
61571diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
61572new file mode 100644
572b4308 61573index 0000000..38bfb04
fe2de317
MT
61574--- /dev/null
61575+++ b/include/linux/grsecurity.h
572b4308 61576@@ -0,0 +1,233 @@
58c5fc13
MT
61577+#ifndef GR_SECURITY_H
61578+#define GR_SECURITY_H
61579+#include <linux/fs.h>
61580+#include <linux/fs_struct.h>
61581+#include <linux/binfmts.h>
61582+#include <linux/gracl.h>
61583+
61584+/* notify of brain-dead configs */
15a11c5b
MT
61585+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61586+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
61587+#endif
58c5fc13
MT
61588+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
61589+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
61590+#endif
58c5fc13
MT
61591+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
61592+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
61593+#endif
61594+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
61595+#error "CONFIG_PAX enabled, but no PaX options are enabled."
61596+#endif
61597+
15a11c5b
MT
61598+#include <linux/compat.h>
61599+
61600+struct user_arg_ptr {
61601+#ifdef CONFIG_COMPAT
61602+ bool is_compat;
61603+#endif
61604+ union {
61605+ const char __user *const __user *native;
61606+#ifdef CONFIG_COMPAT
61607+ compat_uptr_t __user *compat;
61608+#endif
61609+ } ptr;
61610+};
61611+
71d190be 61612+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
58c5fc13 61613+void gr_handle_brute_check(void);
71d190be
MT
61614+void gr_handle_kernel_exploit(void);
61615+int gr_process_user_ban(void);
58c5fc13
MT
61616+
61617+char gr_roletype_to_char(void);
61618+
bc901d79
MT
61619+int gr_acl_enable_at_secure(void);
61620+
58c5fc13
MT
61621+int gr_check_user_change(int real, int effective, int fs);
61622+int gr_check_group_change(int real, int effective, int fs);
61623+
61624+void gr_del_task_from_ip_table(struct task_struct *p);
61625+
61626+int gr_pid_is_chrooted(struct task_struct *p);
57199397 61627+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
58c5fc13
MT
61628+int gr_handle_chroot_nice(void);
61629+int gr_handle_chroot_sysctl(const int op);
61630+int gr_handle_chroot_setpriority(struct task_struct *p,
61631+ const int niceval);
61632+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
61633+int gr_handle_chroot_chroot(const struct dentry *dentry,
61634+ const struct vfsmount *mnt);
58c5fc13
MT
61635+void gr_handle_chroot_chdir(struct path *path);
61636+int gr_handle_chroot_chmod(const struct dentry *dentry,
61637+ const struct vfsmount *mnt, const int mode);
61638+int gr_handle_chroot_mknod(const struct dentry *dentry,
61639+ const struct vfsmount *mnt, const int mode);
61640+int gr_handle_chroot_mount(const struct dentry *dentry,
61641+ const struct vfsmount *mnt,
61642+ const char *dev_name);
61643+int gr_handle_chroot_pivot(void);
15a11c5b 61644+int gr_handle_chroot_unix(const pid_t pid);
58c5fc13
MT
61645+
61646+int gr_handle_rawio(const struct inode *inode);
58c5fc13
MT
61647+
61648+void gr_handle_ioperm(void);
61649+void gr_handle_iopl(void);
61650+
4c928ab7
MT
61651+umode_t gr_acl_umask(void);
61652+
58c5fc13
MT
61653+int gr_tpe_allow(const struct file *file);
61654+
df50ba0c
MT
61655+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
61656+void gr_clear_chroot_entries(struct task_struct *task);
58c5fc13
MT
61657+
61658+void gr_log_forkfail(const int retval);
61659+void gr_log_timechange(void);
61660+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
61661+void gr_log_chdir(const struct dentry *dentry,
61662+ const struct vfsmount *mnt);
61663+void gr_log_chroot_exec(const struct dentry *dentry,
61664+ const struct vfsmount *mnt);
15a11c5b 61665+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
58c5fc13
MT
61666+void gr_log_remount(const char *devname, const int retval);
61667+void gr_log_unmount(const char *devname, const int retval);
61668+void gr_log_mount(const char *from, const char *to, const int retval);
61669+void gr_log_textrel(struct vm_area_struct *vma);
6892158b
MT
61670+void gr_log_rwxmmap(struct file *file);
61671+void gr_log_rwxmprotect(struct file *file);
58c5fc13
MT
61672+
61673+int gr_handle_follow_link(const struct inode *parent,
61674+ const struct inode *inode,
61675+ const struct dentry *dentry,
61676+ const struct vfsmount *mnt);
61677+int gr_handle_fifo(const struct dentry *dentry,
61678+ const struct vfsmount *mnt,
61679+ const struct dentry *dir, const int flag,
61680+ const int acc_mode);
61681+int gr_handle_hardlink(const struct dentry *dentry,
61682+ const struct vfsmount *mnt,
61683+ struct inode *inode,
61684+ const int mode, const char *to);
61685+
61686+int gr_is_capable(const int cap);
61687+int gr_is_capable_nolog(const int cap);
5e856224
MT
61688+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
61689+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
61690+
58c5fc13
MT
61691+void gr_learn_resource(const struct task_struct *task, const int limit,
61692+ const unsigned long wanted, const int gt);
61693+void gr_copy_label(struct task_struct *tsk);
61694+void gr_handle_crash(struct task_struct *task, const int sig);
61695+int gr_handle_signal(const struct task_struct *p, const int sig);
61696+int gr_check_crash_uid(const uid_t uid);
61697+int gr_check_protected_task(const struct task_struct *task);
57199397 61698+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
58c5fc13
MT
61699+int gr_acl_handle_mmap(const struct file *file,
61700+ const unsigned long prot);
61701+int gr_acl_handle_mprotect(const struct file *file,
61702+ const unsigned long prot);
61703+int gr_check_hidden_task(const struct task_struct *tsk);
61704+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
61705+ const struct vfsmount *mnt);
61706+__u32 gr_acl_handle_utime(const struct dentry *dentry,
61707+ const struct vfsmount *mnt);
61708+__u32 gr_acl_handle_access(const struct dentry *dentry,
61709+ const struct vfsmount *mnt, const int fmode);
58c5fc13 61710+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
4c928ab7 61711+ const struct vfsmount *mnt, umode_t *mode);
58c5fc13
MT
61712+__u32 gr_acl_handle_chown(const struct dentry *dentry,
61713+ const struct vfsmount *mnt);
bc901d79
MT
61714+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
61715+ const struct vfsmount *mnt);
58c5fc13
MT
61716+int gr_handle_ptrace(struct task_struct *task, const long request);
61717+int gr_handle_proc_ptrace(struct task_struct *task);
61718+__u32 gr_acl_handle_execve(const struct dentry *dentry,
61719+ const struct vfsmount *mnt);
61720+int gr_check_crash_exec(const struct file *filp);
61721+int gr_acl_is_enabled(void);
61722+void gr_set_kernel_label(struct task_struct *task);
61723+void gr_set_role_label(struct task_struct *task, const uid_t uid,
61724+ const gid_t gid);
61725+int gr_set_proc_label(const struct dentry *dentry,
61726+ const struct vfsmount *mnt,
4c928ab7 61727+ const int unsafe_flags);
58c5fc13
MT
61728+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
61729+ const struct vfsmount *mnt);
61730+__u32 gr_acl_handle_open(const struct dentry *dentry,
6e9df6a3 61731+ const struct vfsmount *mnt, int acc_mode);
58c5fc13
MT
61732+__u32 gr_acl_handle_creat(const struct dentry *dentry,
61733+ const struct dentry *p_dentry,
6e9df6a3
MT
61734+ const struct vfsmount *p_mnt,
61735+ int open_flags, int acc_mode, const int imode);
58c5fc13
MT
61736+void gr_handle_create(const struct dentry *dentry,
61737+ const struct vfsmount *mnt);
6e9df6a3
MT
61738+void gr_handle_proc_create(const struct dentry *dentry,
61739+ const struct inode *inode);
58c5fc13
MT
61740+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
61741+ const struct dentry *parent_dentry,
61742+ const struct vfsmount *parent_mnt,
61743+ const int mode);
61744+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
61745+ const struct dentry *parent_dentry,
61746+ const struct vfsmount *parent_mnt);
61747+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
61748+ const struct vfsmount *mnt);
61749+void gr_handle_delete(const ino_t ino, const dev_t dev);
61750+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
61751+ const struct vfsmount *mnt);
61752+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
61753+ const struct dentry *parent_dentry,
61754+ const struct vfsmount *parent_mnt,
61755+ const char *from);
61756+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
61757+ const struct dentry *parent_dentry,
61758+ const struct vfsmount *parent_mnt,
61759+ const struct dentry *old_dentry,
61760+ const struct vfsmount *old_mnt, const char *to);
572b4308 61761+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
58c5fc13
MT
61762+int gr_acl_handle_rename(struct dentry *new_dentry,
61763+ struct dentry *parent_dentry,
61764+ const struct vfsmount *parent_mnt,
61765+ struct dentry *old_dentry,
61766+ struct inode *old_parent_inode,
61767+ struct vfsmount *old_mnt, const char *newname);
61768+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61769+ struct dentry *old_dentry,
61770+ struct dentry *new_dentry,
61771+ struct vfsmount *mnt, const __u8 replace);
61772+__u32 gr_check_link(const struct dentry *new_dentry,
61773+ const struct dentry *parent_dentry,
61774+ const struct vfsmount *parent_mnt,
61775+ const struct dentry *old_dentry,
61776+ const struct vfsmount *old_mnt);
61777+int gr_acl_handle_filldir(const struct file *file, const char *name,
61778+ const unsigned int namelen, const ino_t ino);
61779+
61780+__u32 gr_acl_handle_unix(const struct dentry *dentry,
61781+ const struct vfsmount *mnt);
61782+void gr_acl_handle_exit(void);
61783+void gr_acl_handle_psacct(struct task_struct *task, const long code);
61784+int gr_acl_handle_procpidmem(const struct task_struct *task);
ae4e228f
MT
61785+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
61786+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
61787+void gr_audit_ptrace(struct task_struct *task);
16454cff 61788+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
58c5fc13 61789+
4c928ab7
MT
61790+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
61791+
58c5fc13 61792+#ifdef CONFIG_GRKERNSEC
6892158b 61793+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
ae4e228f 61794+void gr_handle_vm86(void);
71d190be 61795+void gr_handle_mem_readwrite(u64 from, u64 to);
58c5fc13 61796+
4c928ab7
MT
61797+void gr_log_badprocpid(const char *entry);
61798+
58c5fc13 61799+extern int grsec_enable_dmesg;
df50ba0c 61800+extern int grsec_disable_privio;
15a11c5b
MT
61801+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61802+extern int grsec_enable_chroot_findtask;
61803+#endif
4c928ab7
MT
61804+#ifdef CONFIG_GRKERNSEC_SETXID
61805+extern int grsec_enable_setxid;
61806+#endif
58c5fc13
MT
61807+#endif
61808+
61809+#endif
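grsecurity.h also carries struct user_arg_ptr, a small tagged union that lets the exec-argument helpers walk either a native or a compat (32-bit) argv layout behind one type. A standalone sketch of the same idea, with the kernel-only pieces (__user annotations, copy_from_user) left out:

/* Standalone sketch of the user_arg_ptr idea above: one handle that can
 * hold either a native (pointer-sized) or a compat (32-bit) argv array. */
#include <stdint.h>
#include <stdio.h>

struct arg_ptr {
    int is_compat;
    union {
        const char *const *native;
        const uint32_t *compat;      /* stands in for compat_uptr_t */
    } ptr;
};

static const char *get_arg(struct arg_ptr p, int nr)
{
    if (p.is_compat)
        return (const char *)(uintptr_t)p.ptr.compat[nr];
    return p.ptr.native[nr];
}

int main(void)
{
    const char *argv[] = { "/bin/true", "--version" };
    struct arg_ptr p = { 0, { .native = argv } };

    printf("argv[1] = %s\n", get_arg(p, 1));
    return 0;
}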
fe2de317
MT
61810diff --git a/include/linux/grsock.h b/include/linux/grsock.h
61811new file mode 100644
61812index 0000000..e7ffaaf
61813--- /dev/null
61814+++ b/include/linux/grsock.h
ae4e228f
MT
61815@@ -0,0 +1,19 @@
61816+#ifndef __GRSOCK_H
61817+#define __GRSOCK_H
61818+
61819+extern void gr_attach_curr_ip(const struct sock *sk);
61820+extern int gr_handle_sock_all(const int family, const int type,
61821+ const int protocol);
61822+extern int gr_handle_sock_server(const struct sockaddr *sck);
df50ba0c 61823+extern int gr_handle_sock_server_other(const struct sock *sck);
ae4e228f
MT
61824+extern int gr_handle_sock_client(const struct sockaddr *sck);
61825+extern int gr_search_connect(struct socket * sock,
61826+ struct sockaddr_in * addr);
61827+extern int gr_search_bind(struct socket * sock,
61828+ struct sockaddr_in * addr);
61829+extern int gr_search_listen(struct socket * sock);
61830+extern int gr_search_accept(struct socket * sock);
61831+extern int gr_search_socket(const int domain, const int type,
61832+ const int protocol);
61833+
61834+#endif
fe2de317 61835diff --git a/include/linux/hid.h b/include/linux/hid.h
5e856224 61836index 3a95da6..51986f1 100644
fe2de317
MT
61837--- a/include/linux/hid.h
61838+++ b/include/linux/hid.h
5e856224 61839@@ -696,7 +696,7 @@ struct hid_ll_driver {
15a11c5b
MT
61840 unsigned int code, int value);
61841
61842 int (*parse)(struct hid_device *hdev);
61843-};
61844+} __no_const;
61845
61846 #define PM_HINT_FULLON 1<<5
61847 #define PM_HINT_NORMAL 1<<1
fe2de317 61848diff --git a/include/linux/highmem.h b/include/linux/highmem.h
c6e2a6c8 61849index d3999b4..1304cb4 100644
fe2de317
MT
61850--- a/include/linux/highmem.h
61851+++ b/include/linux/highmem.h
c6e2a6c8
MT
61852@@ -221,6 +221,18 @@ static inline void clear_highpage(struct page *page)
61853 kunmap_atomic(kaddr);
58c5fc13
MT
61854 }
61855
61856+static inline void sanitize_highpage(struct page *page)
61857+{
61858+ void *kaddr;
61859+ unsigned long flags;
61860+
61861+ local_irq_save(flags);
c6e2a6c8 61862+ kaddr = kmap_atomic(page);
58c5fc13 61863+ clear_page(kaddr);
c6e2a6c8 61864+ kunmap_atomic(kaddr);
58c5fc13
MT
61865+ local_irq_restore(flags);
61866+}
61867+
61868 static inline void zero_user_segments(struct page *page,
61869 unsigned start1, unsigned end1,
61870 unsigned start2, unsigned end2)
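sanitize_highpage() clears a page through a short-lived atomic mapping with interrupts disabled, presumably so the memory-sanitizing side of the patch can scrub pages as they are freed. A compile-and-run sketch of the same pattern with the kernel primitives stubbed out:

/* Standalone sketch of the sanitize_highpage() pattern above: clear a
 * page through a temporary mapping with interrupts masked.  The kernel
 * primitives are stubs so the file builds on its own. */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

struct page { unsigned char data[PAGE_SIZE]; };

static unsigned long local_irq_save_stub(void)      { return 1; }
static void local_irq_restore_stub(unsigned long f) { (void)f; }
static void *kmap_atomic_stub(struct page *p)       { return p->data; }
static void kunmap_atomic_stub(void *addr)          { (void)addr; }

static void sanitize_page(struct page *page)
{
    unsigned long flags = local_irq_save_stub();
    void *kaddr = kmap_atomic_stub(page);

    memset(kaddr, 0, PAGE_SIZE);        /* clear_page() equivalent */
    kunmap_atomic_stub(kaddr);
    local_irq_restore_stub(flags);
}

int main(void)
{
    static struct page p = { { 0xAA } };
    sanitize_page(&p);
    printf("first byte after sanitize: %u\n", p.data[0]);
    return 0;
}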
fe2de317 61871diff --git a/include/linux/i2c.h b/include/linux/i2c.h
c6e2a6c8 61872index 195d8b3..e20cfab 100644
fe2de317
MT
61873--- a/include/linux/i2c.h
61874+++ b/include/linux/i2c.h
c6e2a6c8 61875@@ -365,6 +365,7 @@ struct i2c_algorithm {
15a11c5b
MT
61876 /* To determine what the adapter supports */
61877 u32 (*functionality) (struct i2c_adapter *);
61878 };
61879+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
61880
61881 /*
61882 * i2c_adapter is the structure used to identify a physical i2c bus along
fe2de317 61883diff --git a/include/linux/i2o.h b/include/linux/i2o.h
c6e2a6c8 61884index d23c3c2..eb63c81 100644
fe2de317
MT
61885--- a/include/linux/i2o.h
61886+++ b/include/linux/i2o.h
c6e2a6c8 61887@@ -565,7 +565,7 @@ struct i2o_controller {
8308f9c9
MT
61888 struct i2o_device *exec; /* Executive */
61889 #if BITS_PER_LONG == 64
61890 spinlock_t context_list_lock; /* lock for context_list */
61891- atomic_t context_list_counter; /* needed for unique contexts */
61892+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
61893 struct list_head context_list; /* list of context id's
61894 and pointers */
61895 #endif
5e856224
MT
61896diff --git a/include/linux/if_team.h b/include/linux/if_team.h
61897index 58404b0..439ed95 100644
61898--- a/include/linux/if_team.h
61899+++ b/include/linux/if_team.h
61900@@ -64,6 +64,7 @@ struct team_mode_ops {
61901 void (*port_leave)(struct team *team, struct team_port *port);
61902 void (*port_change_mac)(struct team *team, struct team_port *port);
61903 };
61904+typedef struct team_mode_ops __no_const team_mode_ops_no_const;
61905
61906 enum team_option_type {
61907 TEAM_OPTION_TYPE_U32,
61908@@ -112,7 +113,7 @@ struct team {
61909 struct list_head option_list;
61910
61911 const struct team_mode *mode;
61912- struct team_mode_ops ops;
61913+ team_mode_ops_no_const ops;
61914 long mode_priv[TEAM_MODE_PRIV_LONGS];
61915 };
61916
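The if_team.h, i2c.h and similar hunks follow one recurring pattern: structures of function pointers are constified elsewhere by the PaX constify plugin, so when an instance legitimately has to stay writable (team->ops is swapped whenever the teaming mode changes) the patch introduces a typedef carrying __no_const and uses it for that field. A trimmed-down sketch of the shape of that pattern; __no_const is reduced to a no-op macro here because the real attribute comes from the compiler plugin, and the struct members are simplified:

/* Sketch of the __no_const typedef pattern used above. */
#include <stdio.h>

#define __no_const   /* real definition supplied by the constify plugin */

struct team_mode_ops {
    int (*port_enter)(int port);
};
typedef struct team_mode_ops __no_const team_mode_ops_no_const;

static int rr_port_enter(int port) { printf("round-robin port %d\n", port); return 0; }
static int ab_port_enter(int port) { printf("active-backup port %d\n", port); return 0; }

struct team {
    team_mode_ops_no_const ops;   /* writable: swapped on mode change */
};

int main(void)
{
    struct team t = { { rr_port_enter } };
    t.ops.port_enter(0);

    t.ops.port_enter = ab_port_enter;   /* runtime mode switch stays legal */
    t.ops.port_enter(0);
    return 0;
}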
fe2de317 61917diff --git a/include/linux/init.h b/include/linux/init.h
572b4308 61918index 6b95109..bcbdd68 100644
fe2de317
MT
61919--- a/include/linux/init.h
61920+++ b/include/linux/init.h
572b4308
MT
61921@@ -39,9 +39,15 @@
61922 * Also note, that this data cannot be "const".
61923 */
61924
61925+#ifdef MODULE
61926+#define add_latent_entropy
61927+#else
61928+#define add_latent_entropy __latent_entropy
61929+#endif
61930+
61931 /* These are for everybody (although not all archs will actually
61932 discard it in modules) */
61933-#define __init __section(.init.text) __cold notrace
61934+#define __init __section(.init.text) __cold notrace add_latent_entropy
61935 #define __initdata __section(.init.data)
61936 #define __initconst __section(.init.rodata)
61937 #define __exitdata __section(.exit.data)
61938@@ -83,7 +89,7 @@
61939 #define __exit __section(.exit.text) __exitused __cold notrace
61940
61941 /* Used for HOTPLUG */
61942-#define __devinit __section(.devinit.text) __cold notrace
61943+#define __devinit __section(.devinit.text) __cold notrace add_latent_entropy
61944 #define __devinitdata __section(.devinit.data)
61945 #define __devinitconst __section(.devinit.rodata)
61946 #define __devexit __section(.devexit.text) __exitused __cold notrace
61947@@ -91,7 +97,7 @@
61948 #define __devexitconst __section(.devexit.rodata)
61949
61950 /* Used for HOTPLUG_CPU */
61951-#define __cpuinit __section(.cpuinit.text) __cold notrace
61952+#define __cpuinit __section(.cpuinit.text) __cold notrace add_latent_entropy
61953 #define __cpuinitdata __section(.cpuinit.data)
61954 #define __cpuinitconst __section(.cpuinit.rodata)
61955 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
61956@@ -99,7 +105,7 @@
61957 #define __cpuexitconst __section(.cpuexit.rodata)
61958
61959 /* Used for MEMORY_HOTPLUG */
61960-#define __meminit __section(.meminit.text) __cold notrace
61961+#define __meminit __section(.meminit.text) __cold notrace add_latent_entropy
61962 #define __meminitdata __section(.meminit.data)
61963 #define __meminitconst __section(.meminit.rodata)
61964 #define __memexit __section(.memexit.text) __exitused __cold notrace
61965@@ -294,13 +300,13 @@ void __init parse_early_options(char *cmdline);
6892158b
MT
61966
61967 /* Each module must use one module_init(). */
61968 #define module_init(initfn) \
61969- static inline initcall_t __inittest(void) \
61970+ static inline __used initcall_t __inittest(void) \
61971 { return initfn; } \
61972 int init_module(void) __attribute__((alias(#initfn)));
61973
61974 /* This is only required if you want to be unloadable. */
61975 #define module_exit(exitfn) \
61976- static inline exitcall_t __exittest(void) \
61977+ static inline __used exitcall_t __exittest(void) \
61978 { return exitfn; } \
61979 void cleanup_module(void) __attribute__((alias(#exitfn)));
61980
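The init.h hunk appends add_latent_entropy to the __init/__devinit/__cpuinit/__meminit attributes, so built-in (non-module) boot-time code gets instrumented by the latent-entropy gcc plugin, and marks __inittest/__exittest as __used. A tiny sketch of how the macro resolves, with the plugin attribute stubbed out so it compiles on its own:

/* How add_latent_entropy resolves, shown outside the kernel.  The real
 * __latent_entropy attribute is provided by the latent_entropy gcc
 * plugin; it is stubbed to nothing here. */
#include <stdio.h>

#define __latent_entropy            /* plugin-provided in a real build */

#ifdef MODULE
#define add_latent_entropy          /* modules: no instrumentation */
#else
#define add_latent_entropy __latent_entropy
#endif

/* stand-in for: #define __init __section(.init.text) __cold notrace add_latent_entropy */
#define __init_demo add_latent_entropy

static int __init_demo demo_initcall(void)
{
    printf("built-in init code would be instrumented here\n");
    return 0;
}

int main(void)
{
    return demo_initcall();
}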
fe2de317 61981diff --git a/include/linux/init_task.h b/include/linux/init_task.h
c6e2a6c8 61982index e4baff5..83bb175 100644
fe2de317
MT
61983--- a/include/linux/init_task.h
61984+++ b/include/linux/init_task.h
c6e2a6c8 61985@@ -134,6 +134,12 @@ extern struct cred init_cred;
4c928ab7
MT
61986
61987 #define INIT_TASK_COMM "swapper"
71d190be
MT
61988
61989+#ifdef CONFIG_X86
61990+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
61991+#else
61992+#define INIT_TASK_THREAD_INFO
61993+#endif
61994+
61995 /*
15a11c5b
MT
61996 * INIT_TASK is used to set up the first task table, touch at
61997 * your own risk!. Base=0, limit=0x1fffff (=2MB)
c6e2a6c8 61998@@ -172,6 +178,7 @@ extern struct cred init_cred;
71d190be 61999 RCU_INIT_POINTER(.cred, &init_cred), \
4c928ab7 62000 .comm = INIT_TASK_COMM, \
71d190be
MT
62001 .thread = INIT_THREAD, \
62002+ INIT_TASK_THREAD_INFO \
62003 .fs = &init_fs, \
62004 .files = &init_files, \
62005 .signal = &init_signals, \
fe2de317 62006diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
4c928ab7 62007index e6ca56d..8583707 100644
fe2de317
MT
62008--- a/include/linux/intel-iommu.h
62009+++ b/include/linux/intel-iommu.h
15a11c5b
MT
62010@@ -296,7 +296,7 @@ struct iommu_flush {
62011 u8 fm, u64 type);
62012 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
62013 unsigned int size_order, u64 type);
62014-};
62015+} __no_const;
62016
62017 enum {
62018 SR_DMAR_FECTL_REG,
fe2de317 62019diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
c6e2a6c8 62020index 2aea5d2..0b82f0c 100644
fe2de317
MT
62021--- a/include/linux/interrupt.h
62022+++ b/include/linux/interrupt.h
c6e2a6c8 62023@@ -439,7 +439,7 @@ enum
ae4e228f
MT
62024 /* map softirq index to softirq name. update 'softirq_to_name' in
62025 * kernel/softirq.c when adding a new softirq.
62026 */
62027-extern char *softirq_to_name[NR_SOFTIRQS];
62028+extern const char * const softirq_to_name[NR_SOFTIRQS];
62029
62030 /* softirq mask and active fields moved to irq_cpustat_t in
62031 * asm/hardirq.h to get better cache usage. KAO
c6e2a6c8 62032@@ -447,12 +447,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
ae4e228f
MT
62033
62034 struct softirq_action
62035 {
62036- void (*action)(struct softirq_action *);
62037+ void (*action)(void);
62038 };
62039
62040 asmlinkage void do_softirq(void);
62041 asmlinkage void __do_softirq(void);
62042-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
62043+extern void open_softirq(int nr, void (*action)(void));
62044 extern void softirq_init(void);
c6e2a6c8
MT
62045 extern void __raise_softirq_irqoff(unsigned int nr);
62046
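The interrupt.h hunk constifies softirq_to_name and drops the unused struct softirq_action * parameter, so softirq handlers and open_softirq() now use a plain void (*)(void) signature. A standalone sketch of registering and running handlers with that signature, with a small table standing in for the kernel's softirq vector:

/* Standalone sketch of the changed softirq handler signature above:
 * handlers take no arguments and open_softirq() stores them in a fixed
 * table.  The kernel's raise/irq-exit machinery becomes a direct loop. */
#include <stdio.h>

#define NR_SOFTIRQS 2
enum { DEMO_TIMER_SOFTIRQ, DEMO_NET_SOFTIRQ };

static void (*softirq_vec[NR_SOFTIRQS])(void);

static void open_softirq(int nr, void (*action)(void))
{
    softirq_vec[nr] = action;
}

static void timer_action(void) { printf("timer softirq ran\n"); }
static void net_action(void)   { printf("net softirq ran\n");   }

int main(void)
{
    open_softirq(DEMO_TIMER_SOFTIRQ, timer_action);
    open_softirq(DEMO_NET_SOFTIRQ, net_action);

    for (int i = 0; i < NR_SOFTIRQS; i++)   /* stand-in for __do_softirq() */
        softirq_vec[i]();
    return 0;
}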
fe2de317 62047diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
4c928ab7 62048index 3875719..4cd454c 100644
fe2de317
MT
62049--- a/include/linux/kallsyms.h
62050+++ b/include/linux/kallsyms.h
58c5fc13
MT
62051@@ -15,7 +15,8 @@
62052
62053 struct module;
62054
62055-#ifdef CONFIG_KALLSYMS
bc901d79 62056+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
58c5fc13
MT
62057+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
62058 /* Lookup the address for a symbol. Returns 0 if not found. */
62059 unsigned long kallsyms_lookup_name(const char *name);
62060
fe2de317 62061@@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
58c5fc13
MT
62062 /* Stupid that this does nothing, but I didn't create this mess. */
62063 #define __print_symbol(fmt, addr)
62064 #endif /*CONFIG_KALLSYMS*/
bc901d79 62065+#else /* when included by kallsyms.c, vsnprintf.c, or
4c928ab7 62066+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
58c5fc13 62067+extern void __print_symbol(const char *fmt, unsigned long address);
66a7e928 62068+extern int sprint_backtrace(char *buffer, unsigned long address);
bc901d79
MT
62069+extern int sprint_symbol(char *buffer, unsigned long address);
62070+const char *kallsyms_lookup(unsigned long addr,
62071+ unsigned long *symbolsize,
62072+ unsigned long *offset,
62073+ char **modname, char *namebuf);
58c5fc13
MT
62074+#endif
62075
62076 /* This macro allows us to keep printk typechecking */
4c928ab7 62077 static __printf(1, 2)
fe2de317 62078diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
5e856224 62079index c4d2fc1..5df9c19 100644
fe2de317
MT
62080--- a/include/linux/kgdb.h
62081+++ b/include/linux/kgdb.h
8308f9c9
MT
62082@@ -53,7 +53,7 @@ extern int kgdb_connected;
62083 extern int kgdb_io_module_registered;
62084
62085 extern atomic_t kgdb_setting_breakpoint;
62086-extern atomic_t kgdb_cpu_doing_single_step;
62087+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
62088
62089 extern struct task_struct *kgdb_usethread;
62090 extern struct task_struct *kgdb_contthread;
5e856224 62091@@ -252,7 +252,7 @@ struct kgdb_arch {
15a11c5b
MT
62092 void (*disable_hw_break)(struct pt_regs *regs);
62093 void (*remove_all_hw_break)(void);
62094 void (*correct_hw_break)(void);
62095-};
62096+} __do_const;
ae4e228f 62097
15a11c5b
MT
62098 /**
62099 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
5e856224 62100@@ -277,7 +277,7 @@ struct kgdb_io {
15a11c5b
MT
62101 void (*pre_exception) (void);
62102 void (*post_exception) (void);
62103 int is_console;
62104-};
62105+} __do_const;
ae4e228f 62106
15a11c5b 62107 extern struct kgdb_arch arch_kgdb_ops;
ae4e228f 62108
fe2de317 62109diff --git a/include/linux/kmod.h b/include/linux/kmod.h
c6e2a6c8 62110index dd99c32..da06047 100644
fe2de317
MT
62111--- a/include/linux/kmod.h
62112+++ b/include/linux/kmod.h
62113@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
71d190be 62114 * usually useless though. */
4c928ab7
MT
62115 extern __printf(2, 3)
62116 int __request_module(bool wait, const char *name, ...);
62117+extern __printf(3, 4)
62118+int ___request_module(bool wait, char *param_name, const char *name, ...);
71d190be
MT
62119 #define request_module(mod...) __request_module(true, mod)
62120 #define request_module_nowait(mod...) __request_module(false, mod)
62121 #define try_then_request_module(x, mod...) \
5e856224
MT
62122diff --git a/include/linux/kref.h b/include/linux/kref.h
62123index 9c07dce..a92fa71 100644
62124--- a/include/linux/kref.h
62125+++ b/include/linux/kref.h
62126@@ -63,7 +63,7 @@ static inline void kref_get(struct kref *kref)
62127 static inline int kref_sub(struct kref *kref, unsigned int count,
62128 void (*release)(struct kref *kref))
62129 {
62130- WARN_ON(release == NULL);
62131+ BUG_ON(release == NULL);
62132
62133 if (atomic_sub_and_test((int) count, &kref->refcount)) {
62134 release(kref);
fe2de317 62135diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
c6e2a6c8 62136index 72cbf08..dd0201d 100644
fe2de317
MT
62137--- a/include/linux/kvm_host.h
62138+++ b/include/linux/kvm_host.h
c6e2a6c8 62139@@ -322,7 +322,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
58c5fc13
MT
62140 void vcpu_load(struct kvm_vcpu *vcpu);
62141 void vcpu_put(struct kvm_vcpu *vcpu);
62142
57199397
MT
62143-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
62144+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
58c5fc13
MT
62145 struct module *module);
62146 void kvm_exit(void);
62147
c6e2a6c8 62148@@ -486,7 +486,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
58c5fc13
MT
62149 struct kvm_guest_debug *dbg);
62150 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
62151
62152-int kvm_arch_init(void *opaque);
62153+int kvm_arch_init(const void *opaque);
62154 void kvm_arch_exit(void);
62155
62156 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
fe2de317 62157diff --git a/include/linux/libata.h b/include/linux/libata.h
c6e2a6c8 62158index 6e887c7..4539601 100644
fe2de317
MT
62159--- a/include/linux/libata.h
62160+++ b/include/linux/libata.h
c6e2a6c8 62161@@ -910,7 +910,7 @@ struct ata_port_operations {
15a11c5b
MT
62162 * fields must be pointers.
62163 */
62164 const struct ata_port_operations *inherits;
62165-};
62166+} __do_const;
66a7e928 62167
15a11c5b 62168 struct ata_port_info {
ae4e228f 62169 unsigned long flags;
fe2de317
MT
62170diff --git a/include/linux/mca.h b/include/linux/mca.h
62171index 3797270..7765ede 100644
62172--- a/include/linux/mca.h
62173+++ b/include/linux/mca.h
15a11c5b
MT
62174@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
62175 int region);
62176 void * (*mca_transform_memory)(struct mca_device *,
62177 void *memory);
62178-};
62179+} __no_const;
62180
62181 struct mca_bus {
62182 u64 default_dma_mask;
fe2de317 62183diff --git a/include/linux/memory.h b/include/linux/memory.h
5e856224 62184index 1ac7f6e..a5794d0 100644
fe2de317
MT
62185--- a/include/linux/memory.h
62186+++ b/include/linux/memory.h
5e856224 62187@@ -143,7 +143,7 @@ struct memory_accessor {
15a11c5b
MT
62188 size_t count);
62189 ssize_t (*write)(struct memory_accessor *, const char *buf,
62190 off_t offset, size_t count);
62191-};
62192+} __no_const;
ae4e228f
MT
62193
62194 /*
15a11c5b 62195 * Kernel text modification mutex, used for code patching. Users of this lock
fe2de317 62196diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h
c6e2a6c8 62197index ee96cd5..7823c3a 100644
fe2de317
MT
62198--- a/include/linux/mfd/abx500.h
62199+++ b/include/linux/mfd/abx500.h
c6e2a6c8 62200@@ -455,6 +455,7 @@ struct abx500_ops {
15a11c5b 62201 int (*event_registers_startup_state_get) (struct device *, u8 *);
66a7e928
MT
62202 int (*startup_irq_enabled) (struct device *, unsigned int);
62203 };
15a11c5b 62204+typedef struct abx500_ops __no_const abx500_ops_no_const;
66a7e928 62205
15a11c5b 62206 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
66a7e928 62207 void abx500_remove_ops(struct device *dev);
c6e2a6c8
MT
62208diff --git a/include/linux/mfd/abx500/ux500_chargalg.h b/include/linux/mfd/abx500/ux500_chargalg.h
62209index 9b07725..3d55001 100644
62210--- a/include/linux/mfd/abx500/ux500_chargalg.h
62211+++ b/include/linux/mfd/abx500/ux500_chargalg.h
62212@@ -19,7 +19,7 @@ struct ux500_charger_ops {
62213 int (*enable) (struct ux500_charger *, int, int, int);
62214 int (*kick_wd) (struct ux500_charger *);
62215 int (*update_curr) (struct ux500_charger *, int);
62216-};
62217+} __no_const;
62218
62219 /**
62220 * struct ux500_charger - power supply ux500 charger sub class
fe2de317 62221diff --git a/include/linux/mm.h b/include/linux/mm.h
c6e2a6c8 62222index 74aa71b..4ae97ba 100644
fe2de317
MT
62223--- a/include/linux/mm.h
62224+++ b/include/linux/mm.h
c6e2a6c8 62225@@ -116,7 +116,14 @@ extern unsigned int kobjsize(const void *objp);
58c5fc13 62226
df50ba0c
MT
62227 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
62228 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
62229+
62230+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62231+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
62232+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
62233+#else
62234 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
58c5fc13
MT
62235+#endif
62236+
df50ba0c
MT
62237 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
62238 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
62239
c6e2a6c8 62240@@ -1013,34 +1020,6 @@ int set_page_dirty(struct page *page);
bc901d79
MT
62241 int set_page_dirty_lock(struct page *page);
62242 int clear_page_dirty_for_io(struct page *page);
62243
62244-/* Is the vma a continuation of the stack vma above it? */
66a7e928 62245-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
bc901d79
MT
62246-{
62247- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
62248-}
66a7e928
MT
62249-
62250-static inline int stack_guard_page_start(struct vm_area_struct *vma,
62251- unsigned long addr)
62252-{
62253- return (vma->vm_flags & VM_GROWSDOWN) &&
62254- (vma->vm_start == addr) &&
62255- !vma_growsdown(vma->vm_prev, addr);
62256-}
62257-
62258-/* Is the vma a continuation of the stack vma below it? */
62259-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
62260-{
62261- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
62262-}
62263-
62264-static inline int stack_guard_page_end(struct vm_area_struct *vma,
62265- unsigned long addr)
62266-{
62267- return (vma->vm_flags & VM_GROWSUP) &&
62268- (vma->vm_end == addr) &&
62269- !vma_growsup(vma->vm_next, addr);
62270-}
bc901d79 62271-
c6e2a6c8
MT
62272 extern pid_t
62273 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
62274
62275@@ -1139,6 +1118,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
6e9df6a3
MT
62276 }
62277 #endif
58c5fc13 62278
6892158b 62279+#ifdef CONFIG_MMU
15a11c5b 62280+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
6892158b 62281+#else
15a11c5b 62282+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
6892158b
MT
62283+{
62284+ return __pgprot(0);
62285+}
62286+#endif
58c5fc13
MT
62287+
62288 int vma_wants_writenotify(struct vm_area_struct *vma);
62289
bc901d79 62290 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
c6e2a6c8 62291@@ -1157,8 +1145,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
5e856224
MT
62292 {
62293 return 0;
62294 }
62295+
62296+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
62297+ unsigned long address)
62298+{
62299+ return 0;
62300+}
62301 #else
62302 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62303+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
62304 #endif
62305
62306 #ifdef __PAGETABLE_PMD_FOLDED
c6e2a6c8 62307@@ -1167,8 +1162,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
5e856224
MT
62308 {
62309 return 0;
62310 }
62311+
62312+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
62313+ unsigned long address)
62314+{
62315+ return 0;
62316+}
62317 #else
62318 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
62319+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
62320 #endif
62321
62322 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
c6e2a6c8 62323@@ -1186,11 +1188,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
5e856224
MT
62324 NULL: pud_offset(pgd, address);
62325 }
62326
62327+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
62328+{
62329+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
62330+ NULL: pud_offset(pgd, address);
62331+}
62332+
62333 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
62334 {
62335 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
62336 NULL: pmd_offset(pud, address);
62337 }
62338+
62339+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
62340+{
62341+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
62342+ NULL: pmd_offset(pud, address);
62343+}
62344 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
62345
62346 #if USE_SPLIT_PTLOCKS
c6e2a6c8
MT
62347@@ -1400,6 +1414,7 @@ extern unsigned long do_mmap(struct file *, unsigned long,
62348 unsigned long, unsigned long,
62349 unsigned long, unsigned long);
58c5fc13
MT
62350 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
62351+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
62352
c6e2a6c8
MT
62353 /* These take the mm semaphore themselves */
62354 extern unsigned long vm_brk(unsigned long, unsigned long);
62355@@ -1462,6 +1477,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
58c5fc13
MT
62356 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
62357 struct vm_area_struct **pprev);
62358
62359+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
df50ba0c 62360+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
58c5fc13
MT
62361+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
62362+
62363 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
62364 NULL if none. Assume start_addr < end_addr. */
62365 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
c6e2a6c8 62366@@ -1490,15 +1509,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
5e856224 62367 return vma;
58c5fc13
MT
62368 }
62369
6892158b 62370-#ifdef CONFIG_MMU
58c5fc13 62371-pgprot_t vm_get_page_prot(unsigned long vm_flags);
6892158b
MT
62372-#else
62373-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
62374-{
62375- return __pgprot(0);
62376-}
62377-#endif
62378-
58c5fc13
MT
62379 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
62380 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
62381 unsigned long pfn, unsigned long size, pgprot_t);
c6e2a6c8 62382@@ -1602,7 +1612,7 @@ extern int unpoison_memory(unsigned long pfn);
ae4e228f
MT
62383 extern int sysctl_memory_failure_early_kill;
62384 extern int sysctl_memory_failure_recovery;
62385 extern void shake_page(struct page *p, int access);
62386-extern atomic_long_t mce_bad_pages;
62387+extern atomic_long_unchecked_t mce_bad_pages;
62388 extern int soft_offline_page(struct page *page, int flags);
66a7e928
MT
62389
62390 extern void dump_page(struct page *page);
c6e2a6c8 62391@@ -1633,5 +1643,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
5e856224
MT
62392 static inline bool page_is_guard(struct page *page) { return false; }
62393 #endif /* CONFIG_DEBUG_PAGEALLOC */
df50ba0c 62394
58c5fc13
MT
62395+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
62396+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
62397+#else
62398+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
62399+#endif
62400+
62401 #endif /* __KERNEL__ */
62402 #endif /* _LINUX_MM_H */
fe2de317 62403diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
572b4308 62404index b35752f..41075a0 100644
fe2de317
MT
62405--- a/include/linux/mm_types.h
62406+++ b/include/linux/mm_types.h
572b4308 62407@@ -262,6 +262,8 @@ struct vm_area_struct {
58c5fc13
MT
62408 #ifdef CONFIG_NUMA
62409 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
62410 #endif
62411+
62412+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
62413 };
62414
62415 struct core_thread {
572b4308 62416@@ -336,7 +338,7 @@ struct mm_struct {
5e856224
MT
62417 unsigned long def_flags;
62418 unsigned long nr_ptes; /* Page table pages */
62419 unsigned long start_code, end_code, start_data, end_data;
62420- unsigned long start_brk, brk, start_stack;
62421+ unsigned long brk_gap, start_brk, brk, start_stack;
62422 unsigned long arg_start, arg_end, env_start, env_end;
62423
62424 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
572b4308 62425@@ -398,6 +400,24 @@ struct mm_struct {
15a11c5b
MT
62426 #ifdef CONFIG_CPUMASK_OFFSTACK
62427 struct cpumask cpumask_allocation;
58c5fc13
MT
62428 #endif
62429+
5e856224 62430+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS) || defined(CONFIG_PAX_HAVE_ACL_FLAGS) || defined(CONFIG_PAX_HOOK_ACL_FLAGS)
58c5fc13
MT
62431+ unsigned long pax_flags;
62432+#endif
62433+
62434+#ifdef CONFIG_PAX_DLRESOLVE
62435+ unsigned long call_dl_resolve;
62436+#endif
62437+
62438+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
62439+ unsigned long call_syscall;
62440+#endif
62441+
62442+#ifdef CONFIG_PAX_ASLR
62443+ unsigned long delta_mmap; /* randomized offset */
62444+ unsigned long delta_stack; /* randomized offset */
62445+#endif
62446+
62447 };
62448
15a11c5b 62449 static inline void mm_init_cpumask(struct mm_struct *mm)
fe2de317
MT
62450diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
62451index 1d1b1e1..2a13c78 100644
62452--- a/include/linux/mmu_notifier.h
62453+++ b/include/linux/mmu_notifier.h
62454@@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
ae4e228f
MT
62455 */
62456 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
62457 ({ \
62458- pte_t __pte; \
62459+ pte_t ___pte; \
62460 struct vm_area_struct *___vma = __vma; \
62461 unsigned long ___address = __address; \
62462- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
62463+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
62464 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
62465- __pte; \
62466+ ___pte; \
62467 })
62468
16454cff 62469 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
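The rename from __pte to ___pte above exists so the statement-expression temporary cannot collide with an identifier of the same name at the macro's call site. A minimal user-space sketch of the shadowing hazard being avoided (GNU statement expressions, nothing kernel-specific; the macro and variable names are made up):

#include <stdio.h>

/* statement-expression macro whose temporary reuses the caller's name */
#define CAPTURE_AND_CLEAR(x) ({ int __val = (x); (x) = 0; __val; })

int main(void)
{
	int __val = 42;
	/* expands to: int __val = (__val); ... so the inner declaration
	 * shadows the caller's variable and the macro copies its own
	 * indeterminate temporary instead of 42 */
	int saved = CAPTURE_AND_CLEAR(__val);
	printf("saved=%d (expected 42)\n", saved);
	return 0;
}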
fe2de317 62470diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
572b4308 62471index 5f6806b..49db2b2 100644
fe2de317
MT
62472--- a/include/linux/mmzone.h
62473+++ b/include/linux/mmzone.h
c6e2a6c8 62474@@ -380,7 +380,7 @@ struct zone {
57199397
MT
62475 unsigned long flags; /* zone flags, see below */
62476
62477 /* Zone statistics */
62478- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62479+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
62480
62481 /*
6892158b 62482 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
fe2de317 62483diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
c6e2a6c8 62484index 501da4c..ba79bb4 100644
fe2de317
MT
62485--- a/include/linux/mod_devicetable.h
62486+++ b/include/linux/mod_devicetable.h
58c5fc13
MT
62487@@ -12,7 +12,7 @@
62488 typedef unsigned long kernel_ulong_t;
62489 #endif
62490
62491-#define PCI_ANY_ID (~0)
62492+#define PCI_ANY_ID ((__u16)~0)
62493
62494 struct pci_device_id {
62495 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
62496@@ -131,7 +131,7 @@ struct usb_device_id {
62497 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
62498 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
62499
62500-#define HID_ANY_ID (~0)
62501+#define HID_ANY_ID (~0U)
62502
62503 struct hid_device_id {
62504 __u16 bus;
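The wildcard constants above change only their type, not their intent: plain ~0 has type int and value -1, so what it compares equal to depends on the integer promotions of the field next to it, while (__u16)~0 and ~0U pin an explicit width. A minimal user-space illustration of that difference, outside any kernel context (the u16 typedef stands in for __u16):

#include <stdio.h>

typedef unsigned short u16;	/* stand-in for __u16 */

int main(void)
{
	u16 field = ~0;			/* stores 0xFFFF */

	printf("%d\n", field == ~0);		/* 0: field promotes to 65535, ~0 is int -1 */
	printf("%d\n", field == (u16)~0);	/* 1: both sides end up as 65535 */
	printf("%u\n", ~0U);			/* 4294967295: the unsigned form keeps full width */
	return 0;
}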
fe2de317 62505diff --git a/include/linux/module.h b/include/linux/module.h
c6e2a6c8 62506index fbcafe2..e5d9587 100644
fe2de317
MT
62507--- a/include/linux/module.h
62508+++ b/include/linux/module.h
4c928ab7 62509@@ -17,6 +17,7 @@
15a11c5b
MT
62510 #include <linux/moduleparam.h>
62511 #include <linux/tracepoint.h>
4c928ab7 62512 #include <linux/export.h>
15a11c5b
MT
62513+#include <linux/fs.h>
62514
62515 #include <linux/percpu.h>
62516 #include <asm/module.h>
c6e2a6c8 62517@@ -273,19 +274,16 @@ struct module
58c5fc13
MT
62518 int (*init)(void);
62519
62520 /* If this is non-NULL, vfree after init() returns */
62521- void *module_init;
62522+ void *module_init_rx, *module_init_rw;
62523
62524 /* Here is the actual code + data, vfree'd on unload. */
62525- void *module_core;
62526+ void *module_core_rx, *module_core_rw;
62527
62528 /* Here are the sizes of the init and core sections */
62529- unsigned int init_size, core_size;
62530+ unsigned int init_size_rw, core_size_rw;
62531
62532 /* The size of the executable code in each section. */
62533- unsigned int init_text_size, core_text_size;
16454cff
MT
62534-
62535- /* Size of RO sections of the module (text+rodata) */
62536- unsigned int init_ro_size, core_ro_size;
58c5fc13
MT
62537+ unsigned int init_size_rx, core_size_rx;
62538
62539 /* Arch-specific module values */
62540 struct mod_arch_specific arch;
c6e2a6c8 62541@@ -341,6 +339,10 @@ struct module
15a11c5b
MT
62542 #ifdef CONFIG_EVENT_TRACING
62543 struct ftrace_event_call **trace_events;
62544 unsigned int num_trace_events;
62545+ struct file_operations trace_id;
62546+ struct file_operations trace_enable;
62547+ struct file_operations trace_format;
62548+ struct file_operations trace_filter;
62549 #endif
62550 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
62551 unsigned int num_ftrace_callsites;
c6e2a6c8 62552@@ -388,16 +390,46 @@ bool is_module_address(unsigned long addr);
df50ba0c 62553 bool is_module_percpu_address(unsigned long addr);
58c5fc13
MT
62554 bool is_module_text_address(unsigned long addr);
62555
62556+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
62557+{
62558+
62559+#ifdef CONFIG_PAX_KERNEXEC
62560+ if (ktla_ktva(addr) >= (unsigned long)start &&
62561+ ktla_ktva(addr) < (unsigned long)start + size)
62562+ return 1;
62563+#endif
62564+
62565+ return ((void *)addr >= start && (void *)addr < start + size);
62566+}
62567+
62568+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
62569+{
62570+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
62571+}
62572+
62573+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
62574+{
62575+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
62576+}
62577+
62578+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
62579+{
62580+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
62581+}
62582+
62583+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
62584+{
62585+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
62586+}
62587+
62588 static inline int within_module_core(unsigned long addr, struct module *mod)
62589 {
62590- return (unsigned long)mod->module_core <= addr &&
62591- addr < (unsigned long)mod->module_core + mod->core_size;
62592+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
62593 }
62594
62595 static inline int within_module_init(unsigned long addr, struct module *mod)
62596 {
62597- return (unsigned long)mod->module_init <= addr &&
62598- addr < (unsigned long)mod->module_init + mod->init_size;
62599+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
62600 }
62601
62602 /* Search for module by name: must hold module_mutex. */
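With the split above, a module's text and data no longer live in one allocation, so address checks have to consult the read-execute and read-write regions separately; within_module_core()/within_module_init() keep their old meaning by OR-ing the new helpers. A hypothetical caller, illustrative only and not part of the patch:

#include <linux/module.h>

static int example_addr_in_module(unsigned long addr, struct module *mod)
{
	if (within_module_core_rx(addr, mod))		/* module text   */
		return 1;
	if (within_module_core_rw(addr, mod))		/* module data   */
		return 1;
	return within_module_init_rx(addr, mod) ||	/* init sections */
	       within_module_init_rw(addr, mod);
}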
fe2de317 62603diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
5e856224 62604index b2be02e..72d2f78 100644
fe2de317
MT
62605--- a/include/linux/moduleloader.h
62606+++ b/include/linux/moduleloader.h
4c928ab7 62607@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
58c5fc13 62608
4c928ab7
MT
62609 /* Allocator used for allocating struct module, core sections and init
62610 sections. Returns NULL on failure. */
62611-void *module_alloc(unsigned long size);
62612+void *module_alloc(unsigned long size) __size_overflow(1);
62613+
58c5fc13 62614+#ifdef CONFIG_PAX_KERNEXEC
5e856224 62615+void *module_alloc_exec(unsigned long size) __size_overflow(1);
58c5fc13
MT
62616+#else
62617+#define module_alloc_exec(x) module_alloc(x)
62618+#endif
4c928ab7 62619
58c5fc13
MT
62620 /* Free memory returned from module_alloc. */
62621 void module_free(struct module *mod, void *module_region);
62622
62623+#ifdef CONFIG_PAX_KERNEXEC
62624+void module_free_exec(struct module *mod, void *module_region);
62625+#else
ae4e228f 62626+#define module_free_exec(x, y) module_free((x), (y))
58c5fc13
MT
62627+#endif
62628+
62629 /* Apply the given relocation to the (simplified) ELF. Return -error
62630 or 0. */
62631 int apply_relocate(Elf_Shdr *sechdrs,
fe2de317 62632diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
c6e2a6c8 62633index 944bc18..042d291 100644
fe2de317
MT
62634--- a/include/linux/moduleparam.h
62635+++ b/include/linux/moduleparam.h
c6e2a6c8 62636@@ -286,7 +286,7 @@ static inline void __kernel_param_unlock(void)
6892158b
MT
62637 * @len is usually just sizeof(string).
62638 */
62639 #define module_param_string(name, string, len, perm) \
62640- static const struct kparam_string __param_string_##name \
62641+ static const struct kparam_string __param_string_##name __used \
62642 = { len, string }; \
62643 __module_param_call(MODULE_PARAM_PREFIX, name, \
62644 &param_ops_string, \
c6e2a6c8 62645@@ -424,7 +424,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
6892158b
MT
62646 */
62647 #define module_param_array_named(name, array, type, nump, perm) \
5e856224 62648 param_check_##type(name, &(array)[0]); \
6892158b
MT
62649- static const struct kparam_array __param_arr_##name \
62650+ static const struct kparam_array __param_arr_##name __used \
15a11c5b
MT
62651 = { .max = ARRAY_SIZE(array), .num = nump, \
62652 .ops = &param_ops_##type, \
62653 .elemsize = sizeof(array[0]), .elem = array }; \
fe2de317
MT
62654diff --git a/include/linux/namei.h b/include/linux/namei.h
62655index ffc0213..2c1f2cb 100644
62656--- a/include/linux/namei.h
62657+++ b/include/linux/namei.h
66a7e928 62658@@ -24,7 +24,7 @@ struct nameidata {
16454cff 62659 unsigned seq;
58c5fc13
MT
62660 int last_type;
62661 unsigned depth;
62662- char *saved_names[MAX_NESTED_LINKS + 1];
62663+ const char *saved_names[MAX_NESTED_LINKS + 1];
62664
62665 /* Intent data */
62666 union {
6e9df6a3 62667@@ -94,12 +94,12 @@ extern int follow_up(struct path *);
58c5fc13
MT
62668 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
62669 extern void unlock_rename(struct dentry *, struct dentry *);
62670
62671-static inline void nd_set_link(struct nameidata *nd, char *path)
62672+static inline void nd_set_link(struct nameidata *nd, const char *path)
62673 {
62674 nd->saved_names[nd->depth] = path;
62675 }
62676
62677-static inline char *nd_get_link(struct nameidata *nd)
ae4e228f 62678+static inline const char *nd_get_link(const struct nameidata *nd)
58c5fc13
MT
62679 {
62680 return nd->saved_names[nd->depth];
62681 }
fe2de317 62682diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
c6e2a6c8 62683index 33900a5..2072000 100644
fe2de317
MT
62684--- a/include/linux/netdevice.h
62685+++ b/include/linux/netdevice.h
c6e2a6c8 62686@@ -1003,6 +1003,7 @@ struct net_device_ops {
5e856224
MT
62687 int (*ndo_neigh_construct)(struct neighbour *n);
62688 void (*ndo_neigh_destroy)(struct neighbour *n);
15a11c5b
MT
62689 };
62690+typedef struct net_device_ops __no_const net_device_ops_no_const;
62691
62692 /*
62693 * The DEVICE structure.
c6e2a6c8 62694@@ -1064,7 +1065,7 @@ struct net_device {
4c928ab7
MT
62695 int iflink;
62696
62697 struct net_device_stats stats;
62698- atomic_long_t rx_dropped; /* dropped packets by core network
62699+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
62700 * Do not use this in drivers.
62701 */
62702
fe2de317
MT
62703diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
62704new file mode 100644
62705index 0000000..33f4af8
62706--- /dev/null
62707+++ b/include/linux/netfilter/xt_gradm.h
6892158b
MT
62708@@ -0,0 +1,9 @@
62709+#ifndef _LINUX_NETFILTER_XT_GRADM_H
62710+#define _LINUX_NETFILTER_XT_GRADM_H 1
62711+
62712+struct xt_gradm_mtinfo {
62713+ __u16 flags;
62714+ __u16 invflags;
62715+};
62716+
62717+#endif
fe2de317
MT
62718diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
62719index c65a18a..0c05f3a 100644
62720--- a/include/linux/of_pdt.h
62721+++ b/include/linux/of_pdt.h
15a11c5b
MT
62722@@ -32,7 +32,7 @@ struct of_pdt_ops {
62723
62724 /* return 0 on success; fill in 'len' with number of bytes in path */
62725 int (*pkg2path)(phandle node, char *buf, const int buflen, int *len);
62726-};
62727+} __no_const;
62728
62729 extern void *prom_early_alloc(unsigned long size);
62730
fe2de317 62731diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
c6e2a6c8 62732index a4c5624..79d6d88 100644
fe2de317
MT
62733--- a/include/linux/oprofile.h
62734+++ b/include/linux/oprofile.h
62735@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
ae4e228f
MT
62736 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
62737 char const * name, ulong * val);
58c5fc13 62738
ae4e228f
MT
62739-/** Create a file for read-only access to an atomic_t. */
62740+/** Create a file for read-only access to an atomic_unchecked_t. */
58c5fc13
MT
62741 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
62742- char const * name, atomic_t * val);
62743+ char const * name, atomic_unchecked_t * val);
62744
62745 /** create a directory */
62746 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
fe2de317 62747diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
c6e2a6c8 62748index ddbb6a9..be1680e 100644
fe2de317
MT
62749--- a/include/linux/perf_event.h
62750+++ b/include/linux/perf_event.h
c6e2a6c8 62751@@ -879,8 +879,8 @@ struct perf_event {
8308f9c9
MT
62752
62753 enum perf_event_active_state state;
62754 unsigned int attach_state;
62755- local64_t count;
62756- atomic64_t child_count;
62757+ local64_t count; /* PaX: fix it one day */
62758+ atomic64_unchecked_t child_count;
62759
62760 /*
62761 * These are the total time in nanoseconds that the event
c6e2a6c8 62762@@ -931,8 +931,8 @@ struct perf_event {
8308f9c9
MT
62763 * These accumulate total time (in nanoseconds) that children
62764 * events have been enabled and running, respectively.
62765 */
62766- atomic64_t child_total_time_enabled;
62767- atomic64_t child_total_time_running;
62768+ atomic64_unchecked_t child_total_time_enabled;
62769+ atomic64_unchecked_t child_total_time_running;
62770
62771 /*
62772 * Protect attach/detach and child_list:
4c928ab7
MT
62773diff --git a/include/linux/personality.h b/include/linux/personality.h
62774index 8fc7dd1a..c19d89e 100644
62775--- a/include/linux/personality.h
62776+++ b/include/linux/personality.h
62777@@ -44,6 +44,7 @@ enum {
62778 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
62779 ADDR_NO_RANDOMIZE | \
62780 ADDR_COMPAT_LAYOUT | \
62781+ ADDR_LIMIT_3GB | \
62782 MMAP_PAGE_ZERO)
62783
62784 /*
fe2de317 62785diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
c6e2a6c8 62786index e1ac1ce..0675fed 100644
fe2de317
MT
62787--- a/include/linux/pipe_fs_i.h
62788+++ b/include/linux/pipe_fs_i.h
c6e2a6c8 62789@@ -45,9 +45,9 @@ struct pipe_buffer {
57199397 62790 struct pipe_inode_info {
ae4e228f 62791 wait_queue_head_t wait;
57199397 62792 unsigned int nrbufs, curbuf, buffers;
ae4e228f
MT
62793- unsigned int readers;
62794- unsigned int writers;
62795- unsigned int waiting_writers;
62796+ atomic_t readers;
62797+ atomic_t writers;
62798+ atomic_t waiting_writers;
62799 unsigned int r_counter;
62800 unsigned int w_counter;
57199397 62801 struct page *tmp_page;
fe2de317 62802diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
5e856224 62803index 609daae..5392427 100644
fe2de317
MT
62804--- a/include/linux/pm_runtime.h
62805+++ b/include/linux/pm_runtime.h
5e856224 62806@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
bc901d79
MT
62807
62808 static inline void pm_runtime_mark_last_busy(struct device *dev)
62809 {
62810- ACCESS_ONCE(dev->power.last_busy) = jiffies;
62811+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
62812 }
62813
62814 #else /* !CONFIG_PM_RUNTIME */
fe2de317 62815diff --git a/include/linux/poison.h b/include/linux/poison.h
5e856224 62816index 2110a81..13a11bb 100644
fe2de317
MT
62817--- a/include/linux/poison.h
62818+++ b/include/linux/poison.h
ae4e228f 62819@@ -19,8 +19,8 @@
58c5fc13
MT
62820 * under normal circumstances, used to verify that nobody uses
62821 * non-initialized list entries.
62822 */
ae4e228f
MT
62823-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
62824-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
62825+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
62826+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
58c5fc13
MT
62827
62828 /********** include/linux/timer.h **********/
62829 /*
fe2de317 62830diff --git a/include/linux/preempt.h b/include/linux/preempt.h
c6e2a6c8 62831index 5a710b9..0b0dab9 100644
fe2de317
MT
62832--- a/include/linux/preempt.h
62833+++ b/include/linux/preempt.h
c6e2a6c8 62834@@ -126,7 +126,7 @@ struct preempt_ops {
15a11c5b
MT
62835 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
62836 void (*sched_out)(struct preempt_notifier *notifier,
62837 struct task_struct *next);
62838-};
62839+} __no_const;
66a7e928 62840
15a11c5b
MT
62841 /**
62842 * preempt_notifier - key for installing preemption notifiers
c6e2a6c8
MT
62843diff --git a/include/linux/printk.h b/include/linux/printk.h
62844index 0525927..a5388b6 100644
62845--- a/include/linux/printk.h
62846+++ b/include/linux/printk.h
62847@@ -94,6 +94,8 @@ void early_printk(const char *fmt, ...);
62848 extern int printk_needs_cpu(int cpu);
62849 extern void printk_tick(void);
62850
62851+extern int kptr_restrict;
62852+
62853 #ifdef CONFIG_PRINTK
62854 asmlinkage __printf(1, 0)
62855 int vprintk(const char *fmt, va_list args);
62856@@ -117,7 +119,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
62857
62858 extern int printk_delay_msec;
62859 extern int dmesg_restrict;
62860-extern int kptr_restrict;
62861
62862 void log_buf_kexec_setup(void);
62863 void __init setup_log_buf(int early);
fe2de317 62864diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
5e856224 62865index 85c5073..51fac8b 100644
fe2de317
MT
62866--- a/include/linux/proc_fs.h
62867+++ b/include/linux/proc_fs.h
5e856224 62868@@ -155,6 +155,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
58c5fc13
MT
62869 return proc_create_data(name, mode, parent, proc_fops, NULL);
62870 }
62871
5e856224 62872+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
58c5fc13
MT
62873+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
62874+{
62875+#ifdef CONFIG_GRKERNSEC_PROC_USER
62876+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
62877+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62878+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
62879+#else
62880+ return proc_create_data(name, mode, parent, proc_fops, NULL);
62881+#endif
62882+}
58c5fc13
MT
62883+
62884 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
5e856224 62885 umode_t mode, struct proc_dir_entry *base,
58c5fc13 62886 read_proc_t *read_proc, void * data)
5e856224 62887@@ -258,7 +270,7 @@ union proc_op {
15a11c5b
MT
62888 int (*proc_show)(struct seq_file *m,
62889 struct pid_namespace *ns, struct pid *pid,
62890 struct task_struct *task);
62891-};
62892+} __no_const;
62893
62894 struct ctl_table_header;
62895 struct ctl_table;
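proc_create_grsec() above wraps proc_create_data() so that, when CONFIG_GRKERNSEC_PROC_USER (or _USERGROUP) is set, the entry is created S_IRUSR (plus S_IRGRP) no matter what mode the caller requested. A hypothetical caller, shown only to make that behaviour concrete; the entry name and file_operations are made up:

#include <linux/module.h>
#include <linux/proc_fs.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static int __init example_proc_init(void)
{
	/* asks for world-readable 0444, but may end up root-only
	 * depending on the grsec proc restriction options */
	if (!proc_create_grsec("example", 0444, NULL, &example_fops))
		return -ENOMEM;
	return 0;
}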
fe2de317 62896diff --git a/include/linux/random.h b/include/linux/random.h
572b4308 62897index 8f74538..de61694 100644
fe2de317
MT
62898--- a/include/linux/random.h
62899+++ b/include/linux/random.h
572b4308
MT
62900@@ -54,6 +54,10 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
62901 unsigned int value);
62902 extern void add_interrupt_randomness(int irq);
62903
62904+#ifdef CONFIG_PAX_LATENT_ENTROPY
62905+extern void transfer_latent_entropy(void);
62906+#endif
62907+
62908 extern void get_random_bytes(void *buf, int nbytes);
62909 void generate_random_uuid(unsigned char uuid_out[16]);
62910
62911@@ -69,12 +73,17 @@ void srandom32(u32 seed);
57199397
MT
62912
62913 u32 prandom32(struct rnd_state *);
58c5fc13
MT
62914
62915+static inline unsigned long pax_get_random_long(void)
62916+{
62917+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
62918+}
62919+
57199397
MT
62920 /*
62921 * Handle minimum values for seeds
62922 */
62923 static inline u32 __seed(u32 x, u32 m)
62924 {
62925- return (x < m) ? x + m : x;
62926+ return (x <= m) ? x + m + 1 : x;
62927 }
58c5fc13 62928
57199397 62929 /**
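Two small user-space stand-ins for what the hunk adds and changes: pax_get_random_long() assembles an unsigned long from one or two 32-bit draws depending on sizeof(long), and the reworked __seed() now yields a value strictly greater than the minimum m even for x == 0 or x == m. Illustrative only; random32() is stubbed with rand():

#include <stdio.h>
#include <stdlib.h>

static unsigned int fake_random32(void) { return (unsigned int)rand(); }

/* same shape as the patch's pax_get_random_long(); on 32-bit the
 * shifted branch is never taken */
static unsigned long demo_random_long(void)
{
	return fake_random32() +
	       (sizeof(long) > 4 ? (unsigned long)fake_random32() << 32 : 0);
}

/* the minimum-seed helper before and after the hunk */
static unsigned int seed_old(unsigned int x, unsigned int m) { return (x <  m) ? x + m     : x; }
static unsigned int seed_new(unsigned int x, unsigned int m) { return (x <= m) ? x + m + 1 : x; }

int main(void)
{
	printf("old(0,2)=%u new(0,2)=%u\n", seed_old(0, 2), seed_new(0, 2));	/* 2 vs 3 */
	printf("old(2,2)=%u new(2,2)=%u\n", seed_old(2, 2), seed_new(2, 2));	/* 2 vs 5 */
	printf("long: %#lx\n", demo_random_long());
	return 0;
}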
fe2de317
MT
62930diff --git a/include/linux/reboot.h b/include/linux/reboot.h
62931index e0879a7..a12f962 100644
62932--- a/include/linux/reboot.h
62933+++ b/include/linux/reboot.h
62934@@ -52,9 +52,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
66a7e928
MT
62935 * Architecture-specific implementations of sys_reboot commands.
62936 */
62937
62938-extern void machine_restart(char *cmd);
62939-extern void machine_halt(void);
62940-extern void machine_power_off(void);
62941+extern void machine_restart(char *cmd) __noreturn;
62942+extern void machine_halt(void) __noreturn;
62943+extern void machine_power_off(void) __noreturn;
62944
62945 extern void machine_shutdown(void);
62946 struct pt_regs;
fe2de317 62947@@ -65,9 +65,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
66a7e928
MT
62948 */
62949
62950 extern void kernel_restart_prepare(char *cmd);
62951-extern void kernel_restart(char *cmd);
62952-extern void kernel_halt(void);
62953-extern void kernel_power_off(void);
62954+extern void kernel_restart(char *cmd) __noreturn;
62955+extern void kernel_halt(void) __noreturn;
62956+extern void kernel_power_off(void) __noreturn;
62957
62958 extern int C_A_D; /* for sysctl */
62959 void ctrl_alt_del(void);
6e9df6a3 62960@@ -81,7 +81,7 @@ extern int orderly_poweroff(bool force);
66a7e928
MT
62961 * Emergency restart, callable from an interrupt handler.
62962 */
62963
62964-extern void emergency_restart(void);
62965+extern void emergency_restart(void) __noreturn;
62966 #include <asm/emergency-restart.h>
62967
62968 #endif
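The __noreturn annotations above only add information for the compiler: a function that never returns lets callers drop unreachable code and avoids spurious "control reaches end of non-void function" warnings. A self-contained user-space illustration with the underlying GCC attribute (all names here are made up):

#include <stdio.h>
#include <stdlib.h>

static void die(const char *msg) __attribute__((noreturn));

static void die(const char *msg)
{
	fprintf(stderr, "fatal: %s\n", msg);
	exit(1);
}

static int must_be_positive(int v)
{
	if (v > 0)
		return v;
	die("non-positive value");
	/* no return needed here: the compiler knows die() never returns */
}

int main(void)
{
	printf("%d\n", must_be_positive(3));
	return 0;
}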
fe2de317 62969diff --git a/include/linux/relay.h b/include/linux/relay.h
c6e2a6c8 62970index 91cacc3..b55ff74 100644
fe2de317
MT
62971--- a/include/linux/relay.h
62972+++ b/include/linux/relay.h
c6e2a6c8 62973@@ -160,7 +160,7 @@ struct rchan_callbacks
15a11c5b
MT
62974 * The callback should return 0 if successful, negative if not.
62975 */
62976 int (*remove_buf_file)(struct dentry *dentry);
62977-};
62978+} __no_const;
62979
62980 /*
62981 * CONFIG_RELAY kernel API, kernel/relay.c
fe2de317 62982diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
c6e2a6c8 62983index 6fdf027..ff72610 100644
fe2de317
MT
62984--- a/include/linux/rfkill.h
62985+++ b/include/linux/rfkill.h
15a11c5b
MT
62986@@ -147,6 +147,7 @@ struct rfkill_ops {
62987 void (*query)(struct rfkill *rfkill, void *data);
62988 int (*set_block)(void *data, bool blocked);
62989 };
62990+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
62991
62992 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
62993 /**
4c928ab7
MT
62994diff --git a/include/linux/rio.h b/include/linux/rio.h
62995index 4d50611..c6858a2 100644
62996--- a/include/linux/rio.h
62997+++ b/include/linux/rio.h
62998@@ -315,7 +315,7 @@ struct rio_ops {
62999 int mbox, void *buffer, size_t len);
63000 int (*add_inb_buffer)(struct rio_mport *mport, int mbox, void *buf);
63001 void *(*get_inb_message)(struct rio_mport *mport, int mbox);
63002-};
63003+} __no_const;
63004
63005 #define RIO_RESOURCE_MEM 0x00000100
63006 #define RIO_RESOURCE_DOORBELL 0x00000200
fe2de317 63007diff --git a/include/linux/rmap.h b/include/linux/rmap.h
c6e2a6c8 63008index fd07c45..4676b8e 100644
fe2de317
MT
63009--- a/include/linux/rmap.h
63010+++ b/include/linux/rmap.h
5e856224 63011@@ -119,9 +119,9 @@ static inline void anon_vma_unlock(struct anon_vma *anon_vma)
57199397
MT
63012 void anon_vma_init(void); /* create anon_vma_cachep */
63013 int anon_vma_prepare(struct vm_area_struct *);
63014 void unlink_anon_vmas(struct vm_area_struct *);
63015-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
57199397 63016+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
5e856224
MT
63017 void anon_vma_moveto_tail(struct vm_area_struct *);
63018-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
57199397 63019+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
57199397 63020
66a7e928 63021 static inline void anon_vma_merge(struct vm_area_struct *vma,
c6e2a6c8 63022 struct vm_area_struct *next)
fe2de317 63023diff --git a/include/linux/sched.h b/include/linux/sched.h
572b4308 63024index 7b06169..c92adbe 100644
fe2de317
MT
63025--- a/include/linux/sched.h
63026+++ b/include/linux/sched.h
c6e2a6c8 63027@@ -100,6 +100,7 @@ struct bio_list;
58c5fc13 63028 struct fs_struct;
ae4e228f 63029 struct perf_event_context;
66a7e928 63030 struct blk_plug;
58c5fc13
MT
63031+struct linux_binprm;
63032
63033 /*
63034 * List of flags we want to share for kernel threads,
5e856224 63035@@ -382,10 +383,13 @@ struct user_namespace;
57199397
MT
63036 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
63037
63038 extern int sysctl_max_map_count;
63039+extern unsigned long sysctl_heap_stack_gap;
63040
63041 #include <linux/aio.h>
63042
63043 #ifdef CONFIG_MMU
16454cff
MT
63044+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
63045+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
57199397
MT
63046 extern void arch_pick_mmap_layout(struct mm_struct *mm);
63047 extern unsigned long
63048 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
c6e2a6c8 63049@@ -643,6 +647,17 @@ struct signal_struct {
16454cff
MT
63050 #ifdef CONFIG_TASKSTATS
63051 struct taskstats *stats;
58c5fc13 63052 #endif
16454cff 63053+
58c5fc13
MT
63054+#ifdef CONFIG_GRKERNSEC
63055+ u32 curr_ip;
bc901d79 63056+ u32 saved_ip;
58c5fc13
MT
63057+ u32 gr_saddr;
63058+ u32 gr_daddr;
63059+ u16 gr_sport;
63060+ u16 gr_dport;
63061+ u8 used_accept:1;
63062+#endif
ae4e228f 63063+
16454cff
MT
63064 #ifdef CONFIG_AUDIT
63065 unsigned audit_tty;
63066 struct tty_audit_buf *tty_audit_buf;
c6e2a6c8 63067@@ -726,6 +741,11 @@ struct user_struct {
71d190be
MT
63068 struct key *session_keyring; /* UID's default session keyring */
63069 #endif
63070
63071+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63072+ unsigned int banned;
63073+ unsigned long ban_expires;
63074+#endif
63075+
63076 /* Hash table maintenance information */
63077 struct hlist_node uidhash_node;
63078 uid_t uid;
c6e2a6c8 63079@@ -1386,8 +1406,8 @@ struct task_struct {
58c5fc13
MT
63080 struct list_head thread_group;
63081
63082 struct completion *vfork_done; /* for vfork() */
63083- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
63084- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63085+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
63086+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
63087
63088 cputime_t utime, stime, utimescaled, stimescaled;
63089 cputime_t gtime;
c6e2a6c8 63090@@ -1403,13 +1423,6 @@ struct task_struct {
58c5fc13
MT
63091 struct task_cputime cputime_expires;
63092 struct list_head cpu_timers[3];
63093
63094-/* process credentials */
bc901d79 63095- const struct cred __rcu *real_cred; /* objective and real subjective task
58c5fc13 63096- * credentials (COW) */
bc901d79 63097- const struct cred __rcu *cred; /* effective (overridable) subjective task
58c5fc13 63098- * credentials (COW) */
ae4e228f 63099- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
58c5fc13
MT
63100-
63101 char comm[TASK_COMM_LEN]; /* executable name excluding path
63102 - access with [gs]et_task_comm (which lock
63103 it with task_lock())
c6e2a6c8 63104@@ -1426,8 +1439,16 @@ struct task_struct {
71d190be
MT
63105 #endif
63106 /* CPU-specific state of this task */
bc901d79 63107 struct thread_struct thread;
71d190be
MT
63108+/* thread_info moved to task_struct */
63109+#ifdef CONFIG_X86
63110+ struct thread_info tinfo;
63111+#endif
bc901d79
MT
63112 /* filesystem information */
63113 struct fs_struct *fs;
58c5fc13 63114+
bc901d79 63115+ const struct cred __rcu *cred; /* effective (overridable) subjective task
58c5fc13 63116+ * credentials (COW) */
58c5fc13 63117+
bc901d79
MT
63118 /* open file information */
63119 struct files_struct *files;
63120 /* namespaces */
c6e2a6c8 63121@@ -1469,6 +1490,11 @@ struct task_struct {
bc901d79
MT
63122 struct rt_mutex_waiter *pi_blocked_on;
63123 #endif
ae4e228f 63124
bc901d79
MT
63125+/* process credentials */
63126+ const struct cred __rcu *real_cred; /* objective and real subjective task
ae4e228f 63127+ * credentials (COW) */
bc901d79 63128+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
ae4e228f 63129+
bc901d79
MT
63130 #ifdef CONFIG_DEBUG_MUTEXES
63131 /* mutex deadlock detection */
63132 struct mutex_waiter *blocked_on;
c6e2a6c8 63133@@ -1585,6 +1611,27 @@ struct task_struct {
ae4e228f
MT
63134 unsigned long default_timer_slack_ns;
63135
63136 struct list_head *scm_work_list;
58c5fc13
MT
63137+
63138+#ifdef CONFIG_GRKERNSEC
63139+ /* grsecurity */
4c928ab7
MT
63140+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63141+ u64 exec_id;
63142+#endif
63143+#ifdef CONFIG_GRKERNSEC_SETXID
63144+ const struct cred *delayed_cred;
63145+#endif
df50ba0c 63146+ struct dentry *gr_chroot_dentry;
58c5fc13
MT
63147+ struct acl_subject_label *acl;
63148+ struct acl_role_label *role;
63149+ struct file *exec_file;
63150+ u16 acl_role_id;
16454cff 63151+ /* is this the task that authenticated to the special role */
58c5fc13
MT
63152+ u8 acl_sp_role;
63153+ u8 is_writable;
63154+ u8 brute;
df50ba0c 63155+ u8 gr_is_chrooted;
58c5fc13
MT
63156+#endif
63157+
ae4e228f 63158 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
df50ba0c 63159 /* Index of current stored address in ret_stack */
ae4e228f 63160 int curr_ret_stack;
c6e2a6c8 63161@@ -1619,6 +1666,51 @@ struct task_struct {
ae4e228f 63162 #endif
58c5fc13
MT
63163 };
63164
63165+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
63166+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
63167+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
63168+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
63169+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
63170+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
63171+
63172+#ifdef CONFIG_PAX_SOFTMODE
15a11c5b 63173+extern int pax_softmode;
58c5fc13
MT
63174+#endif
63175+
63176+extern int pax_check_flags(unsigned long *);
63177+
63178+/* if tsk != current then task_lock must be held on it */
63179+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
63180+static inline unsigned long pax_get_flags(struct task_struct *tsk)
63181+{
63182+ if (likely(tsk->mm))
63183+ return tsk->mm->pax_flags;
63184+ else
63185+ return 0UL;
63186+}
63187+
63188+/* if tsk != current then task_lock must be held on it */
63189+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
63190+{
63191+ if (likely(tsk->mm)) {
63192+ tsk->mm->pax_flags = flags;
63193+ return 0;
63194+ }
63195+ return -EINVAL;
63196+}
63197+#endif
63198+
63199+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
63200+extern void pax_set_initial_flags(struct linux_binprm *bprm);
63201+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
63202+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
63203+#endif
63204+
15a11c5b 63205+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
6e9df6a3 63206+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
15a11c5b 63207+extern void pax_report_refcount_overflow(struct pt_regs *regs);
572b4308 63208+extern void check_object_size(const void *ptr, unsigned long n, bool to);
58c5fc13
MT
63209+
63210 /* Future-safe accessor for struct task_struct's cpus_allowed. */
ae4e228f 63211 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
58c5fc13 63212
572b4308 63213@@ -2146,7 +2238,9 @@ void yield(void);
71d190be
MT
63214 extern struct exec_domain default_exec_domain;
63215
63216 union thread_union {
63217+#ifndef CONFIG_X86
63218 struct thread_info thread_info;
63219+#endif
63220 unsigned long stack[THREAD_SIZE/sizeof(long)];
63221 };
63222
572b4308 63223@@ -2179,6 +2273,7 @@ extern struct pid_namespace init_pid_ns;
15a11c5b
MT
63224 */
63225
63226 extern struct task_struct *find_task_by_vpid(pid_t nr);
63227+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
63228 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
63229 struct pid_namespace *ns);
63230
572b4308 63231@@ -2322,7 +2417,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
58c5fc13
MT
63232 extern void exit_itimers(struct signal_struct *);
63233 extern void flush_itimer_signals(void);
63234
5e856224 63235-extern void do_group_exit(int);
4c928ab7 63236+extern __noreturn void do_group_exit(int);
58c5fc13
MT
63237
63238 extern void daemonize(const char *, ...);
63239 extern int allow_signal(int);
572b4308 63240@@ -2523,9 +2618,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
58c5fc13
MT
63241
63242 #endif
63243
63244-static inline int object_is_on_stack(void *obj)
ae4e228f 63245+static inline int object_starts_on_stack(void *obj)
58c5fc13 63246 {
ae4e228f
MT
63247- void *stack = task_stack_page(current);
63248+ const void *stack = task_stack_page(current);
58c5fc13 63249
ae4e228f
MT
63250 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
63251 }
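The MF_PAX_* values above are plain bit flags kept in the upper bits of mm->pax_flags; pax_get_flags()/pax_set_flags() just read and write that word (holding task_lock when tsk != current, per the comment). A trivial user-space decoder using exactly the bit values from the hunk; the decode function itself is made up:

#include <stdio.h>

#define MF_PAX_PAGEEXEC	0x01000000
#define MF_PAX_EMUTRAMP	0x02000000
#define MF_PAX_MPROTECT	0x04000000
#define MF_PAX_RANDMMAP	0x08000000
#define MF_PAX_SEGMEXEC	0x20000000

static void decode_pax_flags(unsigned long flags)
{
	printf("PAGEEXEC:%c EMUTRAMP:%c MPROTECT:%c RANDMMAP:%c SEGMEXEC:%c\n",
	       flags & MF_PAX_PAGEEXEC ? 'y' : 'n',
	       flags & MF_PAX_EMUTRAMP ? 'y' : 'n',
	       flags & MF_PAX_MPROTECT ? 'y' : 'n',
	       flags & MF_PAX_RANDMMAP ? 'y' : 'n',
	       flags & MF_PAX_SEGMEXEC ? 'y' : 'n');
}

int main(void)
{
	decode_pax_flags(MF_PAX_PAGEEXEC | MF_PAX_MPROTECT | MF_PAX_RANDMMAP);
	return 0;
}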
fe2de317
MT
63252diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
63253index 899fbb4..1cb4138 100644
63254--- a/include/linux/screen_info.h
63255+++ b/include/linux/screen_info.h
ae4e228f 63256@@ -43,7 +43,8 @@ struct screen_info {
58c5fc13
MT
63257 __u16 pages; /* 0x32 */
63258 __u16 vesa_attributes; /* 0x34 */
63259 __u32 capabilities; /* 0x36 */
63260- __u8 _reserved[6]; /* 0x3a */
63261+ __u16 vesapm_size; /* 0x3a */
63262+ __u8 _reserved[4]; /* 0x3c */
63263 } __attribute__((packed));
63264
63265 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
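The screen_info change above trades six reserved bytes for a __u16 vesapm_size plus four reserved bytes, so the overall packed layout keeps its size. A quick user-space size check with cut-down stand-in structs (only the tail of the real structure is modelled):

#include <stdio.h>

struct tail_before {
	unsigned int capabilities;
	unsigned char _reserved[6];
} __attribute__((packed));

struct tail_after {
	unsigned int capabilities;
	unsigned short vesapm_size;
	unsigned char _reserved[4];
} __attribute__((packed));

int main(void)
{
	/* both print 10: the replacement occupies exactly the bytes it took over */
	printf("before=%zu after=%zu\n",
	       sizeof(struct tail_before), sizeof(struct tail_after));
	return 0;
}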
fe2de317 63266diff --git a/include/linux/security.h b/include/linux/security.h
c6e2a6c8 63267index 673afbb..2b7454b 100644
fe2de317
MT
63268--- a/include/linux/security.h
63269+++ b/include/linux/security.h
c6e2a6c8
MT
63270@@ -26,6 +26,7 @@
63271 #include <linux/capability.h>
df50ba0c 63272 #include <linux/slab.h>
c6e2a6c8 63273 #include <linux/err.h>
58c5fc13 63274+#include <linux/grsecurity.h>
58c5fc13 63275
c6e2a6c8
MT
63276 struct linux_binprm;
63277 struct cred;
fe2de317 63278diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
c6e2a6c8 63279index fc61854..d7c490b 100644
fe2de317
MT
63280--- a/include/linux/seq_file.h
63281+++ b/include/linux/seq_file.h
c6e2a6c8 63282@@ -25,6 +25,9 @@ struct seq_file {
4c928ab7
MT
63283 struct mutex lock;
63284 const struct seq_operations *op;
63285 int poll_event;
63286+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63287+ u64 exec_id;
63288+#endif
63289 void *private;
63290 };
63291
c6e2a6c8 63292@@ -34,6 +37,7 @@ struct seq_operations {
15a11c5b
MT
63293 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
63294 int (*show) (struct seq_file *m, void *v);
63295 };
63296+typedef struct seq_operations __no_const seq_operations_no_const;
63297
63298 #define SEQ_SKIP 1
63299
fe2de317
MT
63300diff --git a/include/linux/shm.h b/include/linux/shm.h
63301index 92808b8..c28cac4 100644
63302--- a/include/linux/shm.h
63303+++ b/include/linux/shm.h
63304@@ -98,6 +98,10 @@ struct shmid_kernel /* private to the kernel */
15a11c5b 63305
6e9df6a3
MT
63306 /* The task created the shm object. NULL if the task is dead. */
63307 struct task_struct *shm_creator;
58c5fc13
MT
63308+#ifdef CONFIG_GRKERNSEC
63309+ time_t shm_createtime;
63310+ pid_t shm_lapid;
63311+#endif
63312 };
63313
63314 /* shm_mode upper byte flags */
fe2de317 63315diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
572b4308 63316index c1bae8d..2dbcd31 100644
fe2de317
MT
63317--- a/include/linux/skbuff.h
63318+++ b/include/linux/skbuff.h
572b4308 63319@@ -663,7 +663,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
bc901d79
MT
63320 */
63321 static inline int skb_queue_empty(const struct sk_buff_head *list)
63322 {
63323- return list->next == (struct sk_buff *)list;
63324+ return list->next == (const struct sk_buff *)list;
63325 }
63326
63327 /**
572b4308 63328@@ -676,7 +676,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
bc901d79
MT
63329 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
63330 const struct sk_buff *skb)
63331 {
63332- return skb->next == (struct sk_buff *)list;
63333+ return skb->next == (const struct sk_buff *)list;
63334 }
63335
63336 /**
572b4308 63337@@ -689,7 +689,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
bc901d79
MT
63338 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
63339 const struct sk_buff *skb)
63340 {
63341- return skb->prev == (struct sk_buff *)list;
63342+ return skb->prev == (const struct sk_buff *)list;
63343 }
63344
63345 /**
572b4308 63346@@ -1584,7 +1584,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
8308f9c9
MT
63347 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
63348 */
63349 #ifndef NET_SKB_PAD
63350-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
15a11c5b 63351+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
8308f9c9
MT
63352 #endif
63353
63354 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
fe2de317 63355diff --git a/include/linux/slab.h b/include/linux/slab.h
572b4308 63356index a595dce..dfab0d2 100644
fe2de317
MT
63357--- a/include/linux/slab.h
63358+++ b/include/linux/slab.h
71d190be 63359@@ -11,12 +11,20 @@
ae4e228f
MT
63360
63361 #include <linux/gfp.h>
63362 #include <linux/types.h>
63363+#include <linux/err.h>
63364
63365 /*
63366 * Flags to pass to kmem_cache_create().
71d190be
MT
63367 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
63368 */
63369 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
63370+
572b4308 63371+#ifdef CONFIG_PAX_USERCOPY_SLABS
71d190be
MT
63372+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
63373+#else
63374+#define SLAB_USERCOPY 0x00000000UL
63375+#endif
63376+
63377 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
63378 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
63379 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
63380@@ -87,10 +95,13 @@
58c5fc13
MT
63381 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
63382 * Both make kfree a no-op.
63383 */
63384-#define ZERO_SIZE_PTR ((void *)16)
ae4e228f
MT
63385+#define ZERO_SIZE_PTR \
63386+({ \
63387+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
63388+ (void *)(-MAX_ERRNO-1L); \
63389+})
58c5fc13
MT
63390
63391-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
63392- (unsigned long)ZERO_SIZE_PTR)
df50ba0c 63393+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
58c5fc13
MT
63394
63395 /*
63396 * struct kmem_cache related prototypes
572b4308 63397@@ -161,6 +172,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
58c5fc13
MT
63398 void kfree(const void *);
63399 void kzfree(const void *);
63400 size_t ksize(const void *);
572b4308
MT
63401+const char *check_heap_object(const void *ptr, unsigned long n, bool to);
63402+bool is_usercopy_object(const void *ptr);
58c5fc13
MT
63403
63404 /*
63405 * Allocator specific definitions. These are mainly used to establish optimized
572b4308 63406@@ -240,6 +253,7 @@ size_t ksize(const void *);
c6e2a6c8
MT
63407 * for general use, and so are not documented here. For a full list of
63408 * potential flags, always refer to linux/gfp.h.
63409 */
63410+static void *kmalloc_array(size_t n, size_t size, gfp_t flags) __size_overflow(1, 2);
63411 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
63412 {
63413 if (size != 0 && n > ULONG_MAX / size)
572b4308 63414@@ -298,7 +312,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
4c928ab7
MT
63415 */
63416 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63417 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63418-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
63419+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
63420 #define kmalloc_track_caller(size, flags) \
63421 __kmalloc_track_caller(size, flags, _RET_IP_)
63422 #else
572b4308 63423@@ -317,7 +331,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
4c928ab7
MT
63424 */
63425 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
63426 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING))
63427-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
63428+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
63429 #define kmalloc_node_track_caller(size, flags, node) \
63430 __kmalloc_node_track_caller(size, flags, node, \
63431 _RET_IP_)
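Two related changes above: ZERO_SIZE_PTR moves from (void *)16 up to (void *)(-MAX_ERRNO-1L) (i.e. -4096, since MAX_ERRNO is 4095), and ZERO_OR_NULL_PTR() becomes a single unsigned comparison that relies on 0 - 1 wrapping to ULONG_MAX. A small user-space demonstration of that comparison trick, with a stand-in sentinel value:

#include <stdio.h>

/* stand-in for the patched ZERO_SIZE_PTR: the top 4096 bytes of the
 * address space, where no real slab object can live */
#define SENTINEL	((unsigned long)-4096L)

/* same shape as the patched ZERO_OR_NULL_PTR(): true for NULL, the
 * sentinel and anything above it, false for ordinary pointers */
#define ZERO_OR_NULL(x)	((unsigned long)(x) - 1 >= SENTINEL - 1)

int main(void)
{
	printf("%d\n", ZERO_OR_NULL((void *)0));		/* 1: NULL wraps to ULONG_MAX */
	printf("%d\n", ZERO_OR_NULL((void *)SENTINEL));		/* 1: the sentinel itself     */
	printf("%d\n", ZERO_OR_NULL((void *)16));		/* 0: an ordinary low value   */
	return 0;
}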
fe2de317 63432diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
572b4308 63433index fbd1117..0a3d314 100644
fe2de317
MT
63434--- a/include/linux/slab_def.h
63435+++ b/include/linux/slab_def.h
5e856224 63436@@ -66,10 +66,10 @@ struct kmem_cache {
fe2de317
MT
63437 unsigned long node_allocs;
63438 unsigned long node_frees;
63439 unsigned long node_overflow;
63440- atomic_t allochit;
63441- atomic_t allocmiss;
63442- atomic_t freehit;
63443- atomic_t freemiss;
63444+ atomic_unchecked_t allochit;
63445+ atomic_unchecked_t allocmiss;
63446+ atomic_unchecked_t freehit;
63447+ atomic_unchecked_t freemiss;
63448
63449 /*
63450 * If debugging is enabled, then the allocator can add additional
572b4308
MT
63451@@ -103,11 +103,16 @@ struct cache_sizes {
63452 #ifdef CONFIG_ZONE_DMA
63453 struct kmem_cache *cs_dmacachep;
63454 #endif
63455+
63456+#ifdef CONFIG_PAX_USERCOPY_SLABS
63457+ struct kmem_cache *cs_usercopycachep;
63458+#endif
63459+
63460 };
4c928ab7
MT
63461 extern struct cache_sizes malloc_sizes[];
63462
63463 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63464-void *__kmalloc(size_t size, gfp_t flags);
63465+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63466
63467 #ifdef CONFIG_TRACING
63468 extern void *kmem_cache_alloc_trace(size_t size,
572b4308
MT
63469@@ -150,6 +155,13 @@ found:
63470 cachep = malloc_sizes[i].cs_dmacachep;
63471 else
63472 #endif
63473+
63474+#ifdef CONFIG_PAX_USERCOPY_SLABS
63475+ if (flags & GFP_USERCOPY)
63476+ cachep = malloc_sizes[i].cs_usercopycachep;
63477+ else
63478+#endif
63479+
63480 cachep = malloc_sizes[i].cs_cachep;
63481
63482 ret = kmem_cache_alloc_trace(size, cachep, flags);
63483@@ -160,7 +172,7 @@ found:
4c928ab7
MT
63484 }
63485
63486 #ifdef CONFIG_NUMA
63487-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
63488+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63489 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63490
63491 #ifdef CONFIG_TRACING
572b4308
MT
63492@@ -203,6 +215,13 @@ found:
63493 cachep = malloc_sizes[i].cs_dmacachep;
63494 else
63495 #endif
63496+
63497+#ifdef CONFIG_PAX_USERCOPY_SLABS
63498+ if (flags & GFP_USERCOPY)
63499+ cachep = malloc_sizes[i].cs_usercopycachep;
63500+ else
63501+#endif
63502+
63503 cachep = malloc_sizes[i].cs_cachep;
63504
63505 return kmem_cache_alloc_node_trace(size, cachep, flags, node);
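
The slab_def.h change adds a third per-size cache, cs_usercopycachep, and routes any allocation carrying GFP_USERCOPY to it, so objects that are legitimately copied to or from user space never share a cache with purely kernel-internal data. A toy dispatcher showing the selection pattern; the flag bits and pool names below are invented for the demo:

    #include <stdio.h>

    /* Toy version of the cache-selection branch added above. */
    #define DEMO_GFP_DMA       0x1u
    #define DEMO_GFP_USERCOPY  0x2u

    struct demo_pool { const char *name; };

    static struct demo_pool dma_pool      = { "dma" };
    static struct demo_pool usercopy_pool = { "usercopy" };
    static struct demo_pool normal_pool   = { "normal" };

    static struct demo_pool *pick_pool(unsigned int flags)
    {
        if (flags & DEMO_GFP_DMA)
            return &dma_pool;
        if (flags & DEMO_GFP_USERCOPY)  /* the new cs_usercopycachep branch */
            return &usercopy_pool;
        return &normal_pool;
    }

    int main(void)
    {
        printf("plain    -> %s\n", pick_pool(0)->name);
        printf("usercopy -> %s\n", pick_pool(DEMO_GFP_USERCOPY)->name);
        printf("dma      -> %s\n", pick_pool(DEMO_GFP_DMA)->name);
        return 0;
    }
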
4c928ab7 63506diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
c6e2a6c8 63507index 0ec00b3..39cb7fc 100644
4c928ab7
MT
63508--- a/include/linux/slob_def.h
63509+++ b/include/linux/slob_def.h
c6e2a6c8 63510@@ -9,7 +9,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
4c928ab7
MT
63511 return kmem_cache_alloc_node(cachep, flags, -1);
63512 }
63513
63514-void *__kmalloc_node(size_t size, gfp_t flags, int node);
63515+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63516
4c928ab7
MT
63517 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63518 {
c6e2a6c8 63519@@ -29,6 +29,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
4c928ab7
MT
63520 return __kmalloc_node(size, flags, -1);
63521 }
63522
63523+static __always_inline void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
63524 static __always_inline void *__kmalloc(size_t size, gfp_t flags)
63525 {
63526 return kmalloc(size, flags);
fe2de317 63527diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
c6e2a6c8 63528index c2f8c8b..be9e036 100644
fe2de317
MT
63529--- a/include/linux/slub_def.h
63530+++ b/include/linux/slub_def.h
c6e2a6c8 63531@@ -92,7 +92,7 @@ struct kmem_cache {
58c5fc13
MT
63532 struct kmem_cache_order_objects max;
63533 struct kmem_cache_order_objects min;
63534 gfp_t allocflags; /* gfp flags to use on each alloc */
63535- int refcount; /* Refcount for slab cache destroy */
63536+ atomic_t refcount; /* Refcount for slab cache destroy */
63537 void (*ctor)(void *);
63538 int inuse; /* Offset to metadata */
63539 int align; /* Alignment */
c6e2a6c8
MT
63540@@ -153,6 +153,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
63541 * Sorry that the following has to be that ugly but some versions of GCC
63542 * have trouble with constant propagation and loops.
4c928ab7 63543 */
c6e2a6c8
MT
63544+static __always_inline int kmalloc_index(size_t size) __size_overflow(1);
63545 static __always_inline int kmalloc_index(size_t size)
4c928ab7 63546 {
c6e2a6c8
MT
63547 if (!size)
63548@@ -218,7 +219,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
15a11c5b
MT
63549 }
63550
63551 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
63552-void *__kmalloc(size_t size, gfp_t flags);
4c928ab7 63553+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
15a11c5b
MT
63554
63555 static __always_inline void *
63556 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
c6e2a6c8 63557@@ -259,6 +260,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
4c928ab7
MT
63558 }
63559 #endif
63560
63561+static __always_inline void *kmalloc_large(size_t size, gfp_t flags) __size_overflow(1);
63562 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
63563 {
63564 unsigned int order = get_order(size);
c6e2a6c8 63565@@ -284,7 +286,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
4c928ab7
MT
63566 }
63567
63568 #ifdef CONFIG_NUMA
63569-void *__kmalloc_node(size_t size, gfp_t flags, int node);
63570+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
63571 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
63572
63573 #ifdef CONFIG_TRACING
fe2de317
MT
63574diff --git a/include/linux/sonet.h b/include/linux/sonet.h
63575index de8832d..0147b46 100644
63576--- a/include/linux/sonet.h
63577+++ b/include/linux/sonet.h
58c5fc13 63578@@ -61,7 +61,7 @@ struct sonet_stats {
6e9df6a3 63579 #include <linux/atomic.h>
58c5fc13
MT
63580
63581 struct k_sonet_stats {
63582-#define __HANDLE_ITEM(i) atomic_t i
63583+#define __HANDLE_ITEM(i) atomic_unchecked_t i
63584 __SONET_ITEMS
63585 #undef __HANDLE_ITEM
63586 };
fe2de317 63587diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
c6e2a6c8 63588index 523547e..2cb7140 100644
fe2de317
MT
63589--- a/include/linux/sunrpc/clnt.h
63590+++ b/include/linux/sunrpc/clnt.h
c6e2a6c8 63591@@ -174,9 +174,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
bc901d79
MT
63592 {
63593 switch (sap->sa_family) {
63594 case AF_INET:
63595- return ntohs(((struct sockaddr_in *)sap)->sin_port);
63596+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
63597 case AF_INET6:
63598- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
63599+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
63600 }
63601 return 0;
63602 }
c6e2a6c8 63603@@ -209,7 +209,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
bc901d79
MT
63604 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
63605 const struct sockaddr *src)
63606 {
63607- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
63608+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
63609 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
63610
63611 dsin->sin_family = ssin->sin_family;
c6e2a6c8 63612@@ -312,7 +312,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
bc901d79
MT
63613 if (sa->sa_family != AF_INET6)
63614 return 0;
63615
63616- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
63617+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
63618 }
63619
63620 #endif /* __KERNEL__ */
fe2de317 63621diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
c6e2a6c8 63622index dc0c3cc..8503fb6 100644
fe2de317
MT
63623--- a/include/linux/sunrpc/sched.h
63624+++ b/include/linux/sunrpc/sched.h
c6e2a6c8
MT
63625@@ -106,6 +106,7 @@ struct rpc_call_ops {
63626 void (*rpc_count_stats)(struct rpc_task *, void *);
6e9df6a3
MT
63627 void (*rpc_release)(void *);
63628 };
63629+typedef struct rpc_call_ops __no_const rpc_call_ops_no_const;
63630
63631 struct rpc_task_setup {
63632 struct rpc_task *task;
fe2de317 63633diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
c6e2a6c8 63634index 0b8e3e6..33e0a01 100644
fe2de317
MT
63635--- a/include/linux/sunrpc/svc_rdma.h
63636+++ b/include/linux/sunrpc/svc_rdma.h
8308f9c9
MT
63637@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
63638 extern unsigned int svcrdma_max_requests;
63639 extern unsigned int svcrdma_max_req_size;
63640
63641-extern atomic_t rdma_stat_recv;
63642-extern atomic_t rdma_stat_read;
63643-extern atomic_t rdma_stat_write;
63644-extern atomic_t rdma_stat_sq_starve;
63645-extern atomic_t rdma_stat_rq_starve;
63646-extern atomic_t rdma_stat_rq_poll;
63647-extern atomic_t rdma_stat_rq_prod;
63648-extern atomic_t rdma_stat_sq_poll;
63649-extern atomic_t rdma_stat_sq_prod;
63650+extern atomic_unchecked_t rdma_stat_recv;
63651+extern atomic_unchecked_t rdma_stat_read;
63652+extern atomic_unchecked_t rdma_stat_write;
63653+extern atomic_unchecked_t rdma_stat_sq_starve;
63654+extern atomic_unchecked_t rdma_stat_rq_starve;
63655+extern atomic_unchecked_t rdma_stat_rq_poll;
63656+extern atomic_unchecked_t rdma_stat_rq_prod;
63657+extern atomic_unchecked_t rdma_stat_sq_poll;
63658+extern atomic_unchecked_t rdma_stat_sq_prod;
63659
63660 #define RPCRDMA_VERSION 1
63661
fe2de317 63662diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
c6e2a6c8 63663index c34b4c8..a65b67d 100644
fe2de317
MT
63664--- a/include/linux/sysctl.h
63665+++ b/include/linux/sysctl.h
ae4e228f 63666@@ -155,7 +155,11 @@ enum
58c5fc13
MT
63667 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
63668 };
63669
63670-
63671+#ifdef CONFIG_PAX_SOFTMODE
63672+enum {
63673+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
63674+};
63675+#endif
63676
63677 /* CTL_VM names: */
63678 enum
c6e2a6c8 63679@@ -948,6 +952,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
bc901d79
MT
63680
63681 extern int proc_dostring(struct ctl_table *, int,
63682 void __user *, size_t *, loff_t *);
63683+extern int proc_dostring_modpriv(struct ctl_table *, int,
63684+ void __user *, size_t *, loff_t *);
63685 extern int proc_dointvec(struct ctl_table *, int,
63686 void __user *, size_t *, loff_t *);
63687 extern int proc_dointvec_minmax(struct ctl_table *, int,
fe2de317
MT
63688diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
63689index ff7dc08..893e1bd 100644
63690--- a/include/linux/tty_ldisc.h
63691+++ b/include/linux/tty_ldisc.h
16454cff 63692@@ -148,7 +148,7 @@ struct tty_ldisc_ops {
58c5fc13
MT
63693
63694 struct module *owner;
63695
63696- int refcount;
63697+ atomic_t refcount;
63698 };
63699
63700 struct tty_ldisc {
fe2de317 63701diff --git a/include/linux/types.h b/include/linux/types.h
c6e2a6c8 63702index 7f480db..175c256 100644
fe2de317
MT
63703--- a/include/linux/types.h
63704+++ b/include/linux/types.h
c6e2a6c8 63705@@ -220,10 +220,26 @@ typedef struct {
57199397 63706 int counter;
58c5fc13
MT
63707 } atomic_t;
63708
63709+#ifdef CONFIG_PAX_REFCOUNT
63710+typedef struct {
57199397 63711+ int counter;
58c5fc13
MT
63712+} atomic_unchecked_t;
63713+#else
63714+typedef atomic_t atomic_unchecked_t;
63715+#endif
63716+
63717 #ifdef CONFIG_64BIT
63718 typedef struct {
57199397 63719 long counter;
58c5fc13
MT
63720 } atomic64_t;
63721+
63722+#ifdef CONFIG_PAX_REFCOUNT
63723+typedef struct {
57199397 63724+ long counter;
58c5fc13
MT
63725+} atomic64_unchecked_t;
63726+#else
63727+typedef atomic64_t atomic64_unchecked_t;
63728+#endif
63729 #endif
63730
6892158b 63731 struct list_head {
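
The types.h hunk introduces atomic_unchecked_t and atomic64_unchecked_t: under CONFIG_PAX_REFCOUNT the ordinary atomic_t operations detect overflow, while the *_unchecked variants opt out for counters (statistics, IDs) where wraparound is harmless; without the option both names are the same type. A userspace illustration of that checked/unchecked split, assuming a GCC/Clang __builtin_add_overflow; this is not the kernel implementation:

    #include <limits.h>
    #include <stdio.h>

    /* Checked increment refuses to wrap (the PAX_REFCOUNT behaviour for
     * atomic_t); unchecked is a plain wrapping counter for statistics. */
    static int inc_checked(int *counter)
    {
        int next;
        if (__builtin_add_overflow(*counter, 1, &next)) {
            fprintf(stderr, "refcount overflow detected, saturating\n");
            return *counter;            /* report and refuse to wrap */
        }
        return *counter = next;
    }

    static unsigned inc_unchecked(unsigned *counter)
    {
        return ++*counter;              /* wraps modulo 2^32, by design */
    }

    int main(void)
    {
        unsigned stats = UINT_MAX;
        int refs = INT_MAX;

        printf("unchecked counter wraps to %u\n", inc_unchecked(&stats));
        printf("checked counter stays at %d\n", inc_checked(&refs));
        return 0;
    }
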
fe2de317 63732diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
c6e2a6c8 63733index 5ca0951..ab496a5 100644
fe2de317
MT
63734--- a/include/linux/uaccess.h
63735+++ b/include/linux/uaccess.h
63736@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
58c5fc13
MT
63737 long ret; \
63738 mm_segment_t old_fs = get_fs(); \
63739 \
63740- set_fs(KERNEL_DS); \
63741 pagefault_disable(); \
6e9df6a3 63742- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
58c5fc13 63743- pagefault_enable(); \
6e9df6a3
MT
63744+ set_fs(KERNEL_DS); \
63745+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
58c5fc13
MT
63746 set_fs(old_fs); \
63747+ pagefault_enable(); \
63748 ret; \
63749 })
63750
fe2de317
MT
63751diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
63752index 99c1b4d..bb94261 100644
63753--- a/include/linux/unaligned/access_ok.h
63754+++ b/include/linux/unaligned/access_ok.h
bc901d79
MT
63755@@ -6,32 +6,32 @@
63756
63757 static inline u16 get_unaligned_le16(const void *p)
63758 {
63759- return le16_to_cpup((__le16 *)p);
63760+ return le16_to_cpup((const __le16 *)p);
63761 }
63762
63763 static inline u32 get_unaligned_le32(const void *p)
63764 {
63765- return le32_to_cpup((__le32 *)p);
63766+ return le32_to_cpup((const __le32 *)p);
63767 }
63768
63769 static inline u64 get_unaligned_le64(const void *p)
63770 {
63771- return le64_to_cpup((__le64 *)p);
63772+ return le64_to_cpup((const __le64 *)p);
63773 }
63774
63775 static inline u16 get_unaligned_be16(const void *p)
63776 {
63777- return be16_to_cpup((__be16 *)p);
63778+ return be16_to_cpup((const __be16 *)p);
63779 }
63780
63781 static inline u32 get_unaligned_be32(const void *p)
63782 {
63783- return be32_to_cpup((__be32 *)p);
63784+ return be32_to_cpup((const __be32 *)p);
63785 }
63786
63787 static inline u64 get_unaligned_be64(const void *p)
63788 {
63789- return be64_to_cpup((__be64 *)p);
63790+ return be64_to_cpup((const __be64 *)p);
63791 }
63792
63793 static inline void put_unaligned_le16(u16 val, void *p)
4c928ab7 63794diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
c6e2a6c8 63795index 547e59c..db6ad19 100644
4c928ab7
MT
63796--- a/include/linux/usb/renesas_usbhs.h
63797+++ b/include/linux/usb/renesas_usbhs.h
63798@@ -39,7 +39,7 @@ enum {
63799 */
63800 struct renesas_usbhs_driver_callback {
63801 int (*notify_hotplug)(struct platform_device *pdev);
63802-};
63803+} __no_const;
63804
63805 /*
63806 * callback functions for platform
5e856224 63807@@ -97,7 +97,7 @@ struct renesas_usbhs_platform_callback {
4c928ab7
MT
63808 * VBUS control is needed for Host
63809 */
63810 int (*set_vbus)(struct platform_device *pdev, int enable);
63811-};
63812+} __no_const;
63813
63814 /*
63815 * parameters for renesas usbhs
fe2de317 63816diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
4c928ab7 63817index 6f8fbcf..8259001 100644
fe2de317
MT
63818--- a/include/linux/vermagic.h
63819+++ b/include/linux/vermagic.h
4c928ab7 63820@@ -25,9 +25,35 @@
6e9df6a3
MT
63821 #define MODULE_ARCH_VERMAGIC ""
63822 #endif
63823
63824+#ifdef CONFIG_PAX_REFCOUNT
63825+#define MODULE_PAX_REFCOUNT "REFCOUNT "
63826+#else
63827+#define MODULE_PAX_REFCOUNT ""
63828+#endif
63829+
63830+#ifdef CONSTIFY_PLUGIN
63831+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
63832+#else
63833+#define MODULE_CONSTIFY_PLUGIN ""
63834+#endif
63835+
63836+#ifdef STACKLEAK_PLUGIN
63837+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
63838+#else
63839+#define MODULE_STACKLEAK_PLUGIN ""
63840+#endif
63841+
63842+#ifdef CONFIG_GRKERNSEC
63843+#define MODULE_GRSEC "GRSEC "
63844+#else
63845+#define MODULE_GRSEC ""
63846+#endif
63847+
63848 #define VERMAGIC_STRING \
63849 UTS_RELEASE " " \
63850 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
63851 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
63852- MODULE_ARCH_VERMAGIC
63853+ MODULE_ARCH_VERMAGIC \
63854+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
63855+ MODULE_GRSEC
63856
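
The vermagic.h hunk appends feature tokens (REFCOUNT, CONSTIFY_PLUGIN, STACKLEAK_PLUGIN, GRSEC) to VERMAGIC_STRING, so a module built without the matching options no longer matches the kernel's magic string at load time. A rough sketch of the effect, using hand-rolled example strings rather than real VERMAGIC output:

    #include <stdio.h>
    #include <string.h>

    /* Example strings only: the point is that an extra token on the kernel
     * side makes the comparison fail for modules built without it. */
    #define KERNEL_VERMAGIC "3.4.6 SMP mod_unload REFCOUNT CONSTIFY_PLUGIN GRSEC "
    #define MODULE_VERMAGIC "3.4.6 SMP mod_unload "

    int main(void)
    {
        if (strcmp(KERNEL_VERMAGIC, MODULE_VERMAGIC) != 0)
            printf("module rejected: '%s' != '%s'\n",
                   MODULE_VERMAGIC, KERNEL_VERMAGIC);
        else
            printf("vermagic matches, module accepted\n");
        return 0;
    }
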
fe2de317 63857diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
c6e2a6c8 63858index dcdfc2b..ec79ab5 100644
fe2de317
MT
63859--- a/include/linux/vmalloc.h
63860+++ b/include/linux/vmalloc.h
63861@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
58c5fc13
MT
63862 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
63863 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
6e9df6a3 63864 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
58c5fc13 63865+
df50ba0c 63866+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
6e9df6a3 63867+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
58c5fc13
MT
63868+#endif
63869+
63870 /* bits [20..32] reserved for arch specific ioremap internals */
63871
63872 /*
c6e2a6c8
MT
63873@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
63874 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
4c928ab7
MT
63875 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
63876 unsigned long start, unsigned long end, gfp_t gfp_mask,
63877- pgprot_t prot, int node, void *caller);
63878+ pgprot_t prot, int node, void *caller) __size_overflow(1);
63879 extern void vfree(const void *addr);
63880
63881 extern void *vmap(struct page **pages, unsigned int count,
63882@@ -123,8 +128,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
63883 extern void free_vm_area(struct vm_struct *area);
63884
63885 /* for /dev/kmem */
63886-extern long vread(char *buf, char *addr, unsigned long count);
63887-extern long vwrite(char *buf, char *addr, unsigned long count);
63888+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
63889+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
63890
63891 /*
63892 * Internals. Dont't use..
fe2de317
MT
63893diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
63894index 65efb92..137adbb 100644
63895--- a/include/linux/vmstat.h
63896+++ b/include/linux/vmstat.h
63897@@ -87,18 +87,18 @@ static inline void vm_events_fold_cpu(int cpu)
57199397
MT
63898 /*
63899 * Zone based page accounting with per cpu differentials.
63900 */
63901-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63902+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
63903
63904 static inline void zone_page_state_add(long x, struct zone *zone,
63905 enum zone_stat_item item)
63906 {
63907- atomic_long_add(x, &zone->vm_stat[item]);
63908- atomic_long_add(x, &vm_stat[item]);
63909+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
63910+ atomic_long_add_unchecked(x, &vm_stat[item]);
63911 }
63912
63913 static inline unsigned long global_page_state(enum zone_stat_item item)
63914 {
63915- long x = atomic_long_read(&vm_stat[item]);
63916+ long x = atomic_long_read_unchecked(&vm_stat[item]);
63917 #ifdef CONFIG_SMP
63918 if (x < 0)
63919 x = 0;
fe2de317 63920@@ -109,7 +109,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
57199397
MT
63921 static inline unsigned long zone_page_state(struct zone *zone,
63922 enum zone_stat_item item)
63923 {
63924- long x = atomic_long_read(&zone->vm_stat[item]);
63925+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63926 #ifdef CONFIG_SMP
63927 if (x < 0)
63928 x = 0;
fe2de317 63929@@ -126,7 +126,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
6892158b
MT
63930 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
63931 enum zone_stat_item item)
63932 {
63933- long x = atomic_long_read(&zone->vm_stat[item]);
63934+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
63935
63936 #ifdef CONFIG_SMP
63937 int cpu;
fe2de317 63938@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
57199397
MT
63939
63940 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
63941 {
63942- atomic_long_inc(&zone->vm_stat[item]);
63943- atomic_long_inc(&vm_stat[item]);
63944+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
63945+ atomic_long_inc_unchecked(&vm_stat[item]);
63946 }
63947
63948 static inline void __inc_zone_page_state(struct page *page,
fe2de317 63949@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
57199397
MT
63950
63951 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
63952 {
63953- atomic_long_dec(&zone->vm_stat[item]);
63954- atomic_long_dec(&vm_stat[item]);
63955+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
63956+ atomic_long_dec_unchecked(&vm_stat[item]);
63957 }
63958
63959 static inline void __dec_zone_page_state(struct page *page,
4c928ab7
MT
63960diff --git a/include/linux/xattr.h b/include/linux/xattr.h
63961index e5d1220..ef6e406 100644
63962--- a/include/linux/xattr.h
63963+++ b/include/linux/xattr.h
63964@@ -57,6 +57,11 @@
63965 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
63966 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
63967
63968+/* User namespace */
63969+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
63970+#define XATTR_PAX_FLAGS_SUFFIX "flags"
63971+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
63972+
63973 #ifdef __KERNEL__
63974
63975 #include <linux/types.h>
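
The xattr.h hunk reserves the name "user.pax.flags" (XATTR_PAX_PREFIX plus XATTR_PAX_FLAGS_SUFFIX), letting userspace tools store per-binary PaX flags in an extended attribute. A small example using the standard Linux xattr syscalls; the flag value written here is only a placeholder:

    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/xattr.h>

    /* Write and read back PaX flags on a binary via user.pax.flags. */
    int main(int argc, char **argv)
    {
        const char *path  = argc > 1 ? argv[1] : "./a.out";
        const char *value = "em";       /* placeholder flag string */
        char buf[64];
        ssize_t len;

        if (setxattr(path, "user.pax.flags", value, strlen(value), 0) != 0)
            perror("setxattr");

        len = getxattr(path, "user.pax.flags", buf, sizeof(buf) - 1);
        if (len < 0) {
            perror("getxattr");
            return 1;
        }
        buf[len] = '\0';
        printf("user.pax.flags = %s\n", buf);
        return 0;
    }
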
fe2de317
MT
63976diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
63977index 4aeff96..b378cdc 100644
63978--- a/include/media/saa7146_vv.h
63979+++ b/include/media/saa7146_vv.h
15a11c5b
MT
63980@@ -163,7 +163,7 @@ struct saa7146_ext_vv
63981 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
63982
63983 /* the extension can override this */
63984- struct v4l2_ioctl_ops ops;
63985+ v4l2_ioctl_ops_no_const ops;
63986 /* pointer to the saa7146 core ops */
63987 const struct v4l2_ioctl_ops *core_ops;
63988
fe2de317 63989diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
c6e2a6c8 63990index 96d2221..2292f89 100644
fe2de317
MT
63991--- a/include/media/v4l2-dev.h
63992+++ b/include/media/v4l2-dev.h
63993@@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_state *global, enum v4l2_priority local);
6e9df6a3
MT
63994
63995
63996 struct v4l2_file_operations {
63997- struct module *owner;
63998+ struct module * const owner;
63999 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
64000 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
64001 unsigned int (*poll) (struct file *, struct poll_table_struct *);
c6e2a6c8 64002@@ -71,6 +71,7 @@ struct v4l2_file_operations {
6e9df6a3
MT
64003 int (*open) (struct file *);
64004 int (*release) (struct file *);
64005 };
64006+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
64007
64008 /*
64009 * Newer version of video_device, handled by videodev2.c
fe2de317 64010diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
c6e2a6c8 64011index 3cb939c..f23c6bb 100644
fe2de317
MT
64012--- a/include/media/v4l2-ioctl.h
64013+++ b/include/media/v4l2-ioctl.h
c6e2a6c8 64014@@ -281,7 +281,7 @@ struct v4l2_ioctl_ops {
15a11c5b
MT
64015 long (*vidioc_default) (struct file *file, void *fh,
64016 bool valid_prio, int cmd, void *arg);
64017 };
6e9df6a3 64018-
15a11c5b
MT
64019+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
64020
15a11c5b 64021 /* v4l debugging and diagnostics */
6e9df6a3 64022
fe2de317 64023diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
c6e2a6c8 64024index 6db8ecf..8c23861 100644
fe2de317
MT
64025--- a/include/net/caif/caif_hsi.h
64026+++ b/include/net/caif/caif_hsi.h
4c928ab7 64027@@ -98,7 +98,7 @@ struct cfhsi_drv {
6e9df6a3
MT
64028 void (*rx_done_cb) (struct cfhsi_drv *drv);
64029 void (*wake_up_cb) (struct cfhsi_drv *drv);
64030 void (*wake_down_cb) (struct cfhsi_drv *drv);
64031-};
64032+} __no_const;
64033
64034 /* Structure implemented by HSI device. */
64035 struct cfhsi_dev {
fe2de317
MT
64036diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
64037index 9e5425b..8136ffc 100644
64038--- a/include/net/caif/cfctrl.h
64039+++ b/include/net/caif/cfctrl.h
15a11c5b
MT
64040@@ -52,7 +52,7 @@ struct cfctrl_rsp {
64041 void (*radioset_rsp)(void);
64042 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
64043 struct cflayer *client_layer);
64044-};
64045+} __no_const;
64046
64047 /* Link Setup Parameters for CAIF-Links. */
64048 struct cfctrl_link_param {
8308f9c9
MT
64049@@ -101,8 +101,8 @@ struct cfctrl_request_info {
64050 struct cfctrl {
64051 struct cfsrvl serv;
64052 struct cfctrl_rsp res;
64053- atomic_t req_seq_no;
64054- atomic_t rsp_seq_no;
64055+ atomic_unchecked_t req_seq_no;
64056+ atomic_unchecked_t rsp_seq_no;
64057 struct list_head list;
64058 /* Protects from simultaneous access to first_req list */
64059 spinlock_t info_list_lock;
fe2de317 64060diff --git a/include/net/flow.h b/include/net/flow.h
5e856224 64061index 6c469db..7743b8e 100644
fe2de317
MT
64062--- a/include/net/flow.h
64063+++ b/include/net/flow.h
5e856224 64064@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
8308f9c9
MT
64065
64066 extern void flow_cache_flush(void);
4c928ab7 64067 extern void flow_cache_flush_deferred(void);
8308f9c9
MT
64068-extern atomic_t flow_cache_genid;
64069+extern atomic_unchecked_t flow_cache_genid;
64070
66a7e928 64071 #endif
fe2de317 64072diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
572b4308 64073index 2040bff..f4c0733 100644
fe2de317
MT
64074--- a/include/net/inetpeer.h
64075+++ b/include/net/inetpeer.h
572b4308 64076@@ -51,8 +51,8 @@ struct inet_peer {
6892158b
MT
64077 */
64078 union {
64079 struct {
66a7e928
MT
64080- atomic_t rid; /* Frag reception counter */
64081- atomic_t ip_id_count; /* IP ID for the next packet */
64082+ atomic_unchecked_t rid; /* Frag reception counter */
64083+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
64084 __u32 tcp_ts;
64085 __u32 tcp_ts_stamp;
6e9df6a3 64086 };
572b4308 64087@@ -118,11 +118,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
6892158b
MT
64088 more++;
64089 inet_peer_refcheck(p);
6e9df6a3
MT
64090 do {
64091- old = atomic_read(&p->ip_id_count);
64092+ old = atomic_read_unchecked(&p->ip_id_count);
64093 new = old + more;
64094 if (!new)
64095 new = 1;
64096- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
64097+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
64098 return new;
64099 }
64100
fe2de317
MT
64101diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
64102index 10422ef..662570f 100644
64103--- a/include/net/ip_fib.h
64104+++ b/include/net/ip_fib.h
64105@@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
66a7e928
MT
64106
64107 #define FIB_RES_SADDR(net, res) \
64108 ((FIB_RES_NH(res).nh_saddr_genid == \
64109- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
64110+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
64111 FIB_RES_NH(res).nh_saddr : \
64112 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
64113 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
fe2de317 64114diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
572b4308 64115index 72522f0..2965e05 100644
fe2de317
MT
64116--- a/include/net/ip_vs.h
64117+++ b/include/net/ip_vs.h
c6e2a6c8 64118@@ -510,7 +510,7 @@ struct ip_vs_conn {
8308f9c9
MT
64119 struct ip_vs_conn *control; /* Master control connection */
64120 atomic_t n_control; /* Number of controlled ones */
64121 struct ip_vs_dest *dest; /* real server */
64122- atomic_t in_pkts; /* incoming packet counter */
64123+ atomic_unchecked_t in_pkts; /* incoming packet counter */
64124
64125 /* packet transmitter for different forwarding methods. If it
64126 mangles the packet, it must return NF_DROP or better NF_STOLEN,
c6e2a6c8 64127@@ -648,7 +648,7 @@ struct ip_vs_dest {
8308f9c9 64128 __be16 port; /* port number of the server */
66a7e928 64129 union nf_inet_addr addr; /* IP address of the server */
8308f9c9
MT
64130 volatile unsigned flags; /* dest status flags */
64131- atomic_t conn_flags; /* flags to copy to conn */
64132+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
64133 atomic_t weight; /* server weight */
64134
64135 atomic_t refcnt; /* reference counter */
572b4308
MT
64136@@ -1356,7 +1356,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
64137 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
64138
64139 if (!ct || !nf_ct_is_untracked(ct)) {
64140- nf_reset(skb);
64141+ nf_conntrack_put(skb->nfct);
64142 skb->nfct = &nf_ct_untracked_get()->ct_general;
64143 skb->nfctinfo = IP_CT_NEW;
64144 nf_conntrack_get(skb->nfct);
fe2de317
MT
64145diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
64146index 69b610a..fe3962c 100644
64147--- a/include/net/irda/ircomm_core.h
64148+++ b/include/net/irda/ircomm_core.h
15a11c5b
MT
64149@@ -51,7 +51,7 @@ typedef struct {
64150 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
64151 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
64152 struct ircomm_info *);
64153-} call_t;
64154+} __no_const call_t;
64155
64156 struct ircomm_cb {
64157 irda_queue_t queue;
fe2de317
MT
64158diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
64159index 59ba38bc..d515662 100644
64160--- a/include/net/irda/ircomm_tty.h
64161+++ b/include/net/irda/ircomm_tty.h
c52201e0
MT
64162@@ -35,6 +35,7 @@
64163 #include <linux/termios.h>
64164 #include <linux/timer.h>
64165 #include <linux/tty.h> /* struct tty_struct */
64166+#include <asm/local.h>
64167
64168 #include <net/irda/irias_object.h>
64169 #include <net/irda/ircomm_core.h>
64170@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
58c5fc13
MT
64171 unsigned short close_delay;
64172 unsigned short closing_wait; /* time to wait before closing */
64173
64174- int open_count;
64175- int blocked_open; /* # of blocked opens */
c52201e0
MT
64176+ local_t open_count;
64177+ local_t blocked_open; /* # of blocked opens */
58c5fc13
MT
64178
64179 /* Protect concurent access to :
64180 * o self->open_count
fe2de317 64181diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
c6e2a6c8 64182index cc7c197..9f2da2a 100644
fe2de317
MT
64183--- a/include/net/iucv/af_iucv.h
64184+++ b/include/net/iucv/af_iucv.h
c6e2a6c8 64185@@ -141,7 +141,7 @@ struct iucv_sock {
8308f9c9
MT
64186 struct iucv_sock_list {
64187 struct hlist_head head;
64188 rwlock_t lock;
64189- atomic_t autobind_name;
64190+ atomic_unchecked_t autobind_name;
64191 };
64192
64193 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
fe2de317 64194diff --git a/include/net/neighbour.h b/include/net/neighbour.h
5e856224 64195index 34c996f..bb3b4d4 100644
fe2de317
MT
64196--- a/include/net/neighbour.h
64197+++ b/include/net/neighbour.h
5e856224 64198@@ -123,7 +123,7 @@ struct neigh_ops {
6e9df6a3
MT
64199 void (*error_report)(struct neighbour *, struct sk_buff *);
64200 int (*output)(struct neighbour *, struct sk_buff *);
64201 int (*connected_output)(struct neighbour *, struct sk_buff *);
15a11c5b
MT
64202-};
64203+} __do_const;
ae4e228f
MT
64204
64205 struct pneigh_entry {
15a11c5b 64206 struct pneigh_entry *next;
fe2de317 64207diff --git a/include/net/netlink.h b/include/net/netlink.h
c6e2a6c8 64208index f394fe5..fd073f9 100644
fe2de317
MT
64209--- a/include/net/netlink.h
64210+++ b/include/net/netlink.h
c6e2a6c8 64211@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
bc901d79
MT
64212 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
64213 {
64214 if (mark)
64215- skb_trim(skb, (unsigned char *) mark - skb->data);
64216+ skb_trim(skb, (const unsigned char *) mark - skb->data);
64217 }
64218
64219 /**
fe2de317 64220diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
5e856224 64221index bbd023a..97c6d0d 100644
fe2de317
MT
64222--- a/include/net/netns/ipv4.h
64223+++ b/include/net/netns/ipv4.h
5e856224 64224@@ -57,8 +57,8 @@ struct netns_ipv4 {
15a11c5b 64225 unsigned int sysctl_ping_group_range[2];
5e856224 64226 long sysctl_tcp_mem[3];
8308f9c9
MT
64227
64228- atomic_t rt_genid;
66a7e928 64229- atomic_t dev_addr_genid;
8308f9c9 64230+ atomic_unchecked_t rt_genid;
66a7e928 64231+ atomic_unchecked_t dev_addr_genid;
8308f9c9
MT
64232
64233 #ifdef CONFIG_IP_MROUTE
64234 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
fe2de317 64235diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
c6e2a6c8 64236index a2ef814..31a8e3f 100644
fe2de317
MT
64237--- a/include/net/sctp/sctp.h
64238+++ b/include/net/sctp/sctp.h
6e9df6a3 64239@@ -318,9 +318,9 @@ do { \
58c5fc13
MT
64240
64241 #else /* SCTP_DEBUG */
64242
64243-#define SCTP_DEBUG_PRINTK(whatever...)
bc901d79 64244-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
58c5fc13
MT
64245-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
64246+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
bc901d79 64247+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
58c5fc13
MT
64248+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
64249 #define SCTP_ENABLE_DEBUG
64250 #define SCTP_DISABLE_DEBUG
64251 #define SCTP_ASSERT(expr, str, func)
fe2de317 64252diff --git a/include/net/sock.h b/include/net/sock.h
c6e2a6c8 64253index 5a0a58a..2e3d4d0 100644
fe2de317
MT
64254--- a/include/net/sock.h
64255+++ b/include/net/sock.h
c6e2a6c8 64256@@ -302,7 +302,7 @@ struct sock {
8308f9c9
MT
64257 #ifdef CONFIG_RPS
64258 __u32 sk_rxhash;
64259 #endif
64260- atomic_t sk_drops;
64261+ atomic_unchecked_t sk_drops;
64262 int sk_rcvbuf;
64263
64264 struct sk_filter __rcu *sk_filter;
c6e2a6c8 64265@@ -1691,7 +1691,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
15a11c5b
MT
64266 }
64267
64268 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
64269- char __user *from, char *to,
64270+ char __user *from, unsigned char *to,
64271 int copy, int offset)
64272 {
64273 if (skb->ip_summed == CHECKSUM_NONE) {
fe2de317 64274diff --git a/include/net/tcp.h b/include/net/tcp.h
c6e2a6c8 64275index f75a04d..702cf06 100644
fe2de317
MT
64276--- a/include/net/tcp.h
64277+++ b/include/net/tcp.h
c6e2a6c8 64278@@ -1425,7 +1425,7 @@ struct tcp_seq_afinfo {
4c928ab7
MT
64279 char *name;
64280 sa_family_t family;
64281 const struct file_operations *seq_fops;
64282- struct seq_operations seq_ops;
64283+ seq_operations_no_const seq_ops;
ae4e228f 64284 };
16454cff 64285
15a11c5b 64286 struct tcp_iter_state {
fe2de317 64287diff --git a/include/net/udp.h b/include/net/udp.h
c6e2a6c8 64288index 5d606d9..e879f7b 100644
fe2de317
MT
64289--- a/include/net/udp.h
64290+++ b/include/net/udp.h
c6e2a6c8 64291@@ -244,7 +244,7 @@ struct udp_seq_afinfo {
4c928ab7
MT
64292 sa_family_t family;
64293 struct udp_table *udp_table;
64294 const struct file_operations *seq_fops;
64295- struct seq_operations seq_ops;
64296+ seq_operations_no_const seq_ops;
ae4e228f 64297 };
16454cff 64298
15a11c5b 64299 struct udp_iter_state {
fe2de317 64300diff --git a/include/net/xfrm.h b/include/net/xfrm.h
c6e2a6c8 64301index 96239e7..c85b032 100644
fe2de317
MT
64302--- a/include/net/xfrm.h
64303+++ b/include/net/xfrm.h
66a7e928 64304@@ -505,7 +505,7 @@ struct xfrm_policy {
8308f9c9
MT
64305 struct timer_list timer;
64306
64307 struct flow_cache_object flo;
64308- atomic_t genid;
64309+ atomic_unchecked_t genid;
64310 u32 priority;
64311 u32 index;
64312 struct xfrm_mark mark;
fe2de317 64313diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
4c928ab7 64314index 1a046b1..ee0bef0 100644
fe2de317
MT
64315--- a/include/rdma/iw_cm.h
64316+++ b/include/rdma/iw_cm.h
4c928ab7 64317@@ -122,7 +122,7 @@ struct iw_cm_verbs {
15a11c5b
MT
64318 int backlog);
64319
64320 int (*destroy_listen)(struct iw_cm_id *cm_id);
64321-};
64322+} __no_const;
64323
64324 /**
64325 * iw_create_cm_id - Create an IW CM identifier.
fe2de317 64326diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
c6e2a6c8 64327index 8f9dfba..610ab6c 100644
fe2de317
MT
64328--- a/include/scsi/libfc.h
64329+++ b/include/scsi/libfc.h
c6e2a6c8 64330@@ -756,6 +756,7 @@ struct libfc_function_template {
15a11c5b
MT
64331 */
64332 void (*disc_stop_final) (struct fc_lport *);
64333 };
64334+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
64335
64336 /**
64337 * struct fc_disc - Discovery context
c6e2a6c8 64338@@ -861,7 +862,7 @@ struct fc_lport {
15a11c5b
MT
64339 struct fc_vport *vport;
64340
64341 /* Operational Information */
64342- struct libfc_function_template tt;
64343+ libfc_function_template_no_const tt;
64344 u8 link_up;
64345 u8 qfull;
64346 enum fc_lport_state state;
fe2de317 64347diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
572b4308 64348index ba96988..ecf2eb9 100644
fe2de317
MT
64349--- a/include/scsi/scsi_device.h
64350+++ b/include/scsi/scsi_device.h
572b4308 64351@@ -163,9 +163,9 @@ struct scsi_device {
8308f9c9
MT
64352 unsigned int max_device_blocked; /* what device_blocked counts down from */
64353 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
64354
64355- atomic_t iorequest_cnt;
64356- atomic_t iodone_cnt;
64357- atomic_t ioerr_cnt;
64358+ atomic_unchecked_t iorequest_cnt;
64359+ atomic_unchecked_t iodone_cnt;
64360+ atomic_unchecked_t ioerr_cnt;
64361
64362 struct device sdev_gendev,
64363 sdev_dev;
fe2de317 64364diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
c6e2a6c8 64365index 719faf1..d1154d4 100644
fe2de317
MT
64366--- a/include/scsi/scsi_transport_fc.h
64367+++ b/include/scsi/scsi_transport_fc.h
c6e2a6c8 64368@@ -739,7 +739,7 @@ struct fc_function_template {
15a11c5b 64369 unsigned long show_host_system_hostname:1;
66a7e928 64370
15a11c5b
MT
64371 unsigned long disable_target_scan:1;
64372-};
64373+} __do_const;
66a7e928 64374
66a7e928 64375
15a11c5b 64376 /**
fe2de317
MT
64377diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
64378index 030b87c..98a6954 100644
64379--- a/include/sound/ak4xxx-adda.h
64380+++ b/include/sound/ak4xxx-adda.h
15a11c5b
MT
64381@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
64382 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
64383 unsigned char val);
64384 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
64385-};
64386+} __no_const;
64387
64388 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
64389
fe2de317
MT
64390diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
64391index 8c05e47..2b5df97 100644
64392--- a/include/sound/hwdep.h
64393+++ b/include/sound/hwdep.h
15a11c5b
MT
64394@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
64395 struct snd_hwdep_dsp_status *status);
64396 int (*dsp_load)(struct snd_hwdep *hw,
64397 struct snd_hwdep_dsp_image *image);
64398-};
64399+} __no_const;
64400
64401 struct snd_hwdep {
64402 struct snd_card *card;
fe2de317 64403diff --git a/include/sound/info.h b/include/sound/info.h
5e856224 64404index 9ca1a49..aba1728 100644
fe2de317
MT
64405--- a/include/sound/info.h
64406+++ b/include/sound/info.h
15a11c5b
MT
64407@@ -44,7 +44,7 @@ struct snd_info_entry_text {
64408 struct snd_info_buffer *buffer);
64409 void (*write)(struct snd_info_entry *entry,
64410 struct snd_info_buffer *buffer);
64411-};
64412+} __no_const;
64413
64414 struct snd_info_entry_ops {
64415 int (*open)(struct snd_info_entry *entry,
fe2de317 64416diff --git a/include/sound/pcm.h b/include/sound/pcm.h
c6e2a6c8 64417index 0d11128..814178e 100644
fe2de317
MT
64418--- a/include/sound/pcm.h
64419+++ b/include/sound/pcm.h
15a11c5b
MT
64420@@ -81,6 +81,7 @@ struct snd_pcm_ops {
64421 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
64422 int (*ack)(struct snd_pcm_substream *substream);
64423 };
64424+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
66a7e928 64425
15a11c5b
MT
64426 /*
64427 *
fe2de317
MT
64428diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
64429index af1b49e..a5d55a5 100644
64430--- a/include/sound/sb16_csp.h
64431+++ b/include/sound/sb16_csp.h
15a11c5b
MT
64432@@ -146,7 +146,7 @@ struct snd_sb_csp_ops {
64433 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
64434 int (*csp_stop) (struct snd_sb_csp * p);
64435 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
64436-};
64437+} __no_const;
66a7e928 64438
15a11c5b
MT
64439 /*
64440 * CSP private data
fe2de317 64441diff --git a/include/sound/soc.h b/include/sound/soc.h
c6e2a6c8 64442index 2ebf787..0276839 100644
fe2de317
MT
64443--- a/include/sound/soc.h
64444+++ b/include/sound/soc.h
c6e2a6c8 64445@@ -711,7 +711,7 @@ struct snd_soc_platform_driver {
6e9df6a3
MT
64446 /* platform IO - used for platform DAPM */
64447 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
64448 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
15a11c5b
MT
64449-};
64450+} __do_const;
64451
64452 struct snd_soc_platform {
64453 const char *name;
c6e2a6c8 64454@@ -887,7 +887,7 @@ struct snd_soc_pcm_runtime {
5e856224
MT
64455 struct snd_soc_dai_link *dai_link;
64456 struct mutex pcm_mutex;
64457 enum snd_soc_pcm_subclass pcm_subclass;
64458- struct snd_pcm_ops ops;
64459+ snd_pcm_ops_no_const ops;
64460
64461 unsigned int complete:1;
64462 unsigned int dev_registered:1;
fe2de317 64463diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
c6e2a6c8 64464index 4119966..1a4671c 100644
fe2de317
MT
64465--- a/include/sound/ymfpci.h
64466+++ b/include/sound/ymfpci.h
8308f9c9
MT
64467@@ -358,7 +358,7 @@ struct snd_ymfpci {
64468 spinlock_t reg_lock;
64469 spinlock_t voice_lock;
64470 wait_queue_head_t interrupt_sleep;
64471- atomic_t interrupt_sleep_count;
64472+ atomic_unchecked_t interrupt_sleep_count;
64473 struct snd_info_entry *proc_entry;
64474 const struct firmware *dsp_microcode;
64475 const struct firmware *controller_microcode;
fe2de317 64476diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
c6e2a6c8 64477index aaccc5f..092d568 100644
fe2de317
MT
64478--- a/include/target/target_core_base.h
64479+++ b/include/target/target_core_base.h
c6e2a6c8 64480@@ -447,7 +447,7 @@ struct t10_reservation_ops {
15a11c5b
MT
64481 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
64482 int (*t10_pr_register)(struct se_cmd *);
64483 int (*t10_pr_clear)(struct se_cmd *);
64484-};
64485+} __no_const;
64486
6e9df6a3 64487 struct t10_reservation {
15a11c5b 64488 /* Reservation effects all target ports */
c6e2a6c8 64489@@ -576,7 +576,7 @@ struct se_cmd {
4c928ab7 64490 atomic_t t_se_count;
8308f9c9
MT
64491 atomic_t t_task_cdbs_left;
64492 atomic_t t_task_cdbs_ex_left;
8308f9c9 64493- atomic_t t_task_cdbs_sent;
8308f9c9 64494+ atomic_unchecked_t t_task_cdbs_sent;
c6e2a6c8
MT
64495 unsigned int transport_state;
64496 #define CMD_T_ABORTED (1 << 0)
64497 #define CMD_T_ACTIVE (1 << 1)
64498@@ -802,7 +802,7 @@ struct se_device {
5e856224 64499 spinlock_t stats_lock;
4c928ab7 64500 /* Active commands on this virtual SE device */
8308f9c9 64501 atomic_t simple_cmds;
8308f9c9
MT
64502- atomic_t dev_ordered_id;
64503+ atomic_unchecked_t dev_ordered_id;
8308f9c9 64504 atomic_t execute_tasks;
4c928ab7
MT
64505 atomic_t dev_ordered_sync;
64506 atomic_t dev_qf_count;
c6e2a6c8
MT
64507diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
64508new file mode 100644
64509index 0000000..2efe49d
64510--- /dev/null
64511+++ b/include/trace/events/fs.h
64512@@ -0,0 +1,53 @@
64513+#undef TRACE_SYSTEM
64514+#define TRACE_SYSTEM fs
64515+
64516+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
64517+#define _TRACE_FS_H
64518+
64519+#include <linux/fs.h>
64520+#include <linux/tracepoint.h>
64521+
64522+TRACE_EVENT(do_sys_open,
64523+
64524+ TP_PROTO(char *filename, int flags, int mode),
64525+
64526+ TP_ARGS(filename, flags, mode),
64527+
64528+ TP_STRUCT__entry(
64529+ __string( filename, filename )
64530+ __field( int, flags )
64531+ __field( int, mode )
64532+ ),
64533+
64534+ TP_fast_assign(
64535+ __assign_str(filename, filename);
64536+ __entry->flags = flags;
64537+ __entry->mode = mode;
64538+ ),
64539+
64540+ TP_printk("\"%s\" %x %o",
64541+ __get_str(filename), __entry->flags, __entry->mode)
64542+);
64543+
64544+TRACE_EVENT(open_exec,
64545+
64546+ TP_PROTO(const char *filename),
64547+
64548+ TP_ARGS(filename),
64549+
64550+ TP_STRUCT__entry(
64551+ __string( filename, filename )
64552+ ),
64553+
64554+ TP_fast_assign(
64555+ __assign_str(filename, filename);
64556+ ),
64557+
64558+ TP_printk("\"%s\"",
64559+ __get_str(filename))
64560+);
64561+
64562+#endif /* _TRACE_FS_H */
64563+
64564+/* This part must be outside protection */
64565+#include <trace/define_trace.h>
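
The new include/trace/events/fs.h adds do_sys_open and open_exec tracepoints under the fs trace system. One way to watch them from userspace is through ftrace; the sketch below assumes debugfs is mounted at /sys/kernel/debug (newer kernels expose the same files under /sys/kernel/tracing) and needs root:

    #include <stdio.h>

    /* Enable fs:do_sys_open and stream a few records from the ftrace pipe. */
    int main(void)
    {
        const char *enable =
            "/sys/kernel/debug/tracing/events/fs/do_sys_open/enable";
        const char *pipe = "/sys/kernel/debug/tracing/trace_pipe";
        char line[512];
        int i;

        FILE *f = fopen(enable, "w");
        if (!f) { perror(enable); return 1; }
        fputs("1\n", f);
        fclose(f);

        FILE *p = fopen(pipe, "r");
        if (!p) { perror(pipe); return 1; }
        /* Each record ends with the TP_printk format above: "name" flags mode */
        for (i = 0; i < 10 && fgets(line, sizeof(line), p); i++)
            fputs(line, stdout);
        fclose(p);
        return 0;
    }
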
fe2de317
MT
64566diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
64567index 1c09820..7f5ec79 100644
64568--- a/include/trace/events/irq.h
64569+++ b/include/trace/events/irq.h
bc901d79 64570@@ -36,7 +36,7 @@ struct softirq_action;
ae4e228f
MT
64571 */
64572 TRACE_EVENT(irq_handler_entry,
64573
64574- TP_PROTO(int irq, struct irqaction *action),
64575+ TP_PROTO(int irq, const struct irqaction *action),
64576
64577 TP_ARGS(irq, action),
64578
bc901d79 64579@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
ae4e228f
MT
64580 */
64581 TRACE_EVENT(irq_handler_exit,
64582
64583- TP_PROTO(int irq, struct irqaction *action, int ret),
64584+ TP_PROTO(int irq, const struct irqaction *action, int ret),
64585
64586 TP_ARGS(irq, action, ret),
64587
fe2de317 64588diff --git a/include/video/udlfb.h b/include/video/udlfb.h
c6e2a6c8 64589index f9466fa..f4e2b81 100644
fe2de317
MT
64590--- a/include/video/udlfb.h
64591+++ b/include/video/udlfb.h
c6e2a6c8 64592@@ -53,10 +53,10 @@ struct dlfb_data {
8308f9c9 64593 u32 pseudo_palette[256];
4c928ab7 64594 int blank_mode; /*one of FB_BLANK_ */
8308f9c9
MT
64595 /* blit-only rendering path metrics, exposed through sysfs */
64596- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64597- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
64598- atomic_t bytes_sent; /* to usb, after compression including overhead */
64599- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
64600+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
64601+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
64602+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
64603+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
64604 };
64605
64606 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
fe2de317
MT
64607diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
64608index 0993a22..32ba2fe 100644
64609--- a/include/video/uvesafb.h
64610+++ b/include/video/uvesafb.h
58c5fc13
MT
64611@@ -177,6 +177,7 @@ struct uvesafb_par {
64612 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
64613 u8 pmi_setpal; /* PMI for palette changes */
64614 u16 *pmi_base; /* protected mode interface location */
64615+ u8 *pmi_code; /* protected mode code location */
64616 void *pmi_start;
64617 void *pmi_pal;
64618 u8 *vbe_state_orig; /*
fe2de317 64619diff --git a/init/Kconfig b/init/Kconfig
572b4308 64620index 6cfd71d..16006e6 100644
fe2de317
MT
64621--- a/init/Kconfig
64622+++ b/init/Kconfig
c6e2a6c8 64623@@ -790,6 +790,7 @@ endif # CGROUPS
5e856224
MT
64624
64625 config CHECKPOINT_RESTORE
64626 bool "Checkpoint/restore support" if EXPERT
64627+ depends on !GRKERNSEC
64628 default n
64629 help
64630 Enables additional kernel features in a sake of checkpoint/restore.
c6e2a6c8 64631@@ -1240,7 +1241,7 @@ config SLUB_DEBUG
fe2de317
MT
64632
64633 config COMPAT_BRK
64634 bool "Disable heap randomization"
64635- default y
64636+ default n
64637 help
64638 Randomizing heap placement makes heap exploits harder, but it
64639 also breaks ancient binaries (including anything libc5 based).
572b4308
MT
64640@@ -1423,7 +1424,7 @@ config INIT_ALL_POSSIBLE
64641 config STOP_MACHINE
64642 bool
64643 default y
64644- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
64645+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
64646 help
64647 Need stop_machine() primitive.
64648
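
The init/Kconfig change flips COMPAT_BRK to default n, i.e. heap (brk) randomization stays enabled unless explicitly disabled. A quick way to observe the difference is to print the initial program break across several runs; with randomization active the address moves each time:

    #include <stdio.h>
    #include <unistd.h>

    /* Print the initial program break; compare the output over a few runs.
     * COMPAT_BRK=n (randomized brk): the address changes per run.
     * COMPAT_BRK=y: it stays constant. */
    int main(void)
    {
        printf("initial brk: %p\n", sbrk(0));
        return 0;
    }
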
fe2de317 64649diff --git a/init/do_mounts.c b/init/do_mounts.c
c6e2a6c8 64650index 42b0707..c06eef4 100644
fe2de317
MT
64651--- a/init/do_mounts.c
64652+++ b/init/do_mounts.c
5e856224 64653@@ -326,11 +326,11 @@ static void __init get_fs_names(char *page)
58c5fc13
MT
64654 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
64655 {
5e856224 64656 struct super_block *s;
58c5fc13 64657- int err = sys_mount(name, "/root", fs, flags, data);
6e9df6a3 64658+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
58c5fc13
MT
64659 if (err)
64660 return err;
64661
6e9df6a3 64662- sys_chdir((const char __user __force *)"/root");
5e856224
MT
64663+ sys_chdir((const char __force_user *)"/root");
64664 s = current->fs->pwd.dentry->d_sb;
64665 ROOT_DEV = s->s_dev;
6e9df6a3 64666 printk(KERN_INFO
5e856224 64667@@ -450,18 +450,18 @@ void __init change_floppy(char *fmt, ...)
58c5fc13
MT
64668 va_start(args, fmt);
64669 vsprintf(buf, fmt, args);
64670 va_end(args);
64671- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
64672+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
64673 if (fd >= 0) {
64674 sys_ioctl(fd, FDEJECT, 0);
64675 sys_close(fd);
64676 }
64677 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
64678- fd = sys_open("/dev/console", O_RDWR, 0);
df50ba0c 64679+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
58c5fc13
MT
64680 if (fd >= 0) {
64681 sys_ioctl(fd, TCGETS, (long)&termios);
64682 termios.c_lflag &= ~ICANON;
64683 sys_ioctl(fd, TCSETSF, (long)&termios);
64684- sys_read(fd, &c, 1);
64685+ sys_read(fd, (char __user *)&c, 1);
64686 termios.c_lflag |= ICANON;
64687 sys_ioctl(fd, TCSETSF, (long)&termios);
64688 sys_close(fd);
5e856224 64689@@ -555,6 +555,6 @@ void __init prepare_namespace(void)
58c5fc13
MT
64690 mount_root();
64691 out:
ae4e228f 64692 devtmpfs_mount("dev");
58c5fc13 64693- sys_mount(".", "/", NULL, MS_MOVE, NULL);
6e9df6a3
MT
64694- sys_chroot((const char __user __force *)".");
64695+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64696+ sys_chroot((const char __force_user *)".");
58c5fc13 64697 }
fe2de317
MT
64698diff --git a/init/do_mounts.h b/init/do_mounts.h
64699index f5b978a..69dbfe8 100644
64700--- a/init/do_mounts.h
64701+++ b/init/do_mounts.h
58c5fc13
MT
64702@@ -15,15 +15,15 @@ extern int root_mountflags;
64703
64704 static inline int create_dev(char *name, dev_t dev)
64705 {
64706- sys_unlink(name);
64707- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
6e9df6a3
MT
64708+ sys_unlink((char __force_user *)name);
64709+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
58c5fc13
MT
64710 }
64711
64712 #if BITS_PER_LONG == 32
64713 static inline u32 bstat(char *name)
64714 {
64715 struct stat64 stat;
64716- if (sys_stat64(name, &stat) != 0)
6e9df6a3
MT
64717+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
64718 return 0;
64719 if (!S_ISBLK(stat.st_mode))
64720 return 0;
64721@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
64722 static inline u32 bstat(char *name)
64723 {
64724 struct stat stat;
64725- if (sys_newstat(name, &stat) != 0)
64726+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
58c5fc13
MT
64727 return 0;
64728 if (!S_ISBLK(stat.st_mode))
64729 return 0;
fe2de317 64730diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
c6e2a6c8 64731index 9047330..de0d1fb 100644
fe2de317
MT
64732--- a/init/do_mounts_initrd.c
64733+++ b/init/do_mounts_initrd.c
c6e2a6c8 64734@@ -43,13 +43,13 @@ static void __init handle_initrd(void)
58c5fc13
MT
64735 create_dev("/dev/root.old", Root_RAM0);
64736 /* mount initrd on rootfs' /root */
64737 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
64738- sys_mkdir("/old", 0700);
64739- root_fd = sys_open("/", 0, 0);
64740- old_fd = sys_open("/old", 0, 0);
6e9df6a3
MT
64741+ sys_mkdir((const char __force_user *)"/old", 0700);
64742+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
64743+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
58c5fc13
MT
64744 /* move initrd over / and chdir/chroot in initrd root */
64745- sys_chdir("/root");
64746- sys_mount(".", "/", NULL, MS_MOVE, NULL);
64747- sys_chroot(".");
6e9df6a3
MT
64748+ sys_chdir((const char __force_user *)"/root");
64749+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
64750+ sys_chroot((const char __force_user *)".");
58c5fc13
MT
64751
64752 /*
64753 * In case that a resume from disk is carried out by linuxrc or one of
c6e2a6c8 64754@@ -66,15 +66,15 @@ static void __init handle_initrd(void)
58c5fc13
MT
64755
64756 /* move initrd to rootfs' /old */
64757 sys_fchdir(old_fd);
64758- sys_mount("/", ".", NULL, MS_MOVE, NULL);
6e9df6a3 64759+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
58c5fc13
MT
64760 /* switch root and cwd back to / of rootfs */
64761 sys_fchdir(root_fd);
64762- sys_chroot(".");
6e9df6a3 64763+ sys_chroot((const char __force_user *)".");
58c5fc13
MT
64764 sys_close(old_fd);
64765 sys_close(root_fd);
64766
64767 if (new_decode_dev(real_root_dev) == Root_RAM0) {
64768- sys_chdir("/old");
6e9df6a3 64769+ sys_chdir((const char __force_user *)"/old");
58c5fc13
MT
64770 return;
64771 }
64772
c6e2a6c8 64773@@ -82,17 +82,17 @@ static void __init handle_initrd(void)
58c5fc13
MT
64774 mount_root();
64775
64776 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
64777- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
6e9df6a3 64778+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
58c5fc13
MT
64779 if (!error)
64780 printk("okay\n");
64781 else {
64782- int fd = sys_open("/dev/root.old", O_RDWR, 0);
6e9df6a3 64783+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
58c5fc13
MT
64784 if (error == -ENOENT)
64785 printk("/initrd does not exist. Ignored.\n");
64786 else
64787 printk("failed\n");
64788 printk(KERN_NOTICE "Unmounting old root\n");
64789- sys_umount("/old", MNT_DETACH);
6e9df6a3 64790+ sys_umount((char __force_user *)"/old", MNT_DETACH);
58c5fc13
MT
64791 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
64792 if (fd < 0) {
64793 error = fd;
c6e2a6c8 64794@@ -115,11 +115,11 @@ int __init initrd_load(void)
58c5fc13
MT
64795 * mounted in the normal path.
64796 */
64797 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
64798- sys_unlink("/initrd.image");
6e9df6a3 64799+ sys_unlink((const char __force_user *)"/initrd.image");
58c5fc13
MT
64800 handle_initrd();
64801 return 1;
64802 }
64803 }
64804- sys_unlink("/initrd.image");
6e9df6a3 64805+ sys_unlink((const char __force_user *)"/initrd.image");
58c5fc13
MT
64806 return 0;
64807 }
fe2de317
MT
64808diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
64809index 32c4799..c27ee74 100644
64810--- a/init/do_mounts_md.c
64811+++ b/init/do_mounts_md.c
58c5fc13
MT
64812@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
64813 partitioned ? "_d" : "", minor,
64814 md_setup_args[ent].device_names);
64815
64816- fd = sys_open(name, 0, 0);
6e9df6a3 64817+ fd = sys_open((char __force_user *)name, 0, 0);
58c5fc13
MT
64818 if (fd < 0) {
64819 printk(KERN_ERR "md: open failed - cannot start "
64820 "array %s\n", name);
64821@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
64822 * array without it
64823 */
64824 sys_close(fd);
64825- fd = sys_open(name, 0, 0);
6e9df6a3 64826+ fd = sys_open((char __force_user *)name, 0, 0);
58c5fc13
MT
64827 sys_ioctl(fd, BLKRRPART, 0);
64828 }
64829 sys_close(fd);
6e9df6a3
MT
64830@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
64831
64832 wait_for_device_probe();
64833
64834- fd = sys_open((const char __user __force *) "/dev/md0", 0, 0);
64835+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
64836 if (fd >= 0) {
64837 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
64838 sys_close(fd);
fe2de317 64839diff --git a/init/initramfs.c b/init/initramfs.c
5e856224 64840index 8216c30..25e8e32 100644
fe2de317
MT
64841--- a/init/initramfs.c
64842+++ b/init/initramfs.c
ae4e228f
MT
64843@@ -74,7 +74,7 @@ static void __init free_hash(void)
64844 }
64845 }
64846
64847-static long __init do_utime(char __user *filename, time_t mtime)
64848+static long __init do_utime(__force char __user *filename, time_t mtime)
64849 {
64850 struct timespec t[2];
64851
64852@@ -109,7 +109,7 @@ static void __init dir_utime(void)
64853 struct dir_entry *de, *tmp;
64854 list_for_each_entry_safe(de, tmp, &dir_list, list) {
64855 list_del(&de->list);
64856- do_utime(de->name, de->mtime);
6e9df6a3 64857+ do_utime((char __force_user *)de->name, de->mtime);
ae4e228f
MT
64858 kfree(de->name);
64859 kfree(de);
64860 }
58c5fc13
MT
64861@@ -271,7 +271,7 @@ static int __init maybe_link(void)
64862 if (nlink >= 2) {
64863 char *old = find_link(major, minor, ino, mode, collected);
64864 if (old)
64865- return (sys_link(old, collected) < 0) ? -1 : 1;
6e9df6a3 64866+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
58c5fc13
MT
64867 }
64868 return 0;
64869 }
5e856224 64870@@ -280,11 +280,11 @@ static void __init clean_path(char *path, umode_t mode)
58c5fc13
MT
64871 {
64872 struct stat st;
64873
64874- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
6e9df6a3 64875+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
58c5fc13
MT
64876 if (S_ISDIR(st.st_mode))
64877- sys_rmdir(path);
6e9df6a3 64878+ sys_rmdir((char __force_user *)path);
58c5fc13
MT
64879 else
64880- sys_unlink(path);
6e9df6a3 64881+ sys_unlink((char __force_user *)path);
58c5fc13
MT
64882 }
64883 }
64884
64885@@ -305,7 +305,7 @@ static int __init do_name(void)
64886 int openflags = O_WRONLY|O_CREAT;
64887 if (ml != 1)
64888 openflags |= O_TRUNC;
64889- wfd = sys_open(collected, openflags, mode);
6e9df6a3 64890+ wfd = sys_open((char __force_user *)collected, openflags, mode);
58c5fc13
MT
64891
64892 if (wfd >= 0) {
64893 sys_fchown(wfd, uid, gid);
ae4e228f 64894@@ -317,17 +317,17 @@ static int __init do_name(void)
58c5fc13
MT
64895 }
64896 }
64897 } else if (S_ISDIR(mode)) {
64898- sys_mkdir(collected, mode);
64899- sys_chown(collected, uid, gid);
64900- sys_chmod(collected, mode);
6e9df6a3
MT
64901+ sys_mkdir((char __force_user *)collected, mode);
64902+ sys_chown((char __force_user *)collected, uid, gid);
64903+ sys_chmod((char __force_user *)collected, mode);
58c5fc13
MT
64904 dir_add(collected, mtime);
64905 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
64906 S_ISFIFO(mode) || S_ISSOCK(mode)) {
64907 if (maybe_link() == 0) {
64908- sys_mknod(collected, mode, rdev);
64909- sys_chown(collected, uid, gid);
64910- sys_chmod(collected, mode);
ae4e228f 64911- do_utime(collected, mtime);
6e9df6a3
MT
64912+ sys_mknod((char __force_user *)collected, mode, rdev);
64913+ sys_chown((char __force_user *)collected, uid, gid);
64914+ sys_chmod((char __force_user *)collected, mode);
64915+ do_utime((char __force_user *)collected, mtime);
58c5fc13
MT
64916 }
64917 }
ae4e228f
MT
64918 return 0;
64919@@ -336,15 +336,15 @@ static int __init do_name(void)
58c5fc13
MT
64920 static int __init do_copy(void)
64921 {
64922 if (count >= body_len) {
64923- sys_write(wfd, victim, body_len);
6e9df6a3 64924+ sys_write(wfd, (char __force_user *)victim, body_len);
58c5fc13 64925 sys_close(wfd);
ae4e228f 64926- do_utime(vcollected, mtime);
6e9df6a3 64927+ do_utime((char __force_user *)vcollected, mtime);
58c5fc13 64928 kfree(vcollected);
ae4e228f 64929 eat(body_len);
58c5fc13
MT
64930 state = SkipIt;
64931 return 0;
64932 } else {
64933- sys_write(wfd, victim, count);
6e9df6a3 64934+ sys_write(wfd, (char __force_user *)victim, count);
58c5fc13
MT
64935 body_len -= count;
64936 eat(count);
64937 return 1;
ae4e228f 64938@@ -355,9 +355,9 @@ static int __init do_symlink(void)
58c5fc13
MT
64939 {
64940 collected[N_ALIGN(name_len) + body_len] = '\0';
64941 clean_path(collected, 0);
64942- sys_symlink(collected + N_ALIGN(name_len), collected);
64943- sys_lchown(collected, uid, gid);
ae4e228f 64944- do_utime(collected, mtime);
6e9df6a3
MT
64945+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
64946+ sys_lchown((char __force_user *)collected, uid, gid);
64947+ do_utime((char __force_user *)collected, mtime);
58c5fc13
MT
64948 state = SkipIt;
64949 next_state = Reset;
ae4e228f 64950 return 0;
fe2de317 64951diff --git a/init/main.c b/init/main.c
572b4308 64952index b08c5f7..bf65a52 100644
fe2de317
MT
64953--- a/init/main.c
64954+++ b/init/main.c
c6e2a6c8 64955@@ -95,6 +95,8 @@ static inline void mark_rodata_ro(void) { }
58c5fc13
MT
64956 extern void tc_init(void);
64957 #endif
58c5fc13 64958
16454cff
MT
64959+extern void grsecurity_init(void);
64960+
64961 /*
64962 * Debug helper: via this flag we know that we are in 'early bootup code'
64963 * where only the boot processor is running with IRQ disabled. This means
c6e2a6c8 64964@@ -148,6 +150,49 @@ static int __init set_reset_devices(char *str)
58c5fc13
MT
64965
64966 __setup("reset_devices", set_reset_devices);
64967
df50ba0c 64968+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
bc901d79
MT
64969+extern char pax_enter_kernel_user[];
64970+extern char pax_exit_kernel_user[];
df50ba0c
MT
64971+extern pgdval_t clone_pgd_mask;
64972+#endif
64973+
64974+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58c5fc13
MT
64975+static int __init setup_pax_nouderef(char *str)
64976+{
df50ba0c 64977+#ifdef CONFIG_X86_32
58c5fc13 64978+ unsigned int cpu;
66a7e928 64979+ struct desc_struct *gdt;
58c5fc13 64980+
4c928ab7 64981+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
66a7e928
MT
64982+ gdt = get_cpu_gdt_table(cpu);
64983+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
64984+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
64985+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
64986+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58c5fc13 64987+ }
bc901d79 64988+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
df50ba0c 64989+#else
6892158b
MT
64990+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
64991+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
df50ba0c
MT
64992+ clone_pgd_mask = ~(pgdval_t)0UL;
64993+#endif
58c5fc13
MT
64994+
64995+ return 0;
64996+}
64997+early_param("pax_nouderef", setup_pax_nouderef);
64998+#endif
64999+
65000+#ifdef CONFIG_PAX_SOFTMODE
15a11c5b 65001+int pax_softmode;
58c5fc13
MT
65002+
65003+static int __init setup_pax_softmode(char *str)
65004+{
65005+ get_option(&str, &pax_softmode);
65006+ return 1;
65007+}
65008+__setup("pax_softmode=", setup_pax_softmode);
65009+#endif
65010+
6892158b
MT
65011 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
65012 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58c5fc13 65013 static const char *panic_later, *panic_param;
c6e2a6c8 65014@@ -674,6 +719,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
58c5fc13
MT
65015 {
65016 int count = preempt_count();
6892158b 65017 int ret;
58c5fc13
MT
65018+ const char *msg1 = "", *msg2 = "";
65019
6892158b
MT
65020 if (initcall_debug)
65021 ret = do_one_initcall_debug(fn);
c6e2a6c8 65022@@ -686,15 +732,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
6892158b 65023 sprintf(msgbuf, "error code %d ", ret);
58c5fc13
MT
65024
65025 if (preempt_count() != count) {
65026- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
65027+ msg1 = " preemption imbalance";
65028 preempt_count() = count;
65029 }
65030 if (irqs_disabled()) {
65031- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
65032+ msg2 = " disabled interrupts";
65033 local_irq_enable();
65034 }
65035- if (msgbuf[0]) {
65036- printk("initcall %pF returned with %s\n", fn, msgbuf);
65037+ if (msgbuf[0] || *msg1 || *msg2) {
65038+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
65039 }
65040
6892158b 65041 return ret;
572b4308
MT
65042@@ -747,8 +793,14 @@ static void __init do_initcall_level(int level)
65043 level, level,
65044 repair_env_string);
65045
65046- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
65047+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
65048 do_one_initcall(*fn);
65049+
65050+#ifdef CONFIG_PAX_LATENT_ENTROPY
65051+ transfer_latent_entropy();
65052+#endif
65053+
65054+ }
65055 }
65056
65057 static void __init do_initcalls(void)
65058@@ -782,8 +834,14 @@ static void __init do_pre_smp_initcalls(void)
65059 {
65060 initcall_t *fn;
65061
65062- for (fn = __initcall_start; fn < __initcall0_start; fn++)
65063+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
65064 do_one_initcall(*fn);
65065+
65066+#ifdef CONFIG_PAX_LATENT_ENTROPY
65067+ transfer_latent_entropy();
65068+#endif
65069+
65070+ }
65071 }
65072
65073 static void run_init_process(const char *init_filename)
65074@@ -865,7 +923,7 @@ static int __init kernel_init(void * unused)
df50ba0c
MT
65075 do_basic_setup();
65076
65077 /* Open the /dev/console on the rootfs, this should never fail */
65078- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
6e9df6a3 65079+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
df50ba0c
MT
65080 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
65081
65082 (void) sys_dup(0);
572b4308 65083@@ -878,11 +936,13 @@ static int __init kernel_init(void * unused)
ae4e228f
MT
65084 if (!ramdisk_execute_command)
65085 ramdisk_execute_command = "/init";
65086
65087- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
6e9df6a3 65088+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
ae4e228f 65089 ramdisk_execute_command = NULL;
58c5fc13
MT
65090 prepare_namespace();
65091 }
65092
65093+ grsecurity_init();
65094+
65095 /*
65096 * Ok, we have completed the initial bootup, and
65097 * we're essentially up and running. Get rid of the
fe2de317 65098diff --git a/ipc/mqueue.c b/ipc/mqueue.c
c6e2a6c8 65099index 28bd64d..c66b72a 100644
fe2de317
MT
65100--- a/ipc/mqueue.c
65101+++ b/ipc/mqueue.c
65102@@ -156,6 +156,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
6e9df6a3
MT
65103 mq_bytes = (mq_msg_tblsz +
65104 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
65105
65106+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
65107 spin_lock(&mq_lock);
65108 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
5e856224 65109 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
fe2de317 65110diff --git a/ipc/msg.c b/ipc/msg.c
4c928ab7 65111index 7385de2..a8180e08 100644
fe2de317
MT
65112--- a/ipc/msg.c
65113+++ b/ipc/msg.c
65114@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
15a11c5b
MT
65115 return security_msg_queue_associate(msq, msgflg);
65116 }
65117
65118+static struct ipc_ops msg_ops = {
65119+ .getnew = newque,
65120+ .associate = msg_security,
65121+ .more_checks = NULL
65122+};
65123+
65124 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
65125 {
65126 struct ipc_namespace *ns;
65127- struct ipc_ops msg_ops;
65128 struct ipc_params msg_params;
65129
65130 ns = current->nsproxy->ipc_ns;
65131
65132- msg_ops.getnew = newque;
65133- msg_ops.associate = msg_security;
65134- msg_ops.more_checks = NULL;
65135-
65136 msg_params.key = key;
65137 msg_params.flg = msgflg;
65138
fe2de317 65139diff --git a/ipc/sem.c b/ipc/sem.c
4c928ab7 65140index 5215a81..cfc0cac 100644
fe2de317
MT
65141--- a/ipc/sem.c
65142+++ b/ipc/sem.c
4c928ab7 65143@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
15a11c5b
MT
65144 return 0;
65145 }
65146
65147+static struct ipc_ops sem_ops = {
65148+ .getnew = newary,
65149+ .associate = sem_security,
65150+ .more_checks = sem_more_checks
65151+};
65152+
65153 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
65154 {
65155 struct ipc_namespace *ns;
65156- struct ipc_ops sem_ops;
65157 struct ipc_params sem_params;
65158
65159 ns = current->nsproxy->ipc_ns;
4c928ab7 65160@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
15a11c5b
MT
65161 if (nsems < 0 || nsems > ns->sc_semmsl)
65162 return -EINVAL;
65163
65164- sem_ops.getnew = newary;
65165- sem_ops.associate = sem_security;
65166- sem_ops.more_checks = sem_more_checks;
65167-
65168 sem_params.key = key;
65169 sem_params.flg = semflg;
65170 sem_params.u.nsems = nsems;
fe2de317 65171diff --git a/ipc/shm.c b/ipc/shm.c
c6e2a6c8 65172index 406c5b2..bc66d67 100644
fe2de317
MT
65173--- a/ipc/shm.c
65174+++ b/ipc/shm.c
65175@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
58c5fc13
MT
65176 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
65177 #endif
65178
65179+#ifdef CONFIG_GRKERNSEC
65180+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65181+ const time_t shm_createtime, const uid_t cuid,
65182+ const int shmid);
65183+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
65184+ const time_t shm_createtime);
65185+#endif
65186+
65187 void shm_init_ns(struct ipc_namespace *ns)
65188 {
65189 ns->shm_ctlmax = SHMMAX;
fe2de317 65190@@ -508,6 +516,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
58c5fc13
MT
65191 shp->shm_lprid = 0;
65192 shp->shm_atim = shp->shm_dtim = 0;
65193 shp->shm_ctim = get_seconds();
65194+#ifdef CONFIG_GRKERNSEC
65195+ {
65196+ struct timespec timeval;
65197+ do_posix_clock_monotonic_gettime(&timeval);
65198+
65199+ shp->shm_createtime = timeval.tv_sec;
65200+ }
65201+#endif
65202 shp->shm_segsz = size;
65203 shp->shm_nattch = 0;
65204 shp->shm_file = file;
fe2de317 65205@@ -559,18 +575,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
15a11c5b
MT
65206 return 0;
65207 }
65208
65209+static struct ipc_ops shm_ops = {
65210+ .getnew = newseg,
65211+ .associate = shm_security,
65212+ .more_checks = shm_more_checks
65213+};
65214+
65215 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
65216 {
65217 struct ipc_namespace *ns;
65218- struct ipc_ops shm_ops;
65219 struct ipc_params shm_params;
65220
65221 ns = current->nsproxy->ipc_ns;
65222
65223- shm_ops.getnew = newseg;
65224- shm_ops.associate = shm_security;
65225- shm_ops.more_checks = shm_more_checks;
65226-
65227 shm_params.key = key;
65228 shm_params.flg = shmflg;
65229 shm_params.u.size = size;
4c928ab7
MT
65230@@ -988,6 +1005,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
65231 f_mode = FMODE_READ | FMODE_WRITE;
65232 }
65233 if (shmflg & SHM_EXEC) {
65234+
65235+#ifdef CONFIG_PAX_MPROTECT
65236+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
65237+ goto out;
65238+#endif
65239+
65240 prot |= PROT_EXEC;
65241 acc_mode |= S_IXUGO;
65242 }
65243@@ -1011,9 +1034,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
58c5fc13
MT
65244 if (err)
65245 goto out_unlock;
65246
65247+#ifdef CONFIG_GRKERNSEC
65248+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
65249+ shp->shm_perm.cuid, shmid) ||
65250+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
65251+ err = -EACCES;
65252+ goto out_unlock;
65253+ }
65254+#endif
65255+
ae4e228f
MT
65256 path = shp->shm_file->f_path;
65257 path_get(&path);
58c5fc13
MT
65258 shp->shm_nattch++;
65259+#ifdef CONFIG_GRKERNSEC
65260+ shp->shm_lapid = current->pid;
65261+#endif
65262 size = i_size_read(path.dentry->d_inode);
65263 shm_unlock(shp);
65264
fe2de317 65265diff --git a/kernel/acct.c b/kernel/acct.c
5e856224 65266index 02e6167..54824f7 100644
fe2de317
MT
65267--- a/kernel/acct.c
65268+++ b/kernel/acct.c
5e856224 65269@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
58c5fc13
MT
65270 */
65271 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
65272 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
65273- file->f_op->write(file, (char *)&ac,
6e9df6a3 65274+ file->f_op->write(file, (char __force_user *)&ac,
58c5fc13
MT
65275 sizeof(acct_t), &file->f_pos);
65276 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
65277 set_fs(fs);
fe2de317 65278diff --git a/kernel/audit.c b/kernel/audit.c
c6e2a6c8 65279index 1c7f2c6..9ba5359 100644
fe2de317
MT
65280--- a/kernel/audit.c
65281+++ b/kernel/audit.c
6e9df6a3 65282@@ -115,7 +115,7 @@ u32 audit_sig_sid = 0;
8308f9c9
MT
65283 3) suppressed due to audit_rate_limit
65284 4) suppressed due to audit_backlog_limit
65285 */
65286-static atomic_t audit_lost = ATOMIC_INIT(0);
65287+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
65288
65289 /* The netlink socket. */
65290 static struct sock *audit_sock;
6e9df6a3 65291@@ -237,7 +237,7 @@ void audit_log_lost(const char *message)
8308f9c9
MT
65292 unsigned long now;
65293 int print;
65294
65295- atomic_inc(&audit_lost);
65296+ atomic_inc_unchecked(&audit_lost);
65297
65298 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
65299
6e9df6a3 65300@@ -256,7 +256,7 @@ void audit_log_lost(const char *message)
8308f9c9
MT
65301 printk(KERN_WARNING
65302 "audit: audit_lost=%d audit_rate_limit=%d "
65303 "audit_backlog_limit=%d\n",
65304- atomic_read(&audit_lost),
65305+ atomic_read_unchecked(&audit_lost),
65306 audit_rate_limit,
65307 audit_backlog_limit);
65308 audit_panic(message);
fe2de317 65309@@ -689,7 +689,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
8308f9c9
MT
65310 status_set.pid = audit_pid;
65311 status_set.rate_limit = audit_rate_limit;
65312 status_set.backlog_limit = audit_backlog_limit;
65313- status_set.lost = atomic_read(&audit_lost);
65314+ status_set.lost = atomic_read_unchecked(&audit_lost);
65315 status_set.backlog = skb_queue_len(&audit_skb_queue);
65316 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
65317 &status_set, sizeof(status_set));
fe2de317 65318diff --git a/kernel/auditsc.c b/kernel/auditsc.c
5e856224 65319index af1de0f..06dfe57 100644
fe2de317
MT
65320--- a/kernel/auditsc.c
65321+++ b/kernel/auditsc.c
5e856224 65322@@ -2288,7 +2288,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
8308f9c9
MT
65323 }
65324
65325 /* global counter which is incremented every time something logs in */
65326-static atomic_t session_id = ATOMIC_INIT(0);
65327+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
65328
65329 /**
5e856224
MT
65330 * audit_set_loginuid - set current task's audit_context loginuid
65331@@ -2312,7 +2312,7 @@ int audit_set_loginuid(uid_t loginuid)
65332 return -EPERM;
65333 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
8308f9c9 65334
5e856224
MT
65335- sessionid = atomic_inc_return(&session_id);
65336+ sessionid = atomic_inc_return_unchecked(&session_id);
8308f9c9 65337 if (context && context->in_syscall) {
5e856224
MT
65338 struct audit_buffer *ab;
65339
fe2de317 65340diff --git a/kernel/capability.c b/kernel/capability.c
5e856224 65341index 3f1adb6..c564db0 100644
fe2de317
MT
65342--- a/kernel/capability.c
65343+++ b/kernel/capability.c
65344@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
ae4e228f
MT
65345 * before modification is attempted and the application
65346 * fails.
65347 */
65348+ if (tocopy > ARRAY_SIZE(kdata))
65349+ return -EFAULT;
65350+
65351 if (copy_to_user(dataptr, kdata, tocopy
65352 * sizeof(struct __user_cap_data_struct))) {
65353 return -EFAULT;
5e856224
MT
65354@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
65355 int ret;
65356
65357 rcu_read_lock();
65358- ret = security_capable(__task_cred(t), ns, cap);
65359+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
65360+ gr_task_is_capable(t, __task_cred(t), cap);
65361 rcu_read_unlock();
65362
65363- return (ret == 0);
65364+ return ret;
65365 }
65366
65367 /**
65368@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
65369 int ret;
65370
65371 rcu_read_lock();
65372- ret = security_capable_noaudit(__task_cred(t), ns, cap);
65373+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
65374 rcu_read_unlock();
65375
65376- return (ret == 0);
65377+ return ret;
65378 }
65379
65380 /**
65381@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
58c5fc13
MT
65382 BUG();
65383 }
65384
5e856224
MT
65385- if (security_capable(current_cred(), ns, cap) == 0) {
65386+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
ae4e228f 65387 current->flags |= PF_SUPERPRIV;
66a7e928 65388 return true;
ae4e228f 65389 }
5e856224 65390@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
ae4e228f 65391 }
66a7e928
MT
65392 EXPORT_SYMBOL(ns_capable);
65393
65394+bool ns_capable_nolog(struct user_namespace *ns, int cap)
bc901d79
MT
65395+{
65396+ if (unlikely(!cap_valid(cap))) {
65397+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
65398+ BUG();
65399+ }
65400+
5e856224 65401+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
bc901d79 65402+ current->flags |= PF_SUPERPRIV;
66a7e928 65403+ return true;
bc901d79 65404+ }
66a7e928 65405+ return false;
bc901d79 65406+}
66a7e928 65407+EXPORT_SYMBOL(ns_capable_nolog);
66a7e928
MT
65408+
65409 /**
5e856224
MT
65410 * capable - Determine if the current task has a superior capability in effect
65411 * @cap: The capability to be tested for
65412@@ -408,6 +427,12 @@ bool capable(int cap)
66a7e928 65413 }
5e856224 65414 EXPORT_SYMBOL(capable);
66a7e928 65415
5e856224 65416+bool capable_nolog(int cap)
66a7e928 65417+{
5e856224 65418+ return ns_capable_nolog(&init_user_ns, cap);
66a7e928 65419+}
5e856224 65420+EXPORT_SYMBOL(capable_nolog);
66a7e928
MT
65421+
65422 /**
65423 * nsown_capable - Check superior capability to one's own user_ns
65424 * @cap: The capability in question
fe2de317 65425diff --git a/kernel/compat.c b/kernel/compat.c
c6e2a6c8 65426index d2c67aa..a629b2e 100644
fe2de317
MT
65427--- a/kernel/compat.c
65428+++ b/kernel/compat.c
57199397
MT
65429@@ -13,6 +13,7 @@
65430
65431 #include <linux/linkage.h>
65432 #include <linux/compat.h>
65433+#include <linux/module.h>
65434 #include <linux/errno.h>
65435 #include <linux/time.h>
65436 #include <linux/signal.h>
c6e2a6c8 65437@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
6e9df6a3
MT
65438 mm_segment_t oldfs;
65439 long ret;
65440
65441- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
65442+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
65443 oldfs = get_fs();
65444 set_fs(KERNEL_DS);
65445 ret = hrtimer_nanosleep_restart(restart);
c6e2a6c8 65446@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
6e9df6a3
MT
65447 oldfs = get_fs();
65448 set_fs(KERNEL_DS);
65449 ret = hrtimer_nanosleep(&tu,
65450- rmtp ? (struct timespec __user *)&rmt : NULL,
65451+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
65452 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
65453 set_fs(oldfs);
65454
c6e2a6c8 65455@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
6e9df6a3
MT
65456 mm_segment_t old_fs = get_fs();
65457
65458 set_fs(KERNEL_DS);
65459- ret = sys_sigpending((old_sigset_t __user *) &s);
65460+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
65461 set_fs(old_fs);
65462 if (ret == 0)
65463 ret = put_user(s, set);
c6e2a6c8 65464@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
6e9df6a3
MT
65465 mm_segment_t old_fs = get_fs();
65466
65467 set_fs(KERNEL_DS);
65468- ret = sys_old_getrlimit(resource, &r);
65469+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
65470 set_fs(old_fs);
65471
65472 if (!ret) {
c6e2a6c8 65473@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
6e9df6a3
MT
65474 mm_segment_t old_fs = get_fs();
65475
65476 set_fs(KERNEL_DS);
65477- ret = sys_getrusage(who, (struct rusage __user *) &r);
65478+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
65479 set_fs(old_fs);
65480
65481 if (ret)
c6e2a6c8 65482@@ -550,8 +551,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
6e9df6a3
MT
65483 set_fs (KERNEL_DS);
65484 ret = sys_wait4(pid,
65485 (stat_addr ?
65486- (unsigned int __user *) &status : NULL),
65487- options, (struct rusage __user *) &r);
65488+ (unsigned int __force_user *) &status : NULL),
65489+ options, (struct rusage __force_user *) &r);
65490 set_fs (old_fs);
65491
65492 if (ret > 0) {
c6e2a6c8 65493@@ -576,8 +577,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
6e9df6a3
MT
65494 memset(&info, 0, sizeof(info));
65495
65496 set_fs(KERNEL_DS);
65497- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
65498- uru ? (struct rusage __user *)&ru : NULL);
65499+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
65500+ uru ? (struct rusage __force_user *)&ru : NULL);
65501 set_fs(old_fs);
65502
65503 if ((ret < 0) || (info.si_signo == 0))
c6e2a6c8 65504@@ -707,8 +708,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
6e9df6a3
MT
65505 oldfs = get_fs();
65506 set_fs(KERNEL_DS);
65507 err = sys_timer_settime(timer_id, flags,
65508- (struct itimerspec __user *) &newts,
65509- (struct itimerspec __user *) &oldts);
65510+ (struct itimerspec __force_user *) &newts,
65511+ (struct itimerspec __force_user *) &oldts);
65512 set_fs(oldfs);
65513 if (!err && old && put_compat_itimerspec(old, &oldts))
65514 return -EFAULT;
c6e2a6c8 65515@@ -725,7 +726,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
6e9df6a3
MT
65516 oldfs = get_fs();
65517 set_fs(KERNEL_DS);
65518 err = sys_timer_gettime(timer_id,
65519- (struct itimerspec __user *) &ts);
65520+ (struct itimerspec __force_user *) &ts);
65521 set_fs(oldfs);
65522 if (!err && put_compat_itimerspec(setting, &ts))
65523 return -EFAULT;
c6e2a6c8 65524@@ -744,7 +745,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
6e9df6a3
MT
65525 oldfs = get_fs();
65526 set_fs(KERNEL_DS);
65527 err = sys_clock_settime(which_clock,
65528- (struct timespec __user *) &ts);
65529+ (struct timespec __force_user *) &ts);
65530 set_fs(oldfs);
65531 return err;
65532 }
c6e2a6c8 65533@@ -759,7 +760,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
6e9df6a3
MT
65534 oldfs = get_fs();
65535 set_fs(KERNEL_DS);
65536 err = sys_clock_gettime(which_clock,
65537- (struct timespec __user *) &ts);
65538+ (struct timespec __force_user *) &ts);
65539 set_fs(oldfs);
65540 if (!err && put_compat_timespec(&ts, tp))
65541 return -EFAULT;
c6e2a6c8 65542@@ -779,7 +780,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
6e9df6a3
MT
65543
65544 oldfs = get_fs();
65545 set_fs(KERNEL_DS);
65546- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
65547+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
65548 set_fs(oldfs);
65549
65550 err = compat_put_timex(utp, &txc);
c6e2a6c8 65551@@ -799,7 +800,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
6e9df6a3
MT
65552 oldfs = get_fs();
65553 set_fs(KERNEL_DS);
65554 err = sys_clock_getres(which_clock,
65555- (struct timespec __user *) &ts);
65556+ (struct timespec __force_user *) &ts);
65557 set_fs(oldfs);
65558 if (!err && tp && put_compat_timespec(&ts, tp))
65559 return -EFAULT;
c6e2a6c8 65560@@ -811,9 +812,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
6e9df6a3
MT
65561 long err;
65562 mm_segment_t oldfs;
65563 struct timespec tu;
65564- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
65565+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
65566
65567- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
65568+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
65569 oldfs = get_fs();
65570 set_fs(KERNEL_DS);
65571 err = clock_nanosleep_restart(restart);
c6e2a6c8 65572@@ -845,8 +846,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
6e9df6a3
MT
65573 oldfs = get_fs();
65574 set_fs(KERNEL_DS);
65575 err = sys_clock_nanosleep(which_clock, flags,
65576- (struct timespec __user *) &in,
65577- (struct timespec __user *) &out);
65578+ (struct timespec __force_user *) &in,
65579+ (struct timespec __force_user *) &out);
65580 set_fs(oldfs);
65581
65582 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
fe2de317
MT
65583diff --git a/kernel/configs.c b/kernel/configs.c
65584index 42e8fa0..9e7406b 100644
65585--- a/kernel/configs.c
65586+++ b/kernel/configs.c
bc901d79 65587@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
58c5fc13
MT
65588 struct proc_dir_entry *entry;
65589
65590 /* create the current config file */
65591+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
65592+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
65593+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
65594+ &ikconfig_file_ops);
65595+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65596+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
65597+ &ikconfig_file_ops);
65598+#endif
65599+#else
65600 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
65601 &ikconfig_file_ops);
65602+#endif
65603+
65604 if (!entry)
65605 return -ENOMEM;
65606
fe2de317 65607diff --git a/kernel/cred.c b/kernel/cred.c
c6e2a6c8 65608index e70683d..27761b6 100644
fe2de317
MT
65609--- a/kernel/cred.c
65610+++ b/kernel/cred.c
c6e2a6c8 65611@@ -205,6 +205,15 @@ void exit_creds(struct task_struct *tsk)
4c928ab7
MT
65612 validate_creds(cred);
65613 put_cred(cred);
65614 }
66a7e928 65615+
4c928ab7
MT
65616+#ifdef CONFIG_GRKERNSEC_SETXID
65617+ cred = (struct cred *) tsk->delayed_cred;
65618+ if (cred) {
65619+ tsk->delayed_cred = NULL;
65620+ validate_creds(cred);
65621+ put_cred(cred);
65622+ }
65623+#endif
65624 }
66a7e928 65625
4c928ab7 65626 /**
c6e2a6c8 65627@@ -473,7 +482,7 @@ error_put:
4c928ab7
MT
65628 * Always returns 0 thus allowing this function to be tail-called at the end
65629 * of, say, sys_setgid().
65630 */
65631-int commit_creds(struct cred *new)
65632+static int __commit_creds(struct cred *new)
66a7e928 65633 {
66a7e928
MT
65634 struct task_struct *task = current;
65635 const struct cred *old = task->real_cred;
c6e2a6c8 65636@@ -492,6 +501,8 @@ int commit_creds(struct cred *new)
58c5fc13
MT
65637
65638 get_cred(new); /* we will require a ref for the subj creds too */
65639
65640+ gr_set_role_label(task, new->uid, new->gid);
65641+
65642 /* dumpability changes */
65643 if (old->euid != new->euid ||
65644 old->egid != new->egid ||
c6e2a6c8 65645@@ -541,6 +552,101 @@ int commit_creds(struct cred *new)
4c928ab7
MT
65646 put_cred(old);
65647 return 0;
65648 }
65649+#ifdef CONFIG_GRKERNSEC_SETXID
65650+extern int set_user(struct cred *new);
66a7e928 65651+
4c928ab7
MT
65652+void gr_delayed_cred_worker(void)
65653+{
65654+ const struct cred *new = current->delayed_cred;
65655+ struct cred *ncred;
66a7e928 65656+
4c928ab7 65657+ current->delayed_cred = NULL;
66a7e928 65658+
4c928ab7
MT
65659+ if (current_uid() && new != NULL) {
65660+ // from doing get_cred on it when queueing this
65661+ put_cred(new);
65662+ return;
65663+ } else if (new == NULL)
65664+ return;
66a7e928 65665+
4c928ab7
MT
65666+ ncred = prepare_creds();
65667+ if (!ncred)
65668+ goto die;
65669+ // uids
65670+ ncred->uid = new->uid;
65671+ ncred->euid = new->euid;
65672+ ncred->suid = new->suid;
65673+ ncred->fsuid = new->fsuid;
65674+ // gids
65675+ ncred->gid = new->gid;
65676+ ncred->egid = new->egid;
65677+ ncred->sgid = new->sgid;
65678+ ncred->fsgid = new->fsgid;
65679+ // groups
65680+ if (set_groups(ncred, new->group_info) < 0) {
65681+ abort_creds(ncred);
65682+ goto die;
65683+ }
65684+ // caps
65685+ ncred->securebits = new->securebits;
65686+ ncred->cap_inheritable = new->cap_inheritable;
65687+ ncred->cap_permitted = new->cap_permitted;
65688+ ncred->cap_effective = new->cap_effective;
65689+ ncred->cap_bset = new->cap_bset;
65690+
65691+ if (set_user(ncred)) {
65692+ abort_creds(ncred);
65693+ goto die;
65694+ }
66a7e928 65695+
4c928ab7
MT
65696+ // from doing get_cred on it when queueing this
65697+ put_cred(new);
66a7e928 65698+
4c928ab7
MT
65699+ __commit_creds(ncred);
65700+ return;
65701+die:
65702+ // from doing get_cred on it when queueing this
65703+ put_cred(new);
65704+ do_group_exit(SIGKILL);
65705+}
65706+#endif
65707+
65708+int commit_creds(struct cred *new)
65709+{
65710+#ifdef CONFIG_GRKERNSEC_SETXID
5e856224
MT
65711+ int ret;
65712+ int schedule_it = 0;
4c928ab7
MT
65713+ struct task_struct *t;
65714+
65715+ /* we won't get called with tasklist_lock held for writing
65716+ and interrupts disabled as the cred struct in that case is
65717+ init_cred
65718+ */
65719+ if (grsec_enable_setxid && !current_is_single_threaded() &&
65720+ !current_uid() && new->uid) {
5e856224
MT
65721+ schedule_it = 1;
65722+ }
65723+ ret = __commit_creds(new);
65724+ if (schedule_it) {
4c928ab7
MT
65725+ rcu_read_lock();
65726+ read_lock(&tasklist_lock);
65727+ for (t = next_thread(current); t != current;
65728+ t = next_thread(t)) {
65729+ if (t->delayed_cred == NULL) {
65730+ t->delayed_cred = get_cred(new);
5e856224 65731+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
4c928ab7
MT
65732+ set_tsk_need_resched(t);
65733+ }
65734+ }
65735+ read_unlock(&tasklist_lock);
65736+ rcu_read_unlock();
65737+ }
5e856224
MT
65738+ return ret;
65739+#else
4c928ab7 65740+ return __commit_creds(new);
5e856224 65741+#endif
4c928ab7
MT
65742+}
65743+
65744 EXPORT_SYMBOL(commit_creds);
65745
65746 /**
fe2de317 65747diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
c6e2a6c8 65748index 0557f24..1a00d9a 100644
fe2de317
MT
65749--- a/kernel/debug/debug_core.c
65750+++ b/kernel/debug/debug_core.c
c6e2a6c8 65751@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
8308f9c9
MT
65752 */
65753 static atomic_t masters_in_kgdb;
65754 static atomic_t slaves_in_kgdb;
65755-static atomic_t kgdb_break_tasklet_var;
65756+static atomic_unchecked_t kgdb_break_tasklet_var;
65757 atomic_t kgdb_setting_breakpoint;
65758
65759 struct task_struct *kgdb_usethread;
c6e2a6c8 65760@@ -132,7 +132,7 @@ int kgdb_single_step;
8308f9c9
MT
65761 static pid_t kgdb_sstep_pid;
65762
65763 /* to keep track of the CPU which is doing the single stepping*/
65764-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65765+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
65766
65767 /*
65768 * If you are debugging a problem where roundup (the collection of
c6e2a6c8 65769@@ -540,7 +540,7 @@ return_normal:
8308f9c9
MT
65770 * kernel will only try for the value of sstep_tries before
65771 * giving up and continuing on.
65772 */
65773- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
65774+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
65775 (kgdb_info[cpu].task &&
65776 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
65777 atomic_set(&kgdb_active, -1);
c6e2a6c8 65778@@ -634,8 +634,8 @@ cpu_master_loop:
8308f9c9
MT
65779 }
65780
65781 kgdb_restore:
65782- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
65783- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
65784+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
65785+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
65786 if (kgdb_info[sstep_cpu].task)
65787 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
65788 else
c6e2a6c8 65789@@ -861,18 +861,18 @@ static void kgdb_unregister_callbacks(void)
8308f9c9
MT
65790 static void kgdb_tasklet_bpt(unsigned long ing)
65791 {
65792 kgdb_breakpoint();
65793- atomic_set(&kgdb_break_tasklet_var, 0);
65794+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
65795 }
65796
65797 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
65798
65799 void kgdb_schedule_breakpoint(void)
65800 {
65801- if (atomic_read(&kgdb_break_tasklet_var) ||
65802+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
65803 atomic_read(&kgdb_active) != -1 ||
65804 atomic_read(&kgdb_setting_breakpoint))
65805 return;
65806- atomic_inc(&kgdb_break_tasklet_var);
65807+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
65808 tasklet_schedule(&kgdb_tasklet_breakpoint);
65809 }
65810 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
fe2de317 65811diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
c6e2a6c8 65812index 67b847d..93834dd 100644
fe2de317
MT
65813--- a/kernel/debug/kdb/kdb_main.c
65814+++ b/kernel/debug/kdb/kdb_main.c
c6e2a6c8 65815@@ -1983,7 +1983,7 @@ static int kdb_lsmod(int argc, const char **argv)
57199397
MT
65816 list_for_each_entry(mod, kdb_modules, list) {
65817
65818 kdb_printf("%-20s%8u 0x%p ", mod->name,
65819- mod->core_size, (void *)mod);
65820+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
65821 #ifdef CONFIG_MODULE_UNLOAD
5e856224 65822 kdb_printf("%4ld ", module_refcount(mod));
57199397 65823 #endif
c6e2a6c8 65824@@ -1993,7 +1993,7 @@ static int kdb_lsmod(int argc, const char **argv)
57199397
MT
65825 kdb_printf(" (Loading)");
65826 else
65827 kdb_printf(" (Live)");
65828- kdb_printf(" 0x%p", mod->module_core);
65829+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
65830
65831 #ifdef CONFIG_MODULE_UNLOAD
65832 {
fe2de317 65833diff --git a/kernel/events/core.c b/kernel/events/core.c
c6e2a6c8 65834index fd126f8..70b755b 100644
fe2de317
MT
65835--- a/kernel/events/core.c
65836+++ b/kernel/events/core.c
c6e2a6c8 65837@@ -181,7 +181,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
15a11c5b
MT
65838 return 0;
65839 }
65840
65841-static atomic64_t perf_event_id;
65842+static atomic64_unchecked_t perf_event_id;
65843
65844 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
65845 enum event_type_t event_type);
c6e2a6c8 65846@@ -2659,7 +2659,7 @@ static void __perf_event_read(void *info)
15a11c5b
MT
65847
65848 static inline u64 perf_event_count(struct perf_event *event)
65849 {
65850- return local64_read(&event->count) + atomic64_read(&event->child_count);
65851+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
65852 }
65853
65854 static u64 perf_event_read(struct perf_event *event)
c6e2a6c8 65855@@ -2983,9 +2983,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
15a11c5b
MT
65856 mutex_lock(&event->child_mutex);
65857 total += perf_event_read(event);
65858 *enabled += event->total_time_enabled +
65859- atomic64_read(&event->child_total_time_enabled);
65860+ atomic64_read_unchecked(&event->child_total_time_enabled);
65861 *running += event->total_time_running +
65862- atomic64_read(&event->child_total_time_running);
65863+ atomic64_read_unchecked(&event->child_total_time_running);
65864
65865 list_for_each_entry(child, &event->child_list, child_list) {
65866 total += perf_event_read(child);
c6e2a6c8 65867@@ -3393,10 +3393,10 @@ void perf_event_update_userpage(struct perf_event *event)
15a11c5b
MT
65868 userpg->offset -= local64_read(&event->hw.prev_count);
65869
6e9df6a3 65870 userpg->time_enabled = enabled +
15a11c5b
MT
65871- atomic64_read(&event->child_total_time_enabled);
65872+ atomic64_read_unchecked(&event->child_total_time_enabled);
65873
6e9df6a3 65874 userpg->time_running = running +
15a11c5b
MT
65875- atomic64_read(&event->child_total_time_running);
65876+ atomic64_read_unchecked(&event->child_total_time_running);
65877
c6e2a6c8
MT
65878 arch_perf_update_userpage(userpg, now);
65879
65880@@ -3829,11 +3829,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
15a11c5b
MT
65881 values[n++] = perf_event_count(event);
65882 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
65883 values[n++] = enabled +
65884- atomic64_read(&event->child_total_time_enabled);
65885+ atomic64_read_unchecked(&event->child_total_time_enabled);
65886 }
65887 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
65888 values[n++] = running +
65889- atomic64_read(&event->child_total_time_running);
65890+ atomic64_read_unchecked(&event->child_total_time_running);
65891 }
65892 if (read_format & PERF_FORMAT_ID)
65893 values[n++] = primary_event_id(event);
c6e2a6c8 65894@@ -4511,12 +4511,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
15a11c5b
MT
65895 * need to add enough zero bytes after the string to handle
65896 * the 64bit alignment we do later.
65897 */
65898- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
65899+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
65900 if (!buf) {
65901 name = strncpy(tmp, "//enomem", sizeof(tmp));
65902 goto got_name;
65903 }
65904- name = d_path(&file->f_path, buf, PATH_MAX);
65905+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
65906 if (IS_ERR(name)) {
65907 name = strncpy(tmp, "//toolong", sizeof(tmp));
65908 goto got_name;
c6e2a6c8 65909@@ -5929,7 +5929,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
15a11c5b
MT
65910 event->parent = parent_event;
65911
65912 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65913- event->id = atomic64_inc_return(&perf_event_id);
65914+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
65915
65916 event->state = PERF_EVENT_STATE_INACTIVE;
65917
c6e2a6c8 65918@@ -6491,10 +6491,10 @@ static void sync_child_event(struct perf_event *child_event,
15a11c5b
MT
65919 /*
65920 * Add back the child's count to the parent's count:
65921 */
65922- atomic64_add(child_val, &parent_event->child_count);
65923- atomic64_add(child_event->total_time_enabled,
65924+ atomic64_add_unchecked(child_val, &parent_event->child_count);
65925+ atomic64_add_unchecked(child_event->total_time_enabled,
65926 &parent_event->child_total_time_enabled);
65927- atomic64_add(child_event->total_time_running,
65928+ atomic64_add_unchecked(child_event->total_time_running,
65929 &parent_event->child_total_time_running);
65930
65931 /*
fe2de317 65932diff --git a/kernel/exit.c b/kernel/exit.c
572b4308 65933index 9d81012..d7911f1 100644
fe2de317
MT
65934--- a/kernel/exit.c
65935+++ b/kernel/exit.c
c6e2a6c8 65936@@ -59,6 +59,10 @@
57199397 65937 #include <asm/pgtable.h>
58c5fc13 65938 #include <asm/mmu_context.h>
58c5fc13
MT
65939
65940+#ifdef CONFIG_GRKERNSEC
65941+extern rwlock_t grsec_exec_file_lock;
65942+#endif
65943+
65944 static void exit_mm(struct task_struct * tsk);
65945
57199397 65946 static void __unhash_process(struct task_struct *p, bool group_dead)
c6e2a6c8 65947@@ -170,6 +174,10 @@ void release_task(struct task_struct * p)
58c5fc13
MT
65948 struct task_struct *leader;
65949 int zap_leader;
65950 repeat:
15a11c5b 65951+#ifdef CONFIG_NET
58c5fc13 65952+ gr_del_task_from_ip_table(p);
15a11c5b 65953+#endif
58c5fc13 65954+
58c5fc13 65955 /* don't need to get the RCU readlock here - the process is dead and
df50ba0c 65956 * can't be modifying its own credentials. But shut RCU-lockdep up */
6e9df6a3 65957 rcu_read_lock();
c6e2a6c8 65958@@ -382,7 +390,7 @@ int allow_signal(int sig)
ae4e228f
MT
65959 * know it'll be handled, so that they don't get converted to
65960 * SIGKILL or just silently dropped.
65961 */
65962- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
65963+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
65964 recalc_sigpending();
65965 spin_unlock_irq(&current->sighand->siglock);
65966 return 0;
c6e2a6c8 65967@@ -418,6 +426,17 @@ void daemonize(const char *name, ...)
58c5fc13
MT
65968 vsnprintf(current->comm, sizeof(current->comm), name, args);
65969 va_end(args);
65970
65971+#ifdef CONFIG_GRKERNSEC
65972+ write_lock(&grsec_exec_file_lock);
65973+ if (current->exec_file) {
65974+ fput(current->exec_file);
65975+ current->exec_file = NULL;
65976+ }
65977+ write_unlock(&grsec_exec_file_lock);
65978+#endif
65979+
65980+ gr_set_kernel_label(current);
65981+
65982 /*
65983 * If we were started as result of loading a module, close all of the
65984 * user space pages. We don't need them, and if we didn't close them
572b4308 65985@@ -901,6 +920,8 @@ void do_exit(long code)
bc901d79
MT
65986 struct task_struct *tsk = current;
65987 int group_dead;
65988
6e9df6a3
MT
65989+ set_fs(USER_DS);
65990+
65991 profile_task_exit(tsk);
bc901d79 65992
6e9df6a3 65993 WARN_ON(blk_needs_flush_plug(tsk));
572b4308 65994@@ -917,7 +938,6 @@ void do_exit(long code)
6e9df6a3
MT
65995 * mm_release()->clear_child_tid() from writing to a user-controlled
65996 * kernel address.
bc901d79 65997 */
6e9df6a3 65998- set_fs(USER_DS);
bc901d79 65999
6e9df6a3 66000 ptrace_event(PTRACE_EVENT_EXIT, code);
bc901d79 66001
572b4308 66002@@ -978,6 +998,9 @@ void do_exit(long code)
58c5fc13
MT
66003 tsk->exit_code = code;
66004 taskstats_exit(tsk, group_dead);
66005
66006+ gr_acl_handle_psacct(tsk, code);
66007+ gr_acl_handle_exit();
66008+
66009 exit_mm(tsk);
66010
66011 if (group_dead)
572b4308 66012@@ -1094,7 +1117,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
4c928ab7
MT
66013 * Take down every thread in the group. This is called by fatal signals
66014 * as well as by sys_exit_group (below).
66015 */
5e856224 66016-void
4c928ab7
MT
66017+__noreturn void
66018 do_group_exit(int exit_code)
66019 {
66020 struct signal_struct *sig = current->signal;
fe2de317 66021diff --git a/kernel/fork.c b/kernel/fork.c
572b4308 66022index 8163333..aee97f3 100644
fe2de317
MT
66023--- a/kernel/fork.c
66024+++ b/kernel/fork.c
572b4308
MT
66025@@ -274,19 +274,24 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
66026 }
66027
66028 err = arch_dup_task_struct(tsk, orig);
66029- if (err)
66030- goto out;
66031
66032+ /*
66033+ * We defer looking at err, because we will need this setup
66034+ * for the clean up path to work correctly.
66035+ */
66036 tsk->stack = ti;
66037-
66038 setup_thread_stack(tsk, orig);
66039+
66040+ if (err)
66041+ goto out;
66042+
66043 clear_user_return_notifier(tsk);
66044 clear_tsk_need_resched(tsk);
66045 stackend = end_of_stack(tsk);
58c5fc13
MT
66046 *stackend = STACK_END_MAGIC; /* for overflow detection */
66047
66048 #ifdef CONFIG_CC_STACKPROTECTOR
66049- tsk->stack_canary = get_random_int();
66050+ tsk->stack_canary = pax_get_random_long();
66051 #endif
66052
6e9df6a3 66053 /*
572b4308 66054@@ -310,13 +315,78 @@ out:
57199397
MT
66055 }
66056
66057 #ifdef CONFIG_MMU
c6e2a6c8 66058+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
57199397
MT
66059+{
66060+ struct vm_area_struct *tmp;
66061+ unsigned long charge;
66062+ struct mempolicy *pol;
66063+ struct file *file;
66064+
66065+ charge = 0;
66066+ if (mpnt->vm_flags & VM_ACCOUNT) {
c6e2a6c8
MT
66067+ unsigned long len;
66068+ len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66069+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
57199397
MT
66070+ goto fail_nomem;
66071+ charge = len;
66072+ }
66073+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66074+ if (!tmp)
66075+ goto fail_nomem;
66076+ *tmp = *mpnt;
66077+ tmp->vm_mm = mm;
66078+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
66079+ pol = mpol_dup(vma_policy(mpnt));
66080+ if (IS_ERR(pol))
66081+ goto fail_nomem_policy;
66082+ vma_set_policy(tmp, pol);
66083+ if (anon_vma_fork(tmp, mpnt))
66084+ goto fail_nomem_anon_vma_fork;
66085+ tmp->vm_flags &= ~VM_LOCKED;
6892158b 66086+ tmp->vm_next = tmp->vm_prev = NULL;
57199397
MT
66087+ tmp->vm_mirror = NULL;
66088+ file = tmp->vm_file;
66089+ if (file) {
66090+ struct inode *inode = file->f_path.dentry->d_inode;
66091+ struct address_space *mapping = file->f_mapping;
66092+
66093+ get_file(file);
66094+ if (tmp->vm_flags & VM_DENYWRITE)
66095+ atomic_dec(&inode->i_writecount);
15a11c5b 66096+ mutex_lock(&mapping->i_mmap_mutex);
57199397
MT
66097+ if (tmp->vm_flags & VM_SHARED)
66098+ mapping->i_mmap_writable++;
57199397
MT
66099+ flush_dcache_mmap_lock(mapping);
66100+ /* insert tmp into the share list, just after mpnt */
66101+ vma_prio_tree_add(tmp, mpnt);
66102+ flush_dcache_mmap_unlock(mapping);
15a11c5b 66103+ mutex_unlock(&mapping->i_mmap_mutex);
57199397
MT
66104+ }
66105+
66106+ /*
66107+ * Clear hugetlb-related page reserves for children. This only
66108+ * affects MAP_PRIVATE mappings. Faults generated by the child
66109+ * are not guaranteed to succeed, even if read-only
66110+ */
66111+ if (is_vm_hugetlb_page(tmp))
66112+ reset_vma_resv_huge_pages(tmp);
66113+
66114+ return tmp;
66115+
66116+fail_nomem_anon_vma_fork:
66117+ mpol_put(pol);
66118+fail_nomem_policy:
66119+ kmem_cache_free(vm_area_cachep, tmp);
66120+fail_nomem:
66121+ vm_unacct_memory(charge);
66122+ return NULL;
66123+}
66124+
66125 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
66126 {
66127 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
66128 struct rb_node **rb_link, *rb_parent;
66129 int retval;
66130- unsigned long charge;
66131- struct mempolicy *pol;
66132
66133 down_write(&oldmm->mmap_sem);
66134 flush_cache_dup_mm(oldmm);
572b4308 66135@@ -328,8 +398,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
58c5fc13
MT
66136 mm->locked_vm = 0;
66137 mm->mmap = NULL;
66138 mm->mmap_cache = NULL;
66139- mm->free_area_cache = oldmm->mmap_base;
66140- mm->cached_hole_size = ~0UL;
66141+ mm->free_area_cache = oldmm->free_area_cache;
66142+ mm->cached_hole_size = oldmm->cached_hole_size;
66143 mm->map_count = 0;
66144 cpumask_clear(mm_cpumask(mm));
66145 mm->mm_rb = RB_ROOT;
572b4308 66146@@ -345,8 +415,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
57199397
MT
66147
66148 prev = NULL;
66149 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
66150- struct file *file;
66151-
66152 if (mpnt->vm_flags & VM_DONTCOPY) {
66153 long pages = vma_pages(mpnt);
66154 mm->total_vm -= pages;
572b4308 66155@@ -354,54 +422,11 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
57199397
MT
66156 -pages);
66157 continue;
66158 }
66159- charge = 0;
66160- if (mpnt->vm_flags & VM_ACCOUNT) {
c6e2a6c8
MT
66161- unsigned long len;
66162- len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
66163- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
57199397
MT
66164- goto fail_nomem;
66165- charge = len;
c6e2a6c8 66166+ tmp = dup_vma(mm, oldmm, mpnt);
fe2de317
MT
66167+ if (!tmp) {
66168+ retval = -ENOMEM;
66169+ goto out;
66170 }
57199397
MT
66171- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
66172- if (!tmp)
66173- goto fail_nomem;
66174- *tmp = *mpnt;
66175- INIT_LIST_HEAD(&tmp->anon_vma_chain);
66176- pol = mpol_dup(vma_policy(mpnt));
66177- retval = PTR_ERR(pol);
66178- if (IS_ERR(pol))
66179- goto fail_nomem_policy;
66180- vma_set_policy(tmp, pol);
6892158b 66181- tmp->vm_mm = mm;
57199397
MT
66182- if (anon_vma_fork(tmp, mpnt))
66183- goto fail_nomem_anon_vma_fork;
66184- tmp->vm_flags &= ~VM_LOCKED;
57199397
MT
66185- tmp->vm_next = tmp->vm_prev = NULL;
66186- file = tmp->vm_file;
66187- if (file) {
66188- struct inode *inode = file->f_path.dentry->d_inode;
66189- struct address_space *mapping = file->f_mapping;
66190-
66191- get_file(file);
66192- if (tmp->vm_flags & VM_DENYWRITE)
66193- atomic_dec(&inode->i_writecount);
15a11c5b 66194- mutex_lock(&mapping->i_mmap_mutex);
57199397
MT
66195- if (tmp->vm_flags & VM_SHARED)
66196- mapping->i_mmap_writable++;
57199397
MT
66197- flush_dcache_mmap_lock(mapping);
66198- /* insert tmp into the share list, just after mpnt */
66199- vma_prio_tree_add(tmp, mpnt);
66200- flush_dcache_mmap_unlock(mapping);
15a11c5b 66201- mutex_unlock(&mapping->i_mmap_mutex);
fe2de317
MT
66202- }
66203-
66204- /*
57199397
MT
66205- * Clear hugetlb-related page reserves for children. This only
66206- * affects MAP_PRIVATE mappings. Faults generated by the child
66207- * are not guaranteed to succeed, even if read-only
66208- */
66209- if (is_vm_hugetlb_page(tmp))
66210- reset_vma_resv_huge_pages(tmp);
fe2de317
MT
66211
66212 /*
57199397 66213 * Link in the new vma and copy the page table entries.
572b4308 66214@@ -424,6 +449,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
58c5fc13
MT
66215 if (retval)
66216 goto out;
66217 }
66218+
66219+#ifdef CONFIG_PAX_SEGMEXEC
66220+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
66221+ struct vm_area_struct *mpnt_m;
66222+
66223+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
66224+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
66225+
66226+ if (!mpnt->vm_mirror)
66227+ continue;
66228+
66229+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
66230+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
66231+ mpnt->vm_mirror = mpnt_m;
66232+ } else {
66233+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
66234+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
66235+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
66236+ mpnt->vm_mirror->vm_mirror = mpnt;
66237+ }
66238+ }
66239+ BUG_ON(mpnt_m);
66240+ }
66241+#endif
66242+
66243 /* a new mm has just been created */
66244 arch_dup_mmap(oldmm, mm);
66245 retval = 0;
572b4308 66246@@ -432,14 +482,6 @@ out:
57199397
MT
66247 flush_tlb_mm(oldmm);
66248 up_write(&oldmm->mmap_sem);
66249 return retval;
66250-fail_nomem_anon_vma_fork:
66251- mpol_put(pol);
66252-fail_nomem_policy:
66253- kmem_cache_free(vm_area_cachep, tmp);
66254-fail_nomem:
66255- retval = -ENOMEM;
66256- vm_unacct_memory(charge);
66257- goto out;
66258 }
66259
6e9df6a3 66260 static inline int mm_alloc_pgd(struct mm_struct *mm)
572b4308 66261@@ -676,8 +718,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
5e856224 66262 return ERR_PTR(err);
4c928ab7 66263
5e856224
MT
66264 mm = get_task_mm(task);
66265- if (mm && mm != current->mm &&
66266- !ptrace_may_access(task, mode)) {
4c928ab7 66267+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
5e856224
MT
66268+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
66269 mmput(mm);
66270 mm = ERR_PTR(-EACCES);
66271 }
572b4308 66272@@ -899,13 +941,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
6892158b 66273 spin_unlock(&fs->lock);
58c5fc13
MT
66274 return -EAGAIN;
66275 }
66276- fs->users++;
66277+ atomic_inc(&fs->users);
6892158b 66278 spin_unlock(&fs->lock);
58c5fc13
MT
66279 return 0;
66280 }
df50ba0c
MT
66281 tsk->fs = copy_fs_struct(fs);
66282 if (!tsk->fs)
66283 return -ENOMEM;
66284+ gr_set_chroot_entries(tsk, &tsk->fs->root);
66285 return 0;
66286 }
66287
572b4308 66288@@ -1172,6 +1215,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
58c5fc13
MT
66289 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
66290 #endif
66291 retval = -EAGAIN;
66292+
66293+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
66294+
66295 if (atomic_read(&p->real_cred->user->processes) >=
df50ba0c 66296 task_rlimit(p, RLIMIT_NPROC)) {
6e9df6a3 66297 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
572b4308
MT
66298@@ -1392,6 +1438,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
66299 /* Need tasklist lock for parent etc handling! */
66300 write_lock_irq(&tasklist_lock);
58c5fc13 66301
572b4308 66302+ /* synchronizes with gr_set_acls() */
58c5fc13
MT
66303+ gr_copy_label(p);
66304+
572b4308
MT
66305 /* CLONE_PARENT re-uses the old parent */
66306 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
66307 p->real_parent = current->real_parent;
66308@@ -1502,6 +1551,8 @@ bad_fork_cleanup_count:
58c5fc13
MT
66309 bad_fork_free:
66310 free_task(p);
66311 fork_out:
66312+ gr_log_forkfail(retval);
66313+
66314 return ERR_PTR(retval);
66315 }
66316
572b4308 66317@@ -1602,6 +1653,8 @@ long do_fork(unsigned long clone_flags,
58c5fc13
MT
66318 if (clone_flags & CLONE_PARENT_SETTID)
66319 put_user(nr, parent_tidptr);
66320
66321+ gr_handle_brute_check();
66322+
66323 if (clone_flags & CLONE_VFORK) {
66324 p->vfork_done = &vfork;
66325 init_completion(&vfork);
572b4308 66326@@ -1700,7 +1753,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
58c5fc13
MT
66327 return 0;
66328
66329 /* don't need lock here; in the worst case we'll do useless copy */
66330- if (fs->users == 1)
66331+ if (atomic_read(&fs->users) == 1)
66332 return 0;
66333
66334 *new_fsp = copy_fs_struct(fs);
572b4308 66335@@ -1789,7 +1842,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
58c5fc13 66336 fs = current->fs;
6892158b 66337 spin_lock(&fs->lock);
58c5fc13
MT
66338 current->fs = new_fs;
66339- if (--fs->users)
df50ba0c 66340+ gr_set_chroot_entries(current, &current->fs->root);
58c5fc13
MT
66341+ if (atomic_dec_return(&fs->users))
66342 new_fs = NULL;
66343 else
66344 new_fs = fs;
fe2de317 66345diff --git a/kernel/futex.c b/kernel/futex.c
c6e2a6c8 66346index e2b0fb9..db818ac 100644
fe2de317
MT
66347--- a/kernel/futex.c
66348+++ b/kernel/futex.c
ae4e228f
MT
66349@@ -54,6 +54,7 @@
66350 #include <linux/mount.h>
66351 #include <linux/pagemap.h>
66352 #include <linux/syscalls.h>
66353+#include <linux/ptrace.h>
66354 #include <linux/signal.h>
4c928ab7 66355 #include <linux/export.h>
ae4e228f 66356 #include <linux/magic.h>
5e856224 66357@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
16454cff 66358 struct page *page, *page_head;
15a11c5b 66359 int err, ro = 0;
58c5fc13
MT
66360
66361+#ifdef CONFIG_PAX_SEGMEXEC
66362+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
66363+ return -EFAULT;
66364+#endif
66365+
66366 /*
66367 * The futex address must be "naturally" aligned.
66368 */
c6e2a6c8 66369@@ -2711,6 +2717,7 @@ static int __init futex_init(void)
58c5fc13 66370 {
bc901d79
MT
66371 u32 curval;
66372 int i;
66373+ mm_segment_t oldfs;
58c5fc13 66374
bc901d79
MT
66375 /*
66376 * This will fail and we want it. Some arch implementations do
c6e2a6c8 66377@@ -2722,8 +2729,11 @@ static int __init futex_init(void)
bc901d79
MT
66378 * implementation, the non-functional ones will return
66379 * -ENOSYS.
66380 */
66381+ oldfs = get_fs();
66382+ set_fs(USER_DS);
66a7e928 66383 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
bc901d79 66384 futex_cmpxchg_enabled = 1;
66a7e928 66385+ set_fs(oldfs);
bc901d79 66386
66a7e928 66387 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
6e9df6a3 66388 plist_head_init(&futex_queues[i].chain);
fe2de317
MT
66389diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
66390index 9b22d03..6295b62 100644
66391--- a/kernel/gcov/base.c
66392+++ b/kernel/gcov/base.c
58c5fc13
MT
66393@@ -102,11 +102,6 @@ void gcov_enable_events(void)
66394 }
66395
66396 #ifdef CONFIG_MODULES
66397-static inline int within(void *addr, void *start, unsigned long size)
66398-{
66399- return ((addr >= start) && (addr < start + size));
66400-}
66401-
66402 /* Update list and generate events when modules are unloaded. */
66403 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
66404 void *data)
fe2de317 66405@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
58c5fc13
MT
66406 prev = NULL;
66407 /* Remove entries located in module from linked list. */
66408 for (info = gcov_info_head; info; info = info->next) {
66409- if (within(info, mod->module_core, mod->core_size)) {
66410+ if (within_module_core_rw((unsigned long)info, mod)) {
66411 if (prev)
66412 prev->next = info->next;
66413 else
fe2de317 66414diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
572b4308 66415index 6db7a5e..25b6648 100644
fe2de317
MT
66416--- a/kernel/hrtimer.c
66417+++ b/kernel/hrtimer.c
572b4308 66418@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
ae4e228f
MT
66419 local_irq_restore(flags);
66420 }
66421
66422-static void run_hrtimer_softirq(struct softirq_action *h)
66423+static void run_hrtimer_softirq(void)
66424 {
572b4308
MT
66425 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
66426
fe2de317 66427diff --git a/kernel/jump_label.c b/kernel/jump_label.c
c6e2a6c8 66428index 4304919..408c4c0 100644
fe2de317
MT
66429--- a/kernel/jump_label.c
66430+++ b/kernel/jump_label.c
c6e2a6c8
MT
66431@@ -13,6 +13,7 @@
66432 #include <linux/sort.h>
66433 #include <linux/err.h>
66434 #include <linux/static_key.h>
66435+#include <linux/mm.h>
66436
66437 #ifdef HAVE_JUMP_LABEL
66438
66439@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
15a11c5b
MT
66440
66441 size = (((unsigned long)stop - (unsigned long)start)
66442 / sizeof(struct jump_entry));
66443+ pax_open_kernel();
66444 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
66445+ pax_close_kernel();
bc901d79
MT
66446 }
66447
c6e2a6c8
MT
66448 static void jump_label_update(struct static_key *key, int enable);
66449@@ -356,10 +359,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
15a11c5b
MT
66450 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
66451 struct jump_entry *iter;
66452
bc901d79 66453+ pax_open_kernel();
15a11c5b
MT
66454 for (iter = iter_start; iter < iter_stop; iter++) {
66455 if (within_module_init(iter->code, mod))
66456 iter->code = 0;
66457 }
bc901d79 66458+ pax_close_kernel();
15a11c5b 66459 }
bc901d79 66460
15a11c5b 66461 static int
fe2de317 66462diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
572b4308 66463index 079f1d3..4e80e69 100644
fe2de317
MT
66464--- a/kernel/kallsyms.c
66465+++ b/kernel/kallsyms.c
58c5fc13
MT
66466@@ -11,6 +11,9 @@
66467 * Changed the compression method from stem compression to "table lookup"
66468 * compression (see scripts/kallsyms.c for a more complete description)
66469 */
66470+#ifdef CONFIG_GRKERNSEC_HIDESYM
66471+#define __INCLUDED_BY_HIDESYM 1
66472+#endif
66473 #include <linux/kallsyms.h>
66474 #include <linux/module.h>
66475 #include <linux/init.h>
fe2de317 66476@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
58c5fc13
MT
66477
66478 static inline int is_kernel_inittext(unsigned long addr)
66479 {
66480+ if (system_state != SYSTEM_BOOTING)
66481+ return 0;
66482+
66483 if (addr >= (unsigned long)_sinittext
66484 && addr <= (unsigned long)_einittext)
66485 return 1;
57199397
MT
66486 return 0;
66487 }
58c5fc13 66488
ae4e228f 66489+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
df50ba0c 66490+#ifdef CONFIG_MODULES
57199397
MT
66491+static inline int is_module_text(unsigned long addr)
66492+{
66493+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
66494+ return 1;
66495+
66496+ addr = ktla_ktva(addr);
66497+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
66498+}
66499+#else
66500+static inline int is_module_text(unsigned long addr)
66501+{
66502+ return 0;
66503+}
66504+#endif
df50ba0c 66505+#endif
58c5fc13 66506+
57199397
MT
66507 static inline int is_kernel_text(unsigned long addr)
66508 {
66509 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
fe2de317 66510@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
57199397
MT
66511
66512 static inline int is_kernel(unsigned long addr)
66513 {
ae4e228f 66514+
57199397
MT
66515+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66516+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
58c5fc13 66517+ return 1;
ae4e228f 66518+
57199397
MT
66519+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
66520+#else
66521 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
ae4e228f 66522+#endif
58c5fc13 66523+
58c5fc13 66524 return 1;
66a7e928 66525 return in_gate_area_no_mm(addr);
57199397
MT
66526 }
66527
66528 static int is_ksym_addr(unsigned long addr)
66529 {
66530+
66531+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66532+ if (is_module_text(addr))
66533+ return 0;
66534+#endif
66535+
66536 if (all_var)
66537 return is_kernel(addr);
66538
fe2de317 66539@@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
58c5fc13
MT
66540
66541 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
66542 {
66543- iter->name[0] = '\0';
66544 iter->nameoff = get_symbol_offset(new_pos);
66545 iter->pos = new_pos;
66546 }
fe2de317 66547@@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, void *p)
ae4e228f
MT
66548 {
66549 struct kallsym_iter *iter = m->private;
66550
66551+#ifdef CONFIG_GRKERNSEC_HIDESYM
66552+ if (current_uid())
66553+ return 0;
66554+#endif
66555+
66556 /* Some debugging symbols have no name. Ignore them. */
66557 if (!iter->name[0])
66558 return 0;
572b4308
MT
66559@@ -515,11 +558,22 @@ static int s_show(struct seq_file *m, void *p)
66560 */
66561 type = iter->exported ? toupper(iter->type) :
66562 tolower(iter->type);
66563+
66564+#ifdef CONFIG_GRKERNSEC_HIDESYM
66565+ seq_printf(m, "%pP %c %s\t[%s]\n", (void *)iter->value,
66566+ type, iter->name, iter->module_name);
66567+#else
66568 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
66569 type, iter->name, iter->module_name);
66570+#endif
66571 } else
66572+#ifdef CONFIG_GRKERNSEC_HIDESYM
66573+ seq_printf(m, "%pP %c %s\n", (void *)iter->value,
66574+ iter->type, iter->name);
66575+#else
66576 seq_printf(m, "%pK %c %s\n", (void *)iter->value,
66577 iter->type, iter->name);
66578+#endif
66579 return 0;
66580 }
66581
66582@@ -540,7 +594,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
58c5fc13
MT
66583 struct kallsym_iter *iter;
66584 int ret;
66585
66586- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
66587+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
66588 if (!iter)
66589 return -ENOMEM;
66590 reset_iter(iter, 0);
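The kernel/kallsyms.c hunks above gate /proc/kallsyms output on the reader's uid under GRKERNSEC_HIDESYM and switch the %pK pointer format to %pP; the sketch below is a rough userspace analogue of the uid gate only (names and the demo value are invented for illustration, it is not code from the patch).

#include <stdio.h>
#include <unistd.h>

struct sym {
    unsigned long value;
    char type;
    const char *name;
};

/* Analogue of the current_uid() check added to s_show(): unprivileged
 * readers get no output at all; root still sees the full line. */
static void show_symbol(const struct sym *s)
{
    if (getuid() != 0)
        return;
    printf("%08lx %c %s\n", s->value, s->type, s->name);
}

int main(void)
{
    struct sym demo = { 0xc1000000UL, 'T', "startup_32" };
    show_symbol(&demo);
    return 0;
}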
fe2de317 66591diff --git a/kernel/kexec.c b/kernel/kexec.c
c6e2a6c8 66592index 4e2e472..cd0c7ae 100644
fe2de317
MT
66593--- a/kernel/kexec.c
66594+++ b/kernel/kexec.c
c6e2a6c8 66595@@ -1046,7 +1046,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
6e9df6a3
MT
66596 unsigned long flags)
66597 {
66598 struct compat_kexec_segment in;
66599- struct kexec_segment out, __user *ksegments;
66600+ struct kexec_segment out;
66601+ struct kexec_segment __user *ksegments;
66602 unsigned long i, result;
66603
66604 /* Don't allow clients that don't understand the native
fe2de317 66605diff --git a/kernel/kmod.c b/kernel/kmod.c
c6e2a6c8 66606index 05698a7..a4c1e3a 100644
fe2de317
MT
66607--- a/kernel/kmod.c
66608+++ b/kernel/kmod.c
c6e2a6c8
MT
66609@@ -66,7 +66,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
66610 kfree(info->argv);
66611 }
66612
66613-static int call_modprobe(char *module_name, int wait)
66614+static int call_modprobe(char *module_name, char *module_param, int wait)
66615 {
66616 static char *envp[] = {
66617 "HOME=/",
66618@@ -75,7 +75,7 @@ static int call_modprobe(char *module_name, int wait)
66619 NULL
66620 };
66621
66622- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
66623+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
66624 if (!argv)
66625 goto out;
66626
66627@@ -87,7 +87,8 @@ static int call_modprobe(char *module_name, int wait)
66628 argv[1] = "-q";
66629 argv[2] = "--";
66630 argv[3] = module_name; /* check free_modprobe_argv() */
66631- argv[4] = NULL;
66632+ argv[4] = module_param;
66633+ argv[5] = NULL;
66634
66635 return call_usermodehelper_fns(modprobe_path, argv, envp,
66636 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
66637@@ -112,9 +113,8 @@ out:
71d190be
MT
66638 * If module auto-loading support is disabled then this function
66639 * becomes a no-operation.
66640 */
66641-int __request_module(bool wait, const char *fmt, ...)
66642+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
66643 {
66644- va_list args;
66645 char module_name[MODULE_NAME_LEN];
66646 unsigned int max_modprobes;
66647 int ret;
c6e2a6c8 66648@@ -122,9 +122,7 @@ int __request_module(bool wait, const char *fmt, ...)
71d190be
MT
66649 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
66650 static int kmod_loop_msg;
66651
66652- va_start(args, fmt);
66653- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
66654- va_end(args);
66655+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
66656 if (ret >= MODULE_NAME_LEN)
66657 return -ENAMETOOLONG;
66658
c6e2a6c8 66659@@ -132,6 +130,20 @@ int __request_module(bool wait, const char *fmt, ...)
ae4e228f
MT
66660 if (ret)
66661 return ret;
58c5fc13
MT
66662
66663+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71d190be
MT
66664+ if (!current_uid()) {
66665+ /* hack to workaround consolekit/udisks stupidity */
66666+ read_lock(&tasklist_lock);
66667+ if (!strcmp(current->comm, "mount") &&
66668+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
66669+ read_unlock(&tasklist_lock);
66670+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
66671+ return -EPERM;
66672+ }
66673+ read_unlock(&tasklist_lock);
58c5fc13
MT
66674+ }
66675+#endif
66676+
66677 /* If modprobe needs a service that is in a module, we get a recursive
66678 * loop. Limit the number of running kmod threads to max_threads/2 or
66679 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
c6e2a6c8
MT
66680@@ -160,11 +172,52 @@ int __request_module(bool wait, const char *fmt, ...)
66681
66682 trace_module_request(module_name, wait, _RET_IP_);
66683
66684- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
66685+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
66686
71d190be
MT
66687 atomic_dec(&kmod_concurrent);
66688 return ret;
66689 }
66690+
66691+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
66692+{
66693+ va_list args;
66694+ int ret;
66695+
66696+ va_start(args, fmt);
66697+ ret = ____request_module(wait, module_param, fmt, args);
66698+ va_end(args);
66699+
66700+ return ret;
66701+}
66702+
66703+int __request_module(bool wait, const char *fmt, ...)
66704+{
66705+ va_list args;
66706+ int ret;
66707+
66708+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66709+ if (current_uid()) {
66710+ char module_param[MODULE_NAME_LEN];
66711+
66712+ memset(module_param, 0, sizeof(module_param));
66713+
66714+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
66715+
66716+ va_start(args, fmt);
66717+ ret = ____request_module(wait, module_param, fmt, args);
66718+ va_end(args);
66719+
66720+ return ret;
66721+ }
66722+#endif
66723+
66724+ va_start(args, fmt);
66725+ ret = ____request_module(wait, NULL, fmt, args);
66726+ va_end(args);
66727+
66728+ return ret;
66729+}
66730+
66731 EXPORT_SYMBOL(__request_module);
66732 #endif /* CONFIG_MODULES */
66733
c6e2a6c8 66734@@ -267,7 +320,7 @@ static int wait_for_helper(void *data)
6e9df6a3
MT
66735 *
66736 * Thus the __user pointer cast is valid here.
66737 */
66738- sys_wait4(pid, (int __user *)&ret, 0, NULL);
66739+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
66740
66741 /*
66742 * If ret is 0, either ____call_usermodehelper failed and the
fe2de317 66743diff --git a/kernel/kprobes.c b/kernel/kprobes.c
5e856224 66744index c62b854..cb67968 100644
fe2de317
MT
66745--- a/kernel/kprobes.c
66746+++ b/kernel/kprobes.c
66747@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
58c5fc13
MT
66748 * kernel image and loaded module images reside. This is required
66749 * so x86_64 can correctly handle the %rip-relative fixups.
66750 */
66751- kip->insns = module_alloc(PAGE_SIZE);
66752+ kip->insns = module_alloc_exec(PAGE_SIZE);
66753 if (!kip->insns) {
66754 kfree(kip);
66755 return NULL;
fe2de317 66756@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
ae4e228f 66757 */
df50ba0c 66758 if (!list_is_singular(&kip->list)) {
ae4e228f 66759 list_del(&kip->list);
58c5fc13
MT
66760- module_free(NULL, kip->insns);
66761+ module_free_exec(NULL, kip->insns);
66762 kfree(kip);
66763 }
66764 return 1;
4c928ab7 66765@@ -1955,7 +1955,7 @@ static int __init init_kprobes(void)
df50ba0c
MT
66766 {
66767 int i, err = 0;
66768 unsigned long offset = 0, size = 0;
66769- char *modname, namebuf[128];
66770+ char *modname, namebuf[KSYM_NAME_LEN];
66771 const char *symbol_name;
66772 void *addr;
66773 struct kprobe_blackpoint *kb;
4c928ab7 66774@@ -2081,7 +2081,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
df50ba0c
MT
66775 const char *sym = NULL;
66776 unsigned int i = *(loff_t *) v;
66777 unsigned long offset = 0;
66778- char *modname, namebuf[128];
66779+ char *modname, namebuf[KSYM_NAME_LEN];
66780
66781 head = &kprobe_table[i];
66782 preempt_disable();
5e856224
MT
66783diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
66784index 4e316e1..5501eef 100644
66785--- a/kernel/ksysfs.c
66786+++ b/kernel/ksysfs.c
66787@@ -47,6 +47,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
66788 {
66789 if (count+1 > UEVENT_HELPER_PATH_LEN)
66790 return -ENOENT;
66791+ if (!capable(CAP_SYS_ADMIN))
66792+ return -EPERM;
66793 memcpy(uevent_helper, buf, count);
66794 uevent_helper[count] = '\0';
66795 if (count && uevent_helper[count-1] == '\n')
fe2de317 66796diff --git a/kernel/lockdep.c b/kernel/lockdep.c
c6e2a6c8 66797index ea9ee45..67ebc8f 100644
fe2de317
MT
66798--- a/kernel/lockdep.c
66799+++ b/kernel/lockdep.c
5e856224 66800@@ -590,6 +590,10 @@ static int static_obj(void *obj)
df50ba0c
MT
66801 end = (unsigned long) &_end,
66802 addr = (unsigned long) obj;
58c5fc13
MT
66803
66804+#ifdef CONFIG_PAX_KERNEXEC
ae4e228f 66805+ start = ktla_ktva(start);
58c5fc13
MT
66806+#endif
66807+
66808 /*
66809 * static variable?
66810 */
5e856224 66811@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
ae4e228f
MT
66812 if (!static_obj(lock->key)) {
66813 debug_locks_off();
66814 printk("INFO: trying to register non-static key.\n");
66815+ printk("lock:%pS key:%pS.\n", lock, lock->key);
66816 printk("the code is fine but needs lockdep annotation.\n");
66817 printk("turning off the locking correctness validator.\n");
66818 dump_stack();
4c928ab7 66819@@ -3042,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
bc901d79
MT
66820 if (!class)
66821 return 0;
66822 }
66823- atomic_inc((atomic_t *)&class->ops);
66824+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
66825 if (very_verbose(class)) {
66826 printk("\nacquire class [%p] %s", class->key, class->name);
66827 if (class->name_version > 1)
fe2de317 66828diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
4c928ab7 66829index 91c32a0..b2c71c5 100644
fe2de317
MT
66830--- a/kernel/lockdep_proc.c
66831+++ b/kernel/lockdep_proc.c
66832@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
df50ba0c
MT
66833
66834 static void print_name(struct seq_file *m, struct lock_class *class)
66835 {
66836- char str[128];
66837+ char str[KSYM_NAME_LEN];
66838 const char *name = class->name;
66839
66840 if (!name) {
fe2de317 66841diff --git a/kernel/module.c b/kernel/module.c
c6e2a6c8 66842index 78ac6ec..e87db0e 100644
fe2de317
MT
66843--- a/kernel/module.c
66844+++ b/kernel/module.c
15a11c5b 66845@@ -58,6 +58,7 @@
71d190be
MT
66846 #include <linux/jump_label.h>
66847 #include <linux/pfn.h>
15a11c5b 66848 #include <linux/bsearch.h>
71d190be
MT
66849+#include <linux/grsecurity.h>
66850
66851 #define CREATE_TRACE_POINTS
66852 #include <trace/events/module.h>
c6e2a6c8 66853@@ -114,7 +115,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
58c5fc13 66854
57199397
MT
66855 /* Bounds of module allocation, for speeding __module_address.
66856 * Protected by module_mutex. */
58c5fc13
MT
66857-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
66858+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
66859+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
66860
66861 int register_module_notifier(struct notifier_block * nb)
66862 {
c6e2a6c8 66863@@ -278,7 +280,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
58c5fc13
MT
66864 return true;
66865
66866 list_for_each_entry_rcu(mod, &modules, list) {
66867- struct symsearch arr[] = {
66868+ struct symsearch modarr[] = {
66869 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
66870 NOT_GPL_ONLY, false },
66871 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
c6e2a6c8 66872@@ -300,7 +302,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
58c5fc13
MT
66873 #endif
66874 };
66875
66876- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
66877+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
66878 return true;
66879 }
66880 return false;
c6e2a6c8 66881@@ -432,7 +434,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
df50ba0c
MT
66882 static int percpu_modalloc(struct module *mod,
66883 unsigned long size, unsigned long align)
ae4e228f 66884 {
58c5fc13
MT
66885- if (align > PAGE_SIZE) {
66886+ if (align-1 >= PAGE_SIZE) {
66887 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
df50ba0c 66888 mod->name, align, PAGE_SIZE);
58c5fc13 66889 align = PAGE_SIZE;
c6e2a6c8 66890@@ -1032,7 +1034,7 @@ struct module_attribute module_uevent =
5e856224
MT
66891 static ssize_t show_coresize(struct module_attribute *mattr,
66892 struct module_kobject *mk, char *buffer)
66893 {
66894- return sprintf(buffer, "%u\n", mk->mod->core_size);
66895+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
66896 }
66897
66898 static struct module_attribute modinfo_coresize =
c6e2a6c8 66899@@ -1041,7 +1043,7 @@ static struct module_attribute modinfo_coresize =
5e856224
MT
66900 static ssize_t show_initsize(struct module_attribute *mattr,
66901 struct module_kobject *mk, char *buffer)
66902 {
66903- return sprintf(buffer, "%u\n", mk->mod->init_size);
66904+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
66905 }
66906
66907 static struct module_attribute modinfo_initsize =
c6e2a6c8 66908@@ -1255,7 +1257,7 @@ resolve_symbol_wait(struct module *mod,
c52201e0
MT
66909 */
66910 #ifdef CONFIG_SYSFS
66911
66912-#ifdef CONFIG_KALLSYMS
66913+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
66914 static inline bool sect_empty(const Elf_Shdr *sect)
66915 {
66916 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
c6e2a6c8 66917@@ -1721,21 +1723,21 @@ static void set_section_ro_nx(void *base,
16454cff 66918
15a11c5b
MT
66919 static void unset_module_core_ro_nx(struct module *mod)
66920 {
66921- set_page_attributes(mod->module_core + mod->core_text_size,
66922- mod->module_core + mod->core_size,
66923+ set_page_attributes(mod->module_core_rw,
66924+ mod->module_core_rw + mod->core_size_rw,
66925 set_memory_x);
66926- set_page_attributes(mod->module_core,
66927- mod->module_core + mod->core_ro_size,
66928+ set_page_attributes(mod->module_core_rx,
66929+ mod->module_core_rx + mod->core_size_rx,
66930 set_memory_rw);
66931 }
16454cff 66932
15a11c5b
MT
66933 static void unset_module_init_ro_nx(struct module *mod)
66934 {
66935- set_page_attributes(mod->module_init + mod->init_text_size,
66936- mod->module_init + mod->init_size,
66937+ set_page_attributes(mod->module_init_rw,
66938+ mod->module_init_rw + mod->init_size_rw,
66939 set_memory_x);
66940- set_page_attributes(mod->module_init,
66941- mod->module_init + mod->init_ro_size,
66942+ set_page_attributes(mod->module_init_rx,
66943+ mod->module_init_rx + mod->init_size_rx,
66944 set_memory_rw);
16454cff
MT
66945 }
66946
c6e2a6c8 66947@@ -1746,14 +1748,14 @@ void set_all_modules_text_rw(void)
16454cff
MT
66948
66949 mutex_lock(&module_mutex);
66950 list_for_each_entry_rcu(mod, &modules, list) {
66951- if ((mod->module_core) && (mod->core_text_size)) {
66952- set_page_attributes(mod->module_core,
66953- mod->module_core + mod->core_text_size,
66954+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66955+ set_page_attributes(mod->module_core_rx,
66956+ mod->module_core_rx + mod->core_size_rx,
66957 set_memory_rw);
66958 }
66959- if ((mod->module_init) && (mod->init_text_size)) {
66960- set_page_attributes(mod->module_init,
66961- mod->module_init + mod->init_text_size,
66962+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66963+ set_page_attributes(mod->module_init_rx,
66964+ mod->module_init_rx + mod->init_size_rx,
66965 set_memory_rw);
66966 }
66967 }
c6e2a6c8 66968@@ -1767,14 +1769,14 @@ void set_all_modules_text_ro(void)
16454cff
MT
66969
66970 mutex_lock(&module_mutex);
66971 list_for_each_entry_rcu(mod, &modules, list) {
66972- if ((mod->module_core) && (mod->core_text_size)) {
66973- set_page_attributes(mod->module_core,
66974- mod->module_core + mod->core_text_size,
66975+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
66976+ set_page_attributes(mod->module_core_rx,
66977+ mod->module_core_rx + mod->core_size_rx,
66978 set_memory_ro);
66979 }
66980- if ((mod->module_init) && (mod->init_text_size)) {
66981- set_page_attributes(mod->module_init,
66982- mod->module_init + mod->init_text_size,
66983+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
66984+ set_page_attributes(mod->module_init_rx,
66985+ mod->module_init_rx + mod->init_size_rx,
66986 set_memory_ro);
66987 }
66988 }
c6e2a6c8 66989@@ -1820,16 +1822,19 @@ static void free_module(struct module *mod)
58c5fc13
MT
66990
66991 /* This may be NULL, but that's OK */
15a11c5b 66992 unset_module_init_ro_nx(mod);
58c5fc13
MT
66993- module_free(mod, mod->module_init);
66994+ module_free(mod, mod->module_init_rw);
66995+ module_free_exec(mod, mod->module_init_rx);
66996 kfree(mod->args);
df50ba0c 66997 percpu_modfree(mod);
6892158b 66998
58c5fc13
MT
66999 /* Free lock-classes: */
67000- lockdep_free_key_range(mod->module_core, mod->core_size);
67001+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
67002+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
67003
67004 /* Finally, free the core (containing the module structure) */
15a11c5b 67005 unset_module_core_ro_nx(mod);
58c5fc13
MT
67006- module_free(mod, mod->module_core);
67007+ module_free_exec(mod, mod->module_core_rx);
67008+ module_free(mod, mod->module_core_rw);
58c5fc13 67009
ae4e228f
MT
67010 #ifdef CONFIG_MPU
67011 update_protections(current->mm);
c6e2a6c8 67012@@ -1899,9 +1904,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
71d190be
MT
67013 int ret = 0;
67014 const struct kernel_symbol *ksym;
c6e2a6c8 67015
71d190be
MT
67016+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67017+ int is_fs_load = 0;
67018+ int register_filesystem_found = 0;
8308f9c9 67019+ char *p;
71d190be 67020+
8308f9c9
MT
67021+ p = strstr(mod->args, "grsec_modharden_fs");
67022+ if (p) {
67023+ char *endptr = p + strlen("grsec_modharden_fs");
67024+ /* copy \0 as well */
67025+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
71d190be 67026+ is_fs_load = 1;
8308f9c9 67027+ }
71d190be 67028+#endif
c6e2a6c8 67029+
71d190be
MT
67030 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
67031 const char *name = info->strtab + sym[i].st_name;
67032
67033+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67034+ /* it's a real shame this will never get ripped and copied
67035+ upstream! ;(
67036+ */
67037+ if (is_fs_load && !strcmp(name, "register_filesystem"))
67038+ register_filesystem_found = 1;
67039+#endif
67040+
67041 switch (sym[i].st_shndx) {
67042 case SHN_COMMON:
67043 /* We compiled with -fno-common. These are not
c6e2a6c8 67044@@ -1922,7 +1949,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
6892158b 67045 ksym = resolve_symbol_wait(mod, info, name);
58c5fc13 67046 /* Ok if resolved. */
57199397 67047 if (ksym && !IS_ERR(ksym)) {
ae4e228f 67048+ pax_open_kernel();
58c5fc13 67049 sym[i].st_value = ksym->value;
ae4e228f 67050+ pax_close_kernel();
58c5fc13
MT
67051 break;
67052 }
67053
c6e2a6c8 67054@@ -1941,11 +1970,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
df50ba0c 67055 secbase = (unsigned long)mod_percpu(mod);
58c5fc13 67056 else
6892158b 67057 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
ae4e228f 67058+ pax_open_kernel();
58c5fc13 67059 sym[i].st_value += secbase;
ae4e228f 67060+ pax_close_kernel();
58c5fc13
MT
67061 break;
67062 }
67063 }
71d190be
MT
67064
67065+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67066+ if (is_fs_load && !register_filesystem_found) {
67067+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
67068+ ret = -EPERM;
67069+ }
67070+#endif
67071+
67072 return ret;
67073 }
67074
c6e2a6c8 67075@@ -2049,22 +2087,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
58c5fc13 67076 || s->sh_entsize != ~0UL
6892158b 67077 || strstarts(sname, ".init"))
58c5fc13
MT
67078 continue;
67079- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
67080+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67081+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
67082+ else
67083+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
5e856224 67084 pr_debug("\t%s\n", sname);
58c5fc13 67085 }
16454cff
MT
67086- switch (m) {
67087- case 0: /* executable */
67088- mod->core_size = debug_align(mod->core_size);
58c5fc13 67089- mod->core_text_size = mod->core_size;
16454cff
MT
67090- break;
67091- case 1: /* RO: text and ro-data */
67092- mod->core_size = debug_align(mod->core_size);
67093- mod->core_ro_size = mod->core_size;
67094- break;
67095- case 3: /* whole core */
67096- mod->core_size = debug_align(mod->core_size);
67097- break;
67098- }
58c5fc13
MT
67099 }
67100
5e856224 67101 pr_debug("Init section allocation order:\n");
c6e2a6c8 67102@@ -2078,23 +2106,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
58c5fc13 67103 || s->sh_entsize != ~0UL
6892158b 67104 || !strstarts(sname, ".init"))
58c5fc13
MT
67105 continue;
67106- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
67107- | INIT_OFFSET_MASK);
67108+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
67109+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
67110+ else
67111+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
67112+ s->sh_entsize |= INIT_OFFSET_MASK;
5e856224 67113 pr_debug("\t%s\n", sname);
58c5fc13 67114 }
16454cff
MT
67115- switch (m) {
67116- case 0: /* executable */
67117- mod->init_size = debug_align(mod->init_size);
58c5fc13 67118- mod->init_text_size = mod->init_size;
16454cff
MT
67119- break;
67120- case 1: /* RO: text and ro-data */
67121- mod->init_size = debug_align(mod->init_size);
67122- mod->init_ro_size = mod->init_size;
67123- break;
67124- case 3: /* whole init */
67125- mod->init_size = debug_align(mod->init_size);
67126- break;
67127- }
58c5fc13
MT
67128 }
67129 }
67130
c6e2a6c8 67131@@ -2266,7 +2284,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
58c5fc13 67132
ae4e228f
MT
67133 /* Put symbol section at end of init part of module. */
67134 symsect->sh_flags |= SHF_ALLOC;
67135- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
67136+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
6892158b 67137 info->index.sym) | INIT_OFFSET_MASK;
5e856224 67138 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
ae4e228f 67139
c6e2a6c8 67140@@ -2281,13 +2299,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
ae4e228f
MT
67141 }
67142
67143 /* Append room for core symbols at end of core part. */
6892158b 67144- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
5e856224
MT
67145- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
67146- mod->core_size += strtab_size;
6892158b 67147+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
5e856224
MT
67148+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
67149+ mod->core_size_rx += strtab_size;
ae4e228f
MT
67150
67151 /* Put string table section at end of init part of module. */
67152 strsect->sh_flags |= SHF_ALLOC;
67153- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
67154+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
6892158b 67155 info->index.str) | INIT_OFFSET_MASK;
5e856224 67156 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
ae4e228f 67157 }
c6e2a6c8 67158@@ -2305,12 +2323,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
6892158b
MT
67159 /* Make sure we get permanent strtab: don't use info->strtab. */
67160 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
58c5fc13 67161
ae4e228f
MT
67162+ pax_open_kernel();
67163+
58c5fc13 67164 /* Set types up while we still have access to sections. */
ae4e228f 67165 for (i = 0; i < mod->num_symtab; i++)
6892158b 67166 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
ae4e228f 67167
6892158b 67168- mod->core_symtab = dst = mod->module_core + info->symoffs;
5e856224 67169- mod->core_strtab = s = mod->module_core + info->stroffs;
6892158b 67170+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
5e856224 67171+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
ae4e228f
MT
67172 src = mod->symtab;
67173 *dst = *src;
5e856224 67174 *s++ = 0;
c6e2a6c8 67175@@ -2323,6 +2343,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
5e856224 67176 s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
ae4e228f
MT
67177 }
67178 mod->core_num_syms = ndst;
58c5fc13 67179+
ae4e228f 67180+ pax_close_kernel();
58c5fc13
MT
67181 }
67182 #else
6892158b 67183 static inline void layout_symtab(struct module *mod, struct load_info *info)
c6e2a6c8 67184@@ -2356,17 +2378,33 @@ void * __weak module_alloc(unsigned long size)
6e9df6a3 67185 return size == 0 ? NULL : vmalloc_exec(size);
58c5fc13
MT
67186 }
67187
67188-static void *module_alloc_update_bounds(unsigned long size)
67189+static void *module_alloc_update_bounds_rw(unsigned long size)
67190 {
67191 void *ret = module_alloc(size);
67192
67193 if (ret) {
57199397 67194 mutex_lock(&module_mutex);
58c5fc13
MT
67195 /* Update module bounds. */
67196- if ((unsigned long)ret < module_addr_min)
67197- module_addr_min = (unsigned long)ret;
67198- if ((unsigned long)ret + size > module_addr_max)
67199- module_addr_max = (unsigned long)ret + size;
67200+ if ((unsigned long)ret < module_addr_min_rw)
67201+ module_addr_min_rw = (unsigned long)ret;
67202+ if ((unsigned long)ret + size > module_addr_max_rw)
67203+ module_addr_max_rw = (unsigned long)ret + size;
57199397 67204+ mutex_unlock(&module_mutex);
58c5fc13
MT
67205+ }
67206+ return ret;
67207+}
67208+
67209+static void *module_alloc_update_bounds_rx(unsigned long size)
67210+{
67211+ void *ret = module_alloc_exec(size);
67212+
67213+ if (ret) {
57199397 67214+ mutex_lock(&module_mutex);
58c5fc13
MT
67215+ /* Update module bounds. */
67216+ if ((unsigned long)ret < module_addr_min_rx)
67217+ module_addr_min_rx = (unsigned long)ret;
67218+ if ((unsigned long)ret + size > module_addr_max_rx)
67219+ module_addr_max_rx = (unsigned long)ret + size;
57199397 67220 mutex_unlock(&module_mutex);
58c5fc13
MT
67221 }
67222 return ret;
c6e2a6c8 67223@@ -2543,8 +2581,14 @@ static struct module *setup_load_info(struct load_info *info)
fe2de317
MT
67224 static int check_modinfo(struct module *mod, struct load_info *info)
67225 {
67226 const char *modmagic = get_modinfo(info, "vermagic");
67227+ const char *license = get_modinfo(info, "license");
67228 int err;
67229
67230+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
67231+ if (!license || !license_is_gpl_compatible(license))
67232+ return -ENOEXEC;
67233+#endif
67234+
67235 /* This is allowed: modprobe --force will invalidate it. */
67236 if (!modmagic) {
67237 err = try_to_force_load(mod, "bad vermagic");
c6e2a6c8 67238@@ -2567,7 +2611,7 @@ static int check_modinfo(struct module *mod, struct load_info *info)
fe2de317
MT
67239 }
67240
67241 /* Set up license info based on the info section */
67242- set_license(mod, get_modinfo(info, "license"));
67243+ set_license(mod, license);
67244
67245 return 0;
67246 }
c6e2a6c8 67247@@ -2661,7 +2705,7 @@ static int move_module(struct module *mod, struct load_info *info)
6892158b 67248 void *ptr;
58c5fc13
MT
67249
67250 /* Do the allocs. */
67251- ptr = module_alloc_update_bounds(mod->core_size);
67252+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
67253 /*
67254 * The pointer to this block is stored in the module structure
67255 * which is inside the block. Just mark it as not being a
c6e2a6c8 67256@@ -2671,23 +2715,50 @@ static int move_module(struct module *mod, struct load_info *info)
6892158b
MT
67257 if (!ptr)
67258 return -ENOMEM;
67259
58c5fc13
MT
67260- memset(ptr, 0, mod->core_size);
67261- mod->module_core = ptr;
67262+ memset(ptr, 0, mod->core_size_rw);
67263+ mod->module_core_rw = ptr;
67264
67265- ptr = module_alloc_update_bounds(mod->init_size);
67266+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
67267 /*
67268 * The pointer to this block is stored in the module structure
67269 * which is inside the block. This block doesn't need to be
67270 * scanned as it contains data and code that will be freed
67271 * after the module is initialized.
67272 */
67273- kmemleak_ignore(ptr);
67274- if (!ptr && mod->init_size) {
6892158b 67275- module_free(mod, mod->module_core);
58c5fc13
MT
67276+ kmemleak_not_leak(ptr);
67277+ if (!ptr && mod->init_size_rw) {
6892158b 67278+ module_free(mod, mod->module_core_rw);
16454cff
MT
67279 return -ENOMEM;
67280 }
67281- memset(ptr, 0, mod->init_size);
67282- mod->module_init = ptr;
58c5fc13
MT
67283+ memset(ptr, 0, mod->init_size_rw);
67284+ mod->module_init_rw = ptr;
67285+
67286+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
67287+ kmemleak_not_leak(ptr);
67288+ if (!ptr) {
6892158b
MT
67289+ module_free(mod, mod->module_init_rw);
67290+ module_free(mod, mod->module_core_rw);
c52201e0
MT
67291+ return -ENOMEM;
67292+ }
58c5fc13 67293+
ae4e228f 67294+ pax_open_kernel();
58c5fc13 67295+ memset(ptr, 0, mod->core_size_rx);
ae4e228f 67296+ pax_close_kernel();
58c5fc13
MT
67297+ mod->module_core_rx = ptr;
67298+
67299+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
67300+ kmemleak_not_leak(ptr);
67301+ if (!ptr && mod->init_size_rx) {
6892158b
MT
67302+ module_free_exec(mod, mod->module_core_rx);
67303+ module_free(mod, mod->module_init_rw);
67304+ module_free(mod, mod->module_core_rw);
16454cff
MT
67305+ return -ENOMEM;
67306+ }
58c5fc13 67307+
ae4e228f 67308+ pax_open_kernel();
58c5fc13 67309+ memset(ptr, 0, mod->init_size_rx);
ae4e228f 67310+ pax_close_kernel();
58c5fc13
MT
67311+ mod->module_init_rx = ptr;
67312
67313 /* Transfer each section which specifies SHF_ALLOC */
5e856224 67314 pr_debug("final section addresses:\n");
c6e2a6c8 67315@@ -2698,16 +2769,45 @@ static int move_module(struct module *mod, struct load_info *info)
6892158b 67316 if (!(shdr->sh_flags & SHF_ALLOC))
58c5fc13
MT
67317 continue;
67318
6892158b 67319- if (shdr->sh_entsize & INIT_OFFSET_MASK)
58c5fc13 67320- dest = mod->module_init
6892158b 67321- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58c5fc13 67322- else
6892158b
MT
67323- dest = mod->module_core + shdr->sh_entsize;
67324+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
67325+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58c5fc13 67326+ dest = mod->module_init_rw
6892158b 67327+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58c5fc13
MT
67328+ else
67329+ dest = mod->module_init_rx
6892158b 67330+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58c5fc13 67331+ } else {
6892158b
MT
67332+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
67333+ dest = mod->module_core_rw + shdr->sh_entsize;
58c5fc13 67334+ else
6892158b 67335+ dest = mod->module_core_rx + shdr->sh_entsize;
58c5fc13
MT
67336+ }
67337+
6892158b
MT
67338+ if (shdr->sh_type != SHT_NOBITS) {
67339+
58c5fc13 67340+#ifdef CONFIG_PAX_KERNEXEC
bc901d79
MT
67341+#ifdef CONFIG_X86_64
67342+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
67343+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
67344+#endif
6892158b 67345+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
ae4e228f 67346+ pax_open_kernel();
6892158b 67347+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
ae4e228f 67348+ pax_close_kernel();
58c5fc13
MT
67349+ } else
67350+#endif
6892158b
MT
67351
67352- if (shdr->sh_type != SHT_NOBITS)
67353 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58c5fc13
MT
67354+ }
67355 /* Update sh_addr to point to copy in image. */
6892158b 67356- shdr->sh_addr = (unsigned long)dest;
58c5fc13
MT
67357+
67358+#ifdef CONFIG_PAX_KERNEXEC
6892158b
MT
67359+ if (shdr->sh_flags & SHF_EXECINSTR)
67360+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
58c5fc13
MT
67361+ else
67362+#endif
67363+
6892158b 67364+ shdr->sh_addr = (unsigned long)dest;
5e856224
MT
67365 pr_debug("\t0x%lx %s\n",
67366 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
58c5fc13 67367 }
c6e2a6c8 67368@@ -2758,12 +2858,12 @@ static void flush_module_icache(const struct module *mod)
58c5fc13
MT
67369 * Do it before processing of module parameters, so the module
67370 * can provide parameter accessor functions of its own.
67371 */
67372- if (mod->module_init)
67373- flush_icache_range((unsigned long)mod->module_init,
67374- (unsigned long)mod->module_init
67375- + mod->init_size);
67376- flush_icache_range((unsigned long)mod->module_core,
67377- (unsigned long)mod->module_core + mod->core_size);
67378+ if (mod->module_init_rx)
67379+ flush_icache_range((unsigned long)mod->module_init_rx,
67380+ (unsigned long)mod->module_init_rx
67381+ + mod->init_size_rx);
67382+ flush_icache_range((unsigned long)mod->module_core_rx,
67383+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
67384
67385 set_fs(old_fs);
6892158b 67386 }
c6e2a6c8 67387@@ -2833,8 +2933,10 @@ out:
5e856224 67388 static void module_deallocate(struct module *mod, struct load_info *info)
6892158b 67389 {
6892158b 67390 percpu_modfree(mod);
58c5fc13 67391- module_free(mod, mod->module_init);
58c5fc13
MT
67392- module_free(mod, mod->module_core);
67393+ module_free_exec(mod, mod->module_init_rx);
58c5fc13 67394+ module_free_exec(mod, mod->module_core_rx);
58c5fc13 67395+ module_free(mod, mod->module_init_rw);
58c5fc13 67396+ module_free(mod, mod->module_core_rw);
6892158b
MT
67397 }
67398
6e9df6a3 67399 int __weak module_finalize(const Elf_Ehdr *hdr,
c6e2a6c8 67400@@ -2898,9 +3000,38 @@ static struct module *load_module(void __user *umod,
71d190be
MT
67401 if (err)
67402 goto free_unload;
67403
67404+ /* Now copy in args */
67405+ mod->args = strndup_user(uargs, ~0UL >> 1);
67406+ if (IS_ERR(mod->args)) {
67407+ err = PTR_ERR(mod->args);
67408+ goto free_unload;
67409+ }
67410+
67411 /* Set up MODINFO_ATTR fields */
67412 setup_modinfo(mod, &info);
67413
67414+#ifdef CONFIG_GRKERNSEC_MODHARDEN
67415+ {
67416+ char *p, *p2;
67417+
67418+ if (strstr(mod->args, "grsec_modharden_netdev")) {
67419+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
67420+ err = -EPERM;
67421+ goto free_modinfo;
67422+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
67423+ p += strlen("grsec_modharden_normal");
67424+ p2 = strstr(p, "_");
67425+ if (p2) {
67426+ *p2 = '\0';
67427+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
67428+ *p2 = '_';
67429+ }
67430+ err = -EPERM;
67431+ goto free_modinfo;
67432+ }
67433+ }
67434+#endif
67435+
67436 /* Fix up syms, so that st_value is a pointer to location. */
67437 err = simplify_symbols(mod, &info);
67438 if (err < 0)
c6e2a6c8 67439@@ -2916,13 +3047,6 @@ static struct module *load_module(void __user *umod,
71d190be
MT
67440
67441 flush_module_icache(mod);
67442
67443- /* Now copy in args */
67444- mod->args = strndup_user(uargs, ~0UL >> 1);
67445- if (IS_ERR(mod->args)) {
67446- err = PTR_ERR(mod->args);
67447- goto free_arch_cleanup;
67448- }
67449-
67450 /* Mark state as coming so strong_try_module_get() ignores us. */
67451 mod->state = MODULE_STATE_COMING;
67452
c6e2a6c8 67453@@ -2980,11 +3104,10 @@ static struct module *load_module(void __user *umod,
71d190be
MT
67454 unlock:
67455 mutex_unlock(&module_mutex);
67456 synchronize_sched();
67457- kfree(mod->args);
67458- free_arch_cleanup:
67459 module_arch_cleanup(mod);
67460 free_modinfo:
67461 free_modinfo(mod);
67462+ kfree(mod->args);
67463 free_unload:
67464 module_unload_free(mod);
67465 free_module:
c6e2a6c8 67466@@ -3025,16 +3148,16 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
16454cff
MT
67467 MODULE_STATE_COMING, mod);
67468
67469 /* Set RO and NX regions for core */
67470- set_section_ro_nx(mod->module_core,
67471- mod->core_text_size,
67472- mod->core_ro_size,
67473- mod->core_size);
67474+ set_section_ro_nx(mod->module_core_rx,
67475+ mod->core_size_rx,
67476+ mod->core_size_rx,
67477+ mod->core_size_rx);
67478
67479 /* Set RO and NX regions for init */
67480- set_section_ro_nx(mod->module_init,
67481- mod->init_text_size,
67482- mod->init_ro_size,
67483- mod->init_size);
67484+ set_section_ro_nx(mod->module_init_rx,
67485+ mod->init_size_rx,
67486+ mod->init_size_rx,
67487+ mod->init_size_rx);
67488
67489 do_mod_ctors(mod);
67490 /* Start the module */
c6e2a6c8 67491@@ -3080,11 +3203,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
ae4e228f
MT
67492 mod->strtab = mod->core_strtab;
67493 #endif
15a11c5b 67494 unset_module_init_ro_nx(mod);
58c5fc13
MT
67495- module_free(mod, mod->module_init);
67496- mod->module_init = NULL;
67497- mod->init_size = 0;
15a11c5b 67498- mod->init_ro_size = 0;
58c5fc13
MT
67499- mod->init_text_size = 0;
67500+ module_free(mod, mod->module_init_rw);
67501+ module_free_exec(mod, mod->module_init_rx);
67502+ mod->module_init_rw = NULL;
67503+ mod->module_init_rx = NULL;
67504+ mod->init_size_rw = 0;
67505+ mod->init_size_rx = 0;
67506 mutex_unlock(&module_mutex);
67507
67508 return 0;
c6e2a6c8 67509@@ -3115,10 +3239,16 @@ static const char *get_ksymbol(struct module *mod,
58c5fc13
MT
67510 unsigned long nextval;
67511
67512 /* At worse, next value is at end of module */
67513- if (within_module_init(addr, mod))
67514- nextval = (unsigned long)mod->module_init+mod->init_text_size;
67515+ if (within_module_init_rx(addr, mod))
67516+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
67517+ else if (within_module_init_rw(addr, mod))
67518+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
67519+ else if (within_module_core_rx(addr, mod))
67520+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
67521+ else if (within_module_core_rw(addr, mod))
67522+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
67523 else
67524- nextval = (unsigned long)mod->module_core+mod->core_text_size;
67525+ return NULL;
67526
66a7e928 67527 /* Scan for closest preceding symbol, and next symbol. (ELF
58c5fc13 67528 starts real symbols at 1). */
c6e2a6c8 67529@@ -3353,7 +3483,7 @@ static int m_show(struct seq_file *m, void *p)
58c5fc13
MT
67530 char buf[8];
67531
67532 seq_printf(m, "%s %u",
67533- mod->name, mod->init_size + mod->core_size);
67534+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
67535 print_unload_info(m, mod);
67536
67537 /* Informative for users. */
c6e2a6c8 67538@@ -3362,7 +3492,7 @@ static int m_show(struct seq_file *m, void *p)
58c5fc13
MT
67539 mod->state == MODULE_STATE_COMING ? "Loading":
67540 "Live");
67541 /* Used by oprofile and other similar tools. */
66a7e928
MT
67542- seq_printf(m, " 0x%pK", mod->module_core);
67543+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58c5fc13
MT
67544
67545 /* Taints info */
67546 if (mod->taints)
c6e2a6c8 67547@@ -3398,7 +3528,17 @@ static const struct file_operations proc_modules_operations = {
58c5fc13
MT
67548
67549 static int __init proc_modules_init(void)
67550 {
67551+#ifndef CONFIG_GRKERNSEC_HIDESYM
67552+#ifdef CONFIG_GRKERNSEC_PROC_USER
67553+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67554+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67555+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
67556+#else
67557 proc_create("modules", 0, NULL, &proc_modules_operations);
67558+#endif
67559+#else
67560+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
67561+#endif
67562 return 0;
67563 }
67564 module_init(proc_modules_init);
c6e2a6c8 67565@@ -3457,12 +3597,12 @@ struct module *__module_address(unsigned long addr)
58c5fc13
MT
67566 {
67567 struct module *mod;
67568
67569- if (addr < module_addr_min || addr > module_addr_max)
67570+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
67571+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
67572 return NULL;
67573
67574 list_for_each_entry_rcu(mod, &modules, list)
67575- if (within_module_core(addr, mod)
67576- || within_module_init(addr, mod))
67577+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
67578 return mod;
67579 return NULL;
67580 }
c6e2a6c8 67581@@ -3496,11 +3636,20 @@ bool is_module_text_address(unsigned long addr)
58c5fc13
MT
67582 */
67583 struct module *__module_text_address(unsigned long addr)
67584 {
67585- struct module *mod = __module_address(addr);
67586+ struct module *mod;
67587+
67588+#ifdef CONFIG_X86_32
67589+ addr = ktla_ktva(addr);
67590+#endif
67591+
67592+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
67593+ return NULL;
67594+
67595+ mod = __module_address(addr);
67596+
67597 if (mod) {
67598 /* Make sure it's within the text section. */
67599- if (!within(addr, mod->module_init, mod->init_text_size)
67600- && !within(addr, mod->module_core, mod->core_text_size))
67601+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
67602 mod = NULL;
67603 }
67604 return mod;
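The kernel/module.c changes above split every module into separate RW and RX allocations and keep per-kind address bounds (module_addr_min_rw/_rx and their _max counterparts), which the reworked __module_address() consults before walking the module list. Below is a simplified, userspace-only sketch of that bookkeeping, with invented names and plain static arrays standing in for module_alloc()/module_alloc_exec(); it is an illustration of the idea, not the patch code.

#include <stdio.h>

struct bounds {
    unsigned long min, max;
};

/* One window per allocation kind, as with module_addr_min_rw/_rx above. */
static struct bounds rw_bounds = { ~0UL, 0 };
static struct bounds rx_bounds = { ~0UL, 0 };

/* Same update rule as module_alloc_update_bounds_rw()/_rx(). */
static void update_bounds(struct bounds *b, unsigned long start, unsigned long size)
{
    if (start < b->min)
        b->min = start;
    if (start + size > b->max)
        b->max = start + size;
}

/* Mirrors the precheck in the reworked __module_address(): an address
 * outside both windows cannot belong to any module. */
static int inside_tracked_ranges(unsigned long addr)
{
    if ((addr < rx_bounds.min || addr > rx_bounds.max) &&
        (addr < rw_bounds.min || addr > rw_bounds.max))
        return 0;
    return 1;
}

int main(void)
{
    static char fake_rx[64];    /* stand-in for an RX (text) allocation */
    static char fake_rw[128];   /* stand-in for an RW (data) allocation */
    int elsewhere;

    update_bounds(&rx_bounds, (unsigned long)fake_rx, sizeof(fake_rx));
    update_bounds(&rw_bounds, (unsigned long)fake_rw, sizeof(fake_rw));

    printf("inside rx block: %d\n", inside_tracked_ranges((unsigned long)&fake_rx[8]));
    printf("stack address:   %d\n", inside_tracked_ranges((unsigned long)&elsewhere));
    return 0;
}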
fe2de317 67605diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
4c928ab7 67606index 7e3443f..b2a1e6b 100644
fe2de317
MT
67607--- a/kernel/mutex-debug.c
67608+++ b/kernel/mutex-debug.c
67609@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
71d190be
MT
67610 }
67611
67612 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67613- struct thread_info *ti)
67614+ struct task_struct *task)
67615 {
67616 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
67617
67618 /* Mark the current thread as blocked on the lock: */
67619- ti->task->blocked_on = waiter;
67620+ task->blocked_on = waiter;
67621 }
67622
67623 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67624- struct thread_info *ti)
67625+ struct task_struct *task)
67626 {
67627 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
67628- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
67629- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
67630- ti->task->blocked_on = NULL;
67631+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
66a7e928 67632+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
71d190be
MT
67633+ task->blocked_on = NULL;
67634
67635 list_del_init(&waiter->list);
67636 waiter->task = NULL;
fe2de317
MT
67637diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
67638index 0799fd3..d06ae3b 100644
67639--- a/kernel/mutex-debug.h
67640+++ b/kernel/mutex-debug.h
67641@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
71d190be
MT
67642 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
67643 extern void debug_mutex_add_waiter(struct mutex *lock,
67644 struct mutex_waiter *waiter,
67645- struct thread_info *ti);
67646+ struct task_struct *task);
67647 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
67648- struct thread_info *ti);
67649+ struct task_struct *task);
67650 extern void debug_mutex_unlock(struct mutex *lock);
67651 extern void debug_mutex_init(struct mutex *lock, const char *name,
67652 struct lock_class_key *key);
fe2de317 67653diff --git a/kernel/mutex.c b/kernel/mutex.c
c6e2a6c8 67654index a307cc9..27fd2e9 100644
fe2de317
MT
67655--- a/kernel/mutex.c
67656+++ b/kernel/mutex.c
67657@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67658 spin_lock_mutex(&lock->wait_lock, flags);
67659
67660 debug_mutex_lock_common(lock, &waiter);
67661- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
67662+ debug_mutex_add_waiter(lock, &waiter, task);
67663
67664 /* add waiting tasks to the end of the waitqueue (FIFO): */
67665 list_add_tail(&waiter.list, &lock->wait_list);
67666@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
67667 * TASK_UNINTERRUPTIBLE case.)
67668 */
67669 if (unlikely(signal_pending_state(state, task))) {
67670- mutex_remove_waiter(lock, &waiter,
67671- task_thread_info(task));
67672+ mutex_remove_waiter(lock, &waiter, task);
67673 mutex_release(&lock->dep_map, 1, ip);
67674 spin_unlock_mutex(&lock->wait_lock, flags);
67675
c6e2a6c8 67676@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
fe2de317
MT
67677 done:
67678 lock_acquired(&lock->dep_map, ip);
67679 /* got the lock - rejoice! */
67680- mutex_remove_waiter(lock, &waiter, current_thread_info());
67681+ mutex_remove_waiter(lock, &waiter, task);
67682 mutex_set_owner(lock);
67683
67684 /* set it to 0 if there are no waiters left: */
fe2de317 67685diff --git a/kernel/panic.c b/kernel/panic.c
c1e3898a 67686index 9ed023b..e49543e 100644
fe2de317
MT
67687--- a/kernel/panic.c
67688+++ b/kernel/panic.c
5e856224 67689@@ -402,7 +402,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
bc901d79
MT
67690 const char *board;
67691
67692 printk(KERN_WARNING "------------[ cut here ]------------\n");
67693- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
67694+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
67695 board = dmi_get_system_info(DMI_PRODUCT_NAME);
67696 if (board)
67697 printk(KERN_WARNING "Hardware name: %s\n", board);
5e856224 67698@@ -457,7 +457,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58c5fc13
MT
67699 */
67700 void __stack_chk_fail(void)
67701 {
67702- panic("stack-protector: Kernel stack is corrupted in: %p\n",
67703+ dump_stack();
bc901d79 67704+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58c5fc13
MT
67705 __builtin_return_address(0));
67706 }
67707 EXPORT_SYMBOL(__stack_chk_fail);
fe2de317 67708diff --git a/kernel/pid.c b/kernel/pid.c
5e856224 67709index 9f08dfa..6765c40 100644
fe2de317
MT
67710--- a/kernel/pid.c
67711+++ b/kernel/pid.c
58c5fc13
MT
67712@@ -33,6 +33,7 @@
67713 #include <linux/rculist.h>
67714 #include <linux/bootmem.h>
67715 #include <linux/hash.h>
67716+#include <linux/security.h>
67717 #include <linux/pid_namespace.h>
67718 #include <linux/init_task.h>
67719 #include <linux/syscalls.h>
fe2de317 67720@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
58c5fc13
MT
67721
67722 int pid_max = PID_MAX_DEFAULT;
67723
67724-#define RESERVED_PIDS 300
67725+#define RESERVED_PIDS 500
67726
67727 int pid_max_min = RESERVED_PIDS + 1;
67728 int pid_max_max = PID_MAX_LIMIT;
5e856224 67729@@ -420,10 +421,18 @@ EXPORT_SYMBOL(pid_task);
58c5fc13
MT
67730 */
67731 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
67732 {
58c5fc13 67733+ struct task_struct *task;
bc901d79 67734+
4c928ab7
MT
67735 rcu_lockdep_assert(rcu_read_lock_held(),
67736 "find_task_by_pid_ns() needs rcu_read_lock()"
67737 " protection");
bc901d79 67738- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
4c928ab7 67739+
58c5fc13
MT
67740+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
67741+
67742+ if (gr_pid_is_chrooted(task))
67743+ return NULL;
67744+
67745+ return task;
67746 }
67747
67748 struct task_struct *find_task_by_vpid(pid_t vnr)
5e856224 67749@@ -431,6 +440,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
15a11c5b
MT
67750 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
67751 }
67752
67753+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
67754+{
4c928ab7
MT
67755+ rcu_lockdep_assert(rcu_read_lock_held(),
67756+ "find_task_by_pid_ns() needs rcu_read_lock()"
67757+ " protection");
15a11c5b
MT
67758+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
67759+}
67760+
67761 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
67762 {
67763 struct pid *pid;
fe2de317 67764diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
5e856224 67765index 125cb67..a4d1c30 100644
fe2de317
MT
67766--- a/kernel/posix-cpu-timers.c
67767+++ b/kernel/posix-cpu-timers.c
58c5fc13
MT
67768@@ -6,6 +6,7 @@
67769 #include <linux/posix-timers.h>
67770 #include <linux/errno.h>
67771 #include <linux/math64.h>
67772+#include <linux/security.h>
67773 #include <asm/uaccess.h>
67774 #include <linux/kernel_stat.h>
ae4e228f 67775 #include <trace/events/timer.h>
5e856224 67776@@ -1578,14 +1579,14 @@ struct k_clock clock_posix_cpu = {
66a7e928
MT
67777
67778 static __init int init_posix_cpu_timers(void)
67779 {
67780- struct k_clock process = {
15a11c5b 67781+ static struct k_clock process = {
66a7e928
MT
67782 .clock_getres = process_cpu_clock_getres,
67783 .clock_get = process_cpu_clock_get,
67784 .timer_create = process_cpu_timer_create,
67785 .nsleep = process_cpu_nsleep,
67786 .nsleep_restart = process_cpu_nsleep_restart,
67787 };
67788- struct k_clock thread = {
15a11c5b 67789+ static struct k_clock thread = {
66a7e928
MT
67790 .clock_getres = thread_cpu_clock_getres,
67791 .clock_get = thread_cpu_clock_get,
67792 .timer_create = thread_cpu_timer_create,
fe2de317 67793diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
4c928ab7 67794index 69185ae..cc2847a 100644
fe2de317
MT
67795--- a/kernel/posix-timers.c
67796+++ b/kernel/posix-timers.c
66a7e928 67797@@ -43,6 +43,7 @@
bc901d79 67798 #include <linux/idr.h>
66a7e928 67799 #include <linux/posix-clock.h>
bc901d79
MT
67800 #include <linux/posix-timers.h>
67801+#include <linux/grsecurity.h>
67802 #include <linux/syscalls.h>
67803 #include <linux/wait.h>
67804 #include <linux/workqueue.h>
15a11c5b
MT
67805@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
67806 * which we beg off on and pass to do_sys_settimeofday().
67807 */
67808
67809-static struct k_clock posix_clocks[MAX_CLOCKS];
67810+static struct k_clock *posix_clocks[MAX_CLOCKS];
67811
67812 /*
67813 * These ones are defined below.
fe2de317 67814@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
66a7e928
MT
67815 */
67816 static __init int init_posix_timers(void)
67817 {
67818- struct k_clock clock_realtime = {
15a11c5b 67819+ static struct k_clock clock_realtime = {
66a7e928
MT
67820 .clock_getres = hrtimer_get_res,
67821 .clock_get = posix_clock_realtime_get,
67822 .clock_set = posix_clock_realtime_set,
fe2de317 67823@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
66a7e928
MT
67824 .timer_get = common_timer_get,
67825 .timer_del = common_timer_del,
67826 };
67827- struct k_clock clock_monotonic = {
15a11c5b 67828+ static struct k_clock clock_monotonic = {
66a7e928
MT
67829 .clock_getres = hrtimer_get_res,
67830 .clock_get = posix_ktime_get_ts,
67831 .nsleep = common_nsleep,
fe2de317 67832@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
66a7e928
MT
67833 .timer_get = common_timer_get,
67834 .timer_del = common_timer_del,
67835 };
67836- struct k_clock clock_monotonic_raw = {
15a11c5b 67837+ static struct k_clock clock_monotonic_raw = {
66a7e928
MT
67838 .clock_getres = hrtimer_get_res,
67839 .clock_get = posix_get_monotonic_raw,
67840 };
67841- struct k_clock clock_realtime_coarse = {
15a11c5b 67842+ static struct k_clock clock_realtime_coarse = {
66a7e928
MT
67843 .clock_getres = posix_get_coarse_res,
67844 .clock_get = posix_get_realtime_coarse,
67845 };
67846- struct k_clock clock_monotonic_coarse = {
15a11c5b 67847+ static struct k_clock clock_monotonic_coarse = {
66a7e928
MT
67848 .clock_getres = posix_get_coarse_res,
67849 .clock_get = posix_get_monotonic_coarse,
67850 };
67851- struct k_clock clock_boottime = {
15a11c5b 67852+ static struct k_clock clock_boottime = {
66a7e928
MT
67853 .clock_getres = hrtimer_get_res,
67854 .clock_get = posix_get_boottime,
67855 .nsleep = common_nsleep,
4c928ab7 67856@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
15a11c5b
MT
67857 return;
67858 }
66a7e928 67859
15a11c5b
MT
67860- posix_clocks[clock_id] = *new_clock;
67861+ posix_clocks[clock_id] = new_clock;
66a7e928 67862 }
15a11c5b 67863 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
66a7e928 67864
4c928ab7 67865@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
66a7e928 67866 return (id & CLOCKFD_MASK) == CLOCKFD ?
15a11c5b 67867 &clock_posix_dynamic : &clock_posix_cpu;
66a7e928 67868
15a11c5b
MT
67869- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
67870+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
67871 return NULL;
67872- return &posix_clocks[id];
67873+ return posix_clocks[id];
67874 }
66a7e928 67875
15a11c5b 67876 static int common_timer_create(struct k_itimer *new_timer)
4c928ab7 67877@@ -959,6 +960,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
bc901d79
MT
67878 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
67879 return -EFAULT;
df50ba0c 67880
bc901d79
MT
67881+ /* only the CLOCK_REALTIME clock can be set, all other clocks
67882+ have their clock_set fptr set to a nosettime dummy function
67883+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
67884+ call common_clock_set, which calls do_sys_settimeofday, which
67885+ we hook
67886+ */
67887+
66a7e928 67888 return kc->clock_set(which_clock, &new_tp);
bc901d79
MT
67889 }
67890
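
The two posix-timers.c hunks above replace the posix_clocks[] array of k_clock copies with an array of pointers to statically allocated k_clock structures: registration now stores the pointer, and lookup treats an empty (NULL) slot as unregistered. A minimal userspace sketch of the same registration pattern, assuming illustrative names (clock_ops, register_clock, lookup_clock) rather than the kernel API:

#include <stddef.h>
#include <stdio.h>

/* illustrative stand-in for struct k_clock */
struct clock_ops {
        int (*getres)(void);
        int (*get)(void);
};

#define MAX_CLOCKS 4

/* table of pointers: the ops objects themselves stay static (and can be const) */
static const struct clock_ops *clock_table[MAX_CLOCKS];

static void register_clock(int id, const struct clock_ops *ops)
{
        if (id < 0 || id >= MAX_CLOCKS)
                return;
        clock_table[id] = ops;          /* store the pointer, no structure copy */
}

static const struct clock_ops *lookup_clock(int id)
{
        /* mirrors the patched clockid_to_kclock(): reject empty slots first */
        if (id < 0 || id >= MAX_CLOCKS || !clock_table[id] || !clock_table[id]->getres)
                return NULL;
        return clock_table[id];
}

static int realtime_getres(void) { return 1; }
static int realtime_get(void)    { return 42; }

int main(void)
{
        static const struct clock_ops clock_realtime = {
                .getres = realtime_getres,
                .get    = realtime_get,
        };

        register_clock(0, &clock_realtime);

        printf("clock 0 registered: %s\n", lookup_clock(0) ? "yes" : "no");
        printf("lookup of empty slot: %s\n", lookup_clock(1) ? "found" : "NULL");
        return 0;
}

Keeping the ops objects static means they never have to be copied into writable storage, which is what lets the rest of the patch treat them as read-only data; the extra !posix_clocks[id] test in clockid_to_kclock() is the price of using NULL as the "not registered" marker.
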
fe2de317
MT
67891diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
67892index d523593..68197a4 100644
67893--- a/kernel/power/poweroff.c
67894+++ b/kernel/power/poweroff.c
67895@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
58c5fc13
MT
67896 .enable_mask = SYSRQ_ENABLE_BOOT,
67897 };
67898
67899-static int pm_sysrq_init(void)
67900+static int __init pm_sysrq_init(void)
67901 {
67902 register_sysrq_key('o', &sysrq_poweroff_op);
67903 return 0;
fe2de317 67904diff --git a/kernel/power/process.c b/kernel/power/process.c
c6e2a6c8 67905index 19db29f..33b52b6 100644
fe2de317
MT
67906--- a/kernel/power/process.c
67907+++ b/kernel/power/process.c
5e856224 67908@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
58c5fc13
MT
67909 u64 elapsed_csecs64;
67910 unsigned int elapsed_csecs;
bc901d79 67911 bool wakeup = false;
58c5fc13
MT
67912+ bool timedout = false;
67913
67914 do_gettimeofday(&start);
67915
5e856224 67916@@ -43,6 +44,8 @@ static int try_to_freeze_tasks(bool user_only)
6892158b 67917
ae4e228f 67918 while (true) {
58c5fc13
MT
67919 todo = 0;
67920+ if (time_after(jiffies, end_time))
67921+ timedout = true;
67922 read_lock(&tasklist_lock);
67923 do_each_thread(g, p) {
5e856224 67924 if (p == current || !freeze_task(p))
c6e2a6c8
MT
67925@@ -58,9 +61,13 @@ static int try_to_freeze_tasks(bool user_only)
67926 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
67927 * transition can't race with task state testing here.
58c5fc13
MT
67928 */
67929- if (!task_is_stopped_or_traced(p) &&
67930- !freezer_should_skip(p))
67931+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
67932 todo++;
67933+ if (timedout) {
67934+ printk(KERN_ERR "Task refusing to freeze:\n");
67935+ sched_show_task(p);
67936+ }
67937+ }
67938 } while_each_thread(g, p);
67939 read_unlock(&tasklist_lock);
6892158b 67940
c6e2a6c8 67941@@ -69,7 +76,7 @@ static int try_to_freeze_tasks(bool user_only)
6892158b
MT
67942 todo += wq_busy;
67943 }
67944
ae4e228f
MT
67945- if (!todo || time_after(jiffies, end_time))
67946+ if (!todo || timedout)
67947 break;
67948
16454cff 67949 if (pm_wakeup_pending()) {
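
The try_to_freeze_tasks() change above latches a timedout flag once per pass over the task list, so the final pass can name every task that still refuses to freeze (via sched_show_task()) before the loop exits on the unchanged "!todo || timedout" condition. A rough userspace sketch of that latch-then-report loop, with a fake workload standing in for the task scan:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static double now_secs(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
}

/* pretend workload: one task never reaches the "frozen" state */
static int tasks_still_running(int round)
{
        return round < 3 ? 3 - round : 1;
}

int main(void)
{
        const double end_time = now_secs() + 0.2;       /* stand-in for the jiffies deadline */
        bool timedout = false;
        int todo, round = 0;

        while (1) {
                todo = 0;
                if (now_secs() > end_time)
                        timedout = true;                /* decide once per pass, before scanning */

                int left = tasks_still_running(round++);
                todo += left;
                if (left && timedout)
                        printf("task refusing to freeze (still running: %d)\n", left);

                if (!todo || timedout)
                        break;                          /* same exit condition as the patched loop */

                nanosleep(&(struct timespec){ 0, 10 * 1000 * 1000 }, NULL);
        }

        printf("loop finished: todo=%d timedout=%d\n", todo, timedout);
        return 0;
}
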
fe2de317 67950diff --git a/kernel/printk.c b/kernel/printk.c
c6e2a6c8 67951index b663c2c..1d6ba7a 100644
fe2de317
MT
67952--- a/kernel/printk.c
67953+++ b/kernel/printk.c
c6e2a6c8 67954@@ -316,6 +316,11 @@ static int check_syslog_permissions(int type, bool from_file)
16454cff
MT
67955 if (from_file && type != SYSLOG_ACTION_OPEN)
67956 return 0;
58c5fc13
MT
67957
67958+#ifdef CONFIG_GRKERNSEC_DMESG
16454cff 67959+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58c5fc13
MT
67960+ return -EPERM;
67961+#endif
67962+
16454cff
MT
67963 if (syslog_action_restricted(type)) {
67964 if (capable(CAP_SYSLOG))
67965 return 0;
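
The printk.c hunk adds a CONFIG_GRKERNSEC_DMESG gate to check_syslog_permissions(): when the grsec_enable_dmesg toggle is on, unprivileged callers are refused before the normal syslog restrictions are even consulted. A compact sketch of that toggle-plus-capability gate; the capability helpers below are stubs, not the kernel's capable()/capable_nolog():

#include <stdbool.h>
#include <stdio.h>

static bool restrict_dmesg = true;      /* analogue of grsec_enable_dmesg */

/* stand-ins for capable(CAP_SYSLOG) and capable_nolog(CAP_SYS_ADMIN) */
static bool has_cap_syslog(bool privileged)   { return privileged; }
static bool has_cap_sysadmin(bool privileged) { return privileged; }

static int check_dmesg_permission(bool privileged)
{
        if (restrict_dmesg &&
            !has_cap_syslog(privileged) &&
            !has_cap_sysadmin(privileged))
                return -1;              /* -EPERM in the kernel */
        return 0;                       /* fall through to the usual checks */
}

int main(void)
{
        printf("unprivileged read: %d\n", check_dmesg_permission(false));
        printf("privileged read:   %d\n", check_dmesg_permission(true));
        return 0;
}
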
fe2de317 67966diff --git a/kernel/profile.c b/kernel/profile.c
4c928ab7 67967index 76b8e77..a2930e8 100644
fe2de317
MT
67968--- a/kernel/profile.c
67969+++ b/kernel/profile.c
8308f9c9
MT
67970@@ -39,7 +39,7 @@ struct profile_hit {
67971 /* Oprofile timer tick hook */
67972 static int (*timer_hook)(struct pt_regs *) __read_mostly;
67973
67974-static atomic_t *prof_buffer;
67975+static atomic_unchecked_t *prof_buffer;
67976 static unsigned long prof_len, prof_shift;
67977
67978 int prof_on __read_mostly;
15a11c5b 67979@@ -281,7 +281,7 @@ static void profile_flip_buffers(void)
8308f9c9
MT
67980 hits[i].pc = 0;
67981 continue;
67982 }
67983- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67984+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67985 hits[i].hits = hits[i].pc = 0;
67986 }
67987 }
fe2de317 67988@@ -342,9 +342,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
8308f9c9
MT
67989 * Add the current hit(s) and flush the write-queue out
67990 * to the global buffer:
67991 */
67992- atomic_add(nr_hits, &prof_buffer[pc]);
67993+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
67994 for (i = 0; i < NR_PROFILE_HIT; ++i) {
67995- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
67996+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
67997 hits[i].pc = hits[i].hits = 0;
67998 }
67999 out:
fe2de317 68000@@ -419,7 +419,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
15a11c5b
MT
68001 {
68002 unsigned long pc;
8308f9c9
MT
68003 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
68004- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68005+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
68006 }
68007 #endif /* !CONFIG_SMP */
15a11c5b 68008
fe2de317 68009@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
8308f9c9
MT
68010 return -EFAULT;
68011 buf++; p++; count--; read++;
68012 }
68013- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
68014+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
68015 if (copy_to_user(buf, (void *)pnt, count))
68016 return -EFAULT;
68017 read += count;
fe2de317 68018@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
8308f9c9
MT
68019 }
68020 #endif
68021 profile_discard_flip_buffers();
68022- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
68023+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
68024 return count;
68025 }
68026
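
The profile.c hunk is one of many in this patch that switch statistic counters from atomic_t to atomic_unchecked_t: under the PaX REFCOUNT feature plain atomic_t operations are overflow-checked, so counters that are allowed to wrap (profiling hits, torture-test tallies, and so on) are opted out via the _unchecked variants. The sketch below mimics that split in userspace C11; the checked variant detects overflow after the fact here, whereas the real feature does it inside the atomic operations themselves, and all type and function names are illustrative:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* "checked" counter: overflow is treated as a bug (reference counts) */
typedef struct { atomic_int v; } atomic_checked_t;
/* "unchecked" counter: wrap-around is accepted (statistics, profiling hits) */
typedef struct { atomic_int v; } atomic_unchecked_sketch_t;

static void atomic_add_checked(int n, atomic_checked_t *a)
{
        int old = atomic_fetch_add(&a->v, n);
        if (n > 0 && old > INT_MAX - n) {
                fprintf(stderr, "counter overflow detected, aborting\n");
                abort();        /* stand-in for the PaX overflow handler */
        }
}

static void atomic_add_unchecked_sketch(int n, atomic_unchecked_sketch_t *a)
{
        atomic_fetch_add(&a->v, n);     /* no overflow check: wrapping is fine here */
}

int main(void)
{
        atomic_unchecked_sketch_t hits = { INT_MAX - 1 };
        atomic_add_unchecked_sketch(5, &hits);  /* wraps silently, acceptable for a histogram */
        printf("profiling counter after wrap: %d\n", atomic_load(&hits.v));

        atomic_checked_t refs = { INT_MAX - 1 };
        atomic_add_checked(1, &refs);   /* ok */
        atomic_add_checked(5, &refs);   /* overflow: aborts */
        return 0;
}
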
fe2de317 68027diff --git a/kernel/ptrace.c b/kernel/ptrace.c
c6e2a6c8 68028index ee8d49b..bd3d790 100644
fe2de317
MT
68029--- a/kernel/ptrace.c
68030+++ b/kernel/ptrace.c
c6e2a6c8
MT
68031@@ -280,7 +280,7 @@ static int ptrace_attach(struct task_struct *task, long request,
68032
6e9df6a3 68033 if (seize)
c6e2a6c8 68034 flags |= PT_SEIZED;
5e856224
MT
68035- if (ns_capable(task_user_ns(task), CAP_SYS_PTRACE))
68036+ if (ns_capable_nolog(task_user_ns(task), CAP_SYS_PTRACE))
c6e2a6c8
MT
68037 flags |= PT_PTRACE_CAP;
68038 task->ptrace = flags;
58c5fc13 68039
c6e2a6c8 68040@@ -487,7 +487,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
ae4e228f
MT
68041 break;
68042 return -EIO;
68043 }
68044- if (copy_to_user(dst, buf, retval))
68045+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
68046 return -EFAULT;
68047 copied += retval;
68048 src += retval;
c6e2a6c8 68049@@ -672,7 +672,7 @@ int ptrace_request(struct task_struct *child, long request,
6e9df6a3 68050 bool seized = child->ptrace & PT_SEIZED;
bc901d79 68051 int ret = -EIO;
6e9df6a3 68052 siginfo_t siginfo, *si;
bc901d79
MT
68053- void __user *datavp = (void __user *) data;
68054+ void __user *datavp = (__force void __user *) data;
68055 unsigned long __user *datalp = datavp;
6e9df6a3 68056 unsigned long flags;
ae4e228f 68057
c6e2a6c8 68058@@ -874,14 +874,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
ae4e228f
MT
68059 goto out;
68060 }
58c5fc13
MT
68061
68062+ if (gr_handle_ptrace(child, request)) {
68063+ ret = -EPERM;
68064+ goto out_put_task_struct;
68065+ }
68066+
6e9df6a3 68067 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
c6e2a6c8 68068 ret = ptrace_attach(child, request, addr, data);
ae4e228f
MT
68069 /*
68070 * Some architectures need to do book-keeping after
68071 * a ptrace attach.
68072 */
68073- if (!ret)
68074+ if (!ret) {
68075 arch_ptrace_attach(child);
68076+ gr_audit_ptrace(child);
68077+ }
68078 goto out_put_task_struct;
68079 }
58c5fc13 68080
c6e2a6c8 68081@@ -907,7 +914,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
ae4e228f
MT
68082 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
68083 if (copied != sizeof(tmp))
68084 return -EIO;
68085- return put_user(tmp, (unsigned long __user *)data);
68086+ return put_user(tmp, (__force unsigned long __user *)data);
58c5fc13
MT
68087 }
68088
bc901d79 68089 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
c6e2a6c8 68090@@ -1017,14 +1024,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
bc901d79
MT
68091 goto out;
68092 }
68093
68094+ if (gr_handle_ptrace(child, request)) {
68095+ ret = -EPERM;
68096+ goto out_put_task_struct;
68097+ }
68098+
6e9df6a3 68099 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
c6e2a6c8 68100 ret = ptrace_attach(child, request, addr, data);
bc901d79
MT
68101 /*
68102 * Some architectures need to do book-keeping after
68103 * a ptrace attach.
68104 */
68105- if (!ret)
68106+ if (!ret) {
68107 arch_ptrace_attach(child);
68108+ gr_audit_ptrace(child);
68109+ }
68110 goto out_put_task_struct;
68111 }
68112
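
The ptrace.c hunks wrap the attach path in two grsecurity hooks: gr_handle_ptrace() can veto the request before ptrace_attach() runs, and gr_audit_ptrace() records the attach only when it succeeded. A small sketch of that deny-first, audit-on-success shape, with placeholder policy and audit bodies:

#include <stdbool.h>
#include <stdio.h>

struct task { int pid; bool sensitive; };

/* placeholder policy: deny tracing of tasks flagged sensitive */
static bool policy_denies_trace(const struct task *child)
{
        return child->sensitive;
}

static void audit_trace(const struct task *child)
{
        printf("audit: pid %d is now traced\n", child->pid);
}

static int do_attach(struct task *child)
{
        printf("attached to pid %d\n", child->pid);
        return 0;
}

/* same shape as the patched sys_ptrace(): deny first, audit only on success */
static int trace_request(struct task *child)
{
        if (policy_denies_trace(child))
                return -1;              /* -EPERM in the kernel */

        int ret = do_attach(child);
        if (!ret)
                audit_trace(child);     /* book-keeping only if attach worked */
        return ret;
}

int main(void)
{
        struct task ok = { .pid = 100, .sensitive = false };
        struct task no = { .pid = 200, .sensitive = true };

        printf("request(ok) = %d\n", trace_request(&ok));
        printf("request(no) = %d\n", trace_request(&no));
        return 0;
}
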
4c928ab7 68113diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
c6e2a6c8 68114index 37a5444..eec170a 100644
4c928ab7
MT
68115--- a/kernel/rcutiny.c
68116+++ b/kernel/rcutiny.c
68117@@ -46,7 +46,7 @@
68118 struct rcu_ctrlblk;
68119 static void invoke_rcu_callbacks(void);
68120 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
68121-static void rcu_process_callbacks(struct softirq_action *unused);
68122+static void rcu_process_callbacks(void);
68123 static void __call_rcu(struct rcu_head *head,
68124 void (*func)(struct rcu_head *rcu),
68125 struct rcu_ctrlblk *rcp);
c6e2a6c8 68126@@ -307,7 +307,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
5e856224 68127 rcu_is_callbacks_kthread()));
4c928ab7
MT
68128 }
68129
68130-static void rcu_process_callbacks(struct softirq_action *unused)
68131+static void rcu_process_callbacks(void)
68132 {
68133 __rcu_process_callbacks(&rcu_sched_ctrlblk);
68134 __rcu_process_callbacks(&rcu_bh_ctrlblk);
5e856224 68135diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
c6e2a6c8 68136index 22ecea0..3789898 100644
5e856224
MT
68137--- a/kernel/rcutiny_plugin.h
68138+++ b/kernel/rcutiny_plugin.h
c6e2a6c8 68139@@ -955,7 +955,7 @@ static int rcu_kthread(void *arg)
5e856224
MT
68140 have_rcu_kthread_work = morework;
68141 local_irq_restore(flags);
68142 if (work)
68143- rcu_process_callbacks(NULL);
68144+ rcu_process_callbacks();
68145 schedule_timeout_interruptible(1); /* Leave CPU for others. */
68146 }
68147
fe2de317 68148diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
c6e2a6c8 68149index a89b381..efdcad8 100644
fe2de317
MT
68150--- a/kernel/rcutorture.c
68151+++ b/kernel/rcutorture.c
c6e2a6c8 68152@@ -158,12 +158,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
8308f9c9
MT
68153 { 0 };
68154 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
68155 { 0 };
68156-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68157-static atomic_t n_rcu_torture_alloc;
68158-static atomic_t n_rcu_torture_alloc_fail;
68159-static atomic_t n_rcu_torture_free;
68160-static atomic_t n_rcu_torture_mberror;
68161-static atomic_t n_rcu_torture_error;
68162+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
68163+static atomic_unchecked_t n_rcu_torture_alloc;
68164+static atomic_unchecked_t n_rcu_torture_alloc_fail;
68165+static atomic_unchecked_t n_rcu_torture_free;
68166+static atomic_unchecked_t n_rcu_torture_mberror;
68167+static atomic_unchecked_t n_rcu_torture_error;
68168 static long n_rcu_torture_boost_ktrerror;
68169 static long n_rcu_torture_boost_rterror;
15a11c5b 68170 static long n_rcu_torture_boost_failure;
c6e2a6c8 68171@@ -253,11 +253,11 @@ rcu_torture_alloc(void)
8308f9c9
MT
68172
68173 spin_lock_bh(&rcu_torture_lock);
68174 if (list_empty(&rcu_torture_freelist)) {
68175- atomic_inc(&n_rcu_torture_alloc_fail);
68176+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
68177 spin_unlock_bh(&rcu_torture_lock);
68178 return NULL;
68179 }
68180- atomic_inc(&n_rcu_torture_alloc);
68181+ atomic_inc_unchecked(&n_rcu_torture_alloc);
68182 p = rcu_torture_freelist.next;
68183 list_del_init(p);
68184 spin_unlock_bh(&rcu_torture_lock);
c6e2a6c8 68185@@ -270,7 +270,7 @@ rcu_torture_alloc(void)
8308f9c9
MT
68186 static void
68187 rcu_torture_free(struct rcu_torture *p)
68188 {
68189- atomic_inc(&n_rcu_torture_free);
68190+ atomic_inc_unchecked(&n_rcu_torture_free);
68191 spin_lock_bh(&rcu_torture_lock);
68192 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
68193 spin_unlock_bh(&rcu_torture_lock);
c6e2a6c8 68194@@ -390,7 +390,7 @@ rcu_torture_cb(struct rcu_head *p)
8308f9c9
MT
68195 i = rp->rtort_pipe_count;
68196 if (i > RCU_TORTURE_PIPE_LEN)
68197 i = RCU_TORTURE_PIPE_LEN;
68198- atomic_inc(&rcu_torture_wcount[i]);
68199+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68200 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68201 rp->rtort_mbtest = 0;
68202 rcu_torture_free(rp);
c6e2a6c8 68203@@ -437,7 +437,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
8308f9c9
MT
68204 i = rp->rtort_pipe_count;
68205 if (i > RCU_TORTURE_PIPE_LEN)
68206 i = RCU_TORTURE_PIPE_LEN;
68207- atomic_inc(&rcu_torture_wcount[i]);
68208+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68209 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
68210 rp->rtort_mbtest = 0;
68211 list_del(&rp->rtort_free);
c6e2a6c8 68212@@ -926,7 +926,7 @@ rcu_torture_writer(void *arg)
8308f9c9
MT
68213 i = old_rp->rtort_pipe_count;
68214 if (i > RCU_TORTURE_PIPE_LEN)
68215 i = RCU_TORTURE_PIPE_LEN;
68216- atomic_inc(&rcu_torture_wcount[i]);
68217+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
68218 old_rp->rtort_pipe_count++;
68219 cur_ops->deferred_free(old_rp);
68220 }
c6e2a6c8 68221@@ -1007,7 +1007,7 @@ static void rcu_torture_timer(unsigned long unused)
8308f9c9 68222 }
c6e2a6c8 68223 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
8308f9c9
MT
68224 if (p->rtort_mbtest == 0)
68225- atomic_inc(&n_rcu_torture_mberror);
68226+ atomic_inc_unchecked(&n_rcu_torture_mberror);
68227 spin_lock(&rand_lock);
68228 cur_ops->read_delay(&rand);
68229 n_rcu_torture_timers++;
c6e2a6c8 68230@@ -1071,7 +1071,7 @@ rcu_torture_reader(void *arg)
8308f9c9 68231 }
c6e2a6c8 68232 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
8308f9c9
MT
68233 if (p->rtort_mbtest == 0)
68234- atomic_inc(&n_rcu_torture_mberror);
68235+ atomic_inc_unchecked(&n_rcu_torture_mberror);
68236 cur_ops->read_delay(&rand);
68237 preempt_disable();
68238 pipe_count = p->rtort_pipe_count;
c6e2a6c8 68239@@ -1133,10 +1133,10 @@ rcu_torture_printk(char *page)
8308f9c9
MT
68240 rcu_torture_current,
68241 rcu_torture_current_version,
68242 list_empty(&rcu_torture_freelist),
68243- atomic_read(&n_rcu_torture_alloc),
68244- atomic_read(&n_rcu_torture_alloc_fail),
68245- atomic_read(&n_rcu_torture_free),
68246- atomic_read(&n_rcu_torture_mberror),
68247+ atomic_read_unchecked(&n_rcu_torture_alloc),
68248+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
68249+ atomic_read_unchecked(&n_rcu_torture_free),
68250+ atomic_read_unchecked(&n_rcu_torture_mberror),
68251 n_rcu_torture_boost_ktrerror,
68252 n_rcu_torture_boost_rterror,
8308f9c9 68253 n_rcu_torture_boost_failure,
c6e2a6c8 68254@@ -1146,7 +1146,7 @@ rcu_torture_printk(char *page)
5e856224
MT
68255 n_online_attempts,
68256 n_offline_successes,
68257 n_offline_attempts);
8308f9c9
MT
68258- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
68259+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
68260 n_rcu_torture_boost_ktrerror != 0 ||
68261 n_rcu_torture_boost_rterror != 0 ||
15a11c5b 68262 n_rcu_torture_boost_failure != 0)
c6e2a6c8 68263@@ -1154,7 +1154,7 @@ rcu_torture_printk(char *page)
8308f9c9
MT
68264 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
68265 if (i > 1) {
68266 cnt += sprintf(&page[cnt], "!!! ");
68267- atomic_inc(&n_rcu_torture_error);
68268+ atomic_inc_unchecked(&n_rcu_torture_error);
68269 WARN_ON_ONCE(1);
68270 }
68271 cnt += sprintf(&page[cnt], "Reader Pipe: ");
c6e2a6c8 68272@@ -1168,7 +1168,7 @@ rcu_torture_printk(char *page)
8308f9c9
MT
68273 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
68274 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68275 cnt += sprintf(&page[cnt], " %d",
68276- atomic_read(&rcu_torture_wcount[i]));
68277+ atomic_read_unchecked(&rcu_torture_wcount[i]));
68278 }
68279 cnt += sprintf(&page[cnt], "\n");
68280 if (cur_ops->stats)
c6e2a6c8 68281@@ -1676,7 +1676,7 @@ rcu_torture_cleanup(void)
8308f9c9
MT
68282
68283 if (cur_ops->cleanup)
68284 cur_ops->cleanup();
68285- if (atomic_read(&n_rcu_torture_error))
68286+ if (atomic_read_unchecked(&n_rcu_torture_error))
68287 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
c6e2a6c8
MT
68288 else if (n_online_successes != n_online_attempts ||
68289 n_offline_successes != n_offline_attempts)
68290@@ -1744,17 +1744,17 @@ rcu_torture_init(void)
8308f9c9
MT
68291
68292 rcu_torture_current = NULL;
68293 rcu_torture_current_version = 0;
68294- atomic_set(&n_rcu_torture_alloc, 0);
68295- atomic_set(&n_rcu_torture_alloc_fail, 0);
68296- atomic_set(&n_rcu_torture_free, 0);
68297- atomic_set(&n_rcu_torture_mberror, 0);
68298- atomic_set(&n_rcu_torture_error, 0);
68299+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
68300+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
68301+ atomic_set_unchecked(&n_rcu_torture_free, 0);
68302+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
68303+ atomic_set_unchecked(&n_rcu_torture_error, 0);
68304 n_rcu_torture_boost_ktrerror = 0;
68305 n_rcu_torture_boost_rterror = 0;
8308f9c9
MT
68306 n_rcu_torture_boost_failure = 0;
68307 n_rcu_torture_boosts = 0;
68308 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
68309- atomic_set(&rcu_torture_wcount[i], 0);
68310+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
68311 for_each_possible_cpu(cpu) {
68312 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
68313 per_cpu(rcu_torture_count, cpu)[i] = 0;
fe2de317 68314diff --git a/kernel/rcutree.c b/kernel/rcutree.c
c6e2a6c8 68315index d0c5baf..109b2e7 100644
fe2de317
MT
68316--- a/kernel/rcutree.c
68317+++ b/kernel/rcutree.c
c6e2a6c8 68318@@ -357,9 +357,9 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
5e856224 68319 rcu_prepare_for_idle(smp_processor_id());
15a11c5b
MT
68320 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68321 smp_mb__before_atomic_inc(); /* See above. */
68322- atomic_inc(&rdtp->dynticks);
68323+ atomic_inc_unchecked(&rdtp->dynticks);
68324 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
68325- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68326+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
15a11c5b 68327
c6e2a6c8
MT
68328 /*
68329 * The idle task is not permitted to enter the idle loop while
68330@@ -448,10 +448,10 @@ void rcu_irq_exit(void)
5e856224
MT
68331 static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
68332 {
15a11c5b
MT
68333 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
68334- atomic_inc(&rdtp->dynticks);
68335+ atomic_inc_unchecked(&rdtp->dynticks);
68336 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68337 smp_mb__after_atomic_inc(); /* See above. */
68338- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68339+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
5e856224
MT
68340 rcu_cleanup_after_idle(smp_processor_id());
68341 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
68342 if (!is_idle_task(current)) {
c6e2a6c8 68343@@ -545,14 +545,14 @@ void rcu_nmi_enter(void)
15a11c5b
MT
68344 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
68345
68346 if (rdtp->dynticks_nmi_nesting == 0 &&
68347- (atomic_read(&rdtp->dynticks) & 0x1))
68348+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
68349 return;
68350 rdtp->dynticks_nmi_nesting++;
68351 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
68352- atomic_inc(&rdtp->dynticks);
68353+ atomic_inc_unchecked(&rdtp->dynticks);
68354 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
68355 smp_mb__after_atomic_inc(); /* See above. */
68356- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
68357+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
68358 }
68359
68360 /**
c6e2a6c8 68361@@ -571,9 +571,9 @@ void rcu_nmi_exit(void)
15a11c5b
MT
68362 return;
68363 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
68364 smp_mb__before_atomic_inc(); /* See above. */
68365- atomic_inc(&rdtp->dynticks);
68366+ atomic_inc_unchecked(&rdtp->dynticks);
68367 smp_mb__after_atomic_inc(); /* Force delay to next write. */
68368- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
68369+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
68370 }
68371
5e856224 68372 #ifdef CONFIG_PROVE_RCU
c6e2a6c8 68373@@ -589,7 +589,7 @@ int rcu_is_cpu_idle(void)
5e856224
MT
68374 int ret;
68375
68376 preempt_disable();
68377- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68378+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
68379 preempt_enable();
68380 return ret;
68381 }
c6e2a6c8 68382@@ -659,7 +659,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
15a11c5b
MT
68383 */
68384 static int dyntick_save_progress_counter(struct rcu_data *rdp)
68385 {
68386- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
68387+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
5e856224 68388 return (rdp->dynticks_snap & 0x1) == 0;
15a11c5b
MT
68389 }
68390
c6e2a6c8 68391@@ -674,7 +674,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
4c928ab7
MT
68392 unsigned int curr;
68393 unsigned int snap;
15a11c5b 68394
4c928ab7
MT
68395- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
68396+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
68397 snap = (unsigned int)rdp->dynticks_snap;
15a11c5b
MT
68398
68399 /*
c6e2a6c8
MT
68400@@ -704,10 +704,10 @@ static int jiffies_till_stall_check(void)
68401 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
68402 */
68403 if (till_stall_check < 3) {
68404- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
68405+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
68406 till_stall_check = 3;
68407 } else if (till_stall_check > 300) {
68408- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
68409+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
68410 till_stall_check = 300;
68411 }
68412 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
68413@@ -1766,7 +1766,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
58c5fc13 68414 /*
4c928ab7 68415 * Do RCU core processing for the current CPU.
58c5fc13 68416 */
ae4e228f
MT
68417-static void rcu_process_callbacks(struct softirq_action *unused)
68418+static void rcu_process_callbacks(void)
68419 {
4c928ab7 68420 trace_rcu_utilization("Start RCU core");
15a11c5b 68421 __rcu_process_callbacks(&rcu_sched_state,
c6e2a6c8
MT
68422@@ -1949,8 +1949,8 @@ void synchronize_rcu_bh(void)
68423 }
68424 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
8308f9c9
MT
68425
68426-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
68427-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
68428+static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
68429+static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
68430
68431 static int synchronize_sched_expedited_cpu_stop(void *data)
68432 {
c6e2a6c8 68433@@ -2011,7 +2011,7 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
68434 int firstsnap, s, snap, trycount = 0;
68435
68436 /* Note that atomic_inc_return() implies full memory barrier. */
68437- firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
68438+ firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
68439 get_online_cpus();
c6e2a6c8 68440 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
8308f9c9 68441
c6e2a6c8 68442@@ -2033,7 +2033,7 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
68443 }
68444
68445 /* Check to see if someone else did our work for us. */
68446- s = atomic_read(&sync_sched_expedited_done);
68447+ s = atomic_read_unchecked(&sync_sched_expedited_done);
68448 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
68449 smp_mb(); /* ensure test happens before caller kfree */
68450 return;
c6e2a6c8 68451@@ -2048,7 +2048,7 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
68452 * grace period works for us.
68453 */
68454 get_online_cpus();
5e856224
MT
68455- snap = atomic_read(&sync_sched_expedited_started);
68456+ snap = atomic_read_unchecked(&sync_sched_expedited_started);
8308f9c9
MT
68457 smp_mb(); /* ensure read is before try_stop_cpus(). */
68458 }
68459
c6e2a6c8 68460@@ -2059,12 +2059,12 @@ void synchronize_sched_expedited(void)
8308f9c9
MT
68461 * than we did beat us to the punch.
68462 */
68463 do {
68464- s = atomic_read(&sync_sched_expedited_done);
68465+ s = atomic_read_unchecked(&sync_sched_expedited_done);
68466 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
68467 smp_mb(); /* ensure test happens before caller kfree */
68468 break;
68469 }
68470- } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
68471+ } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
68472
68473 put_online_cpus();
68474 }
c6e2a6c8
MT
68475@@ -2262,7 +2262,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
68476 rdp->qlen = 0;
68477 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
68478 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
68479- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
68480+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
68481 rdp->cpu = cpu;
68482 rdp->rsp = rsp;
68483 raw_spin_unlock_irqrestore(&rnp->lock, flags);
68484@@ -2290,8 +2290,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
68485 rdp->n_force_qs_snap = rsp->n_force_qs;
68486 rdp->blimit = blimit;
68487 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
68488- atomic_set(&rdp->dynticks->dynticks,
68489- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
68490+ atomic_set_unchecked(&rdp->dynticks->dynticks,
68491+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
68492 rcu_prepare_for_idle_init(cpu);
68493 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
68494
68495diff --git a/kernel/rcutree.h b/kernel/rcutree.h
68496index cdd1be0..5b2efb4 100644
68497--- a/kernel/rcutree.h
68498+++ b/kernel/rcutree.h
68499@@ -87,7 +87,7 @@ struct rcu_dynticks {
68500 long long dynticks_nesting; /* Track irq/process nesting level. */
68501 /* Process level is worth LLONG_MAX/2. */
68502 int dynticks_nmi_nesting; /* Track NMI nesting level. */
68503- atomic_t dynticks; /* Even value for idle, else odd. */
68504+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
68505 };
68506
68507 /* RCU's kthread states for tracing. */
68508diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
68509index c023464..7f57225 100644
68510--- a/kernel/rcutree_plugin.h
68511+++ b/kernel/rcutree_plugin.h
68512@@ -909,7 +909,7 @@ void synchronize_rcu_expedited(void)
68513
68514 /* Clean up and exit. */
68515 smp_mb(); /* ensure expedited GP seen before counter increment. */
68516- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
68517+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
68518 unlock_mb_ret:
68519 mutex_unlock(&sync_rcu_preempt_exp_mutex);
68520 mb_ret:
fe2de317 68521diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
c6e2a6c8 68522index ed459ed..a03c3fa 100644
fe2de317
MT
68523--- a/kernel/rcutree_trace.c
68524+++ b/kernel/rcutree_trace.c
5e856224
MT
68525@@ -68,7 +68,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
68526 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
fe2de317 68527 rdp->qs_pending);
5e856224 68528 seq_printf(m, " dt=%d/%llx/%d df=%lu",
fe2de317
MT
68529- atomic_read(&rdp->dynticks->dynticks),
68530+ atomic_read_unchecked(&rdp->dynticks->dynticks),
68531 rdp->dynticks->dynticks_nesting,
68532 rdp->dynticks->dynticks_nmi_nesting,
68533 rdp->dynticks_fqs);
5e856224
MT
68534@@ -140,7 +140,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
68535 rdp->passed_quiesce, rdp->passed_quiesce_gpnum,
fe2de317 68536 rdp->qs_pending);
5e856224 68537 seq_printf(m, ",%d,%llx,%d,%lu",
fe2de317
MT
68538- atomic_read(&rdp->dynticks->dynticks),
68539+ atomic_read_unchecked(&rdp->dynticks->dynticks),
68540 rdp->dynticks->dynticks_nesting,
68541 rdp->dynticks->dynticks_nmi_nesting,
68542 rdp->dynticks_fqs);
fe2de317 68543diff --git a/kernel/resource.c b/kernel/resource.c
c6e2a6c8 68544index 7e8ea66..1efd11f 100644
fe2de317
MT
68545--- a/kernel/resource.c
68546+++ b/kernel/resource.c
68547@@ -141,8 +141,18 @@ static const struct file_operations proc_iomem_operations = {
58c5fc13
MT
68548
68549 static int __init ioresources_init(void)
68550 {
68551+#ifdef CONFIG_GRKERNSEC_PROC_ADD
68552+#ifdef CONFIG_GRKERNSEC_PROC_USER
68553+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
68554+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
68555+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68556+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
68557+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
68558+#endif
68559+#else
68560 proc_create("ioports", 0, NULL, &proc_ioports_operations);
68561 proc_create("iomem", 0, NULL, &proc_iomem_operations);
68562+#endif
68563 return 0;
68564 }
68565 __initcall(ioresources_init);
fe2de317 68566diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
5e856224 68567index 98ec494..4241d6d 100644
fe2de317
MT
68568--- a/kernel/rtmutex-tester.c
68569+++ b/kernel/rtmutex-tester.c
66a7e928 68570@@ -20,7 +20,7 @@
8308f9c9
MT
68571 #define MAX_RT_TEST_MUTEXES 8
68572
68573 static spinlock_t rttest_lock;
68574-static atomic_t rttest_event;
68575+static atomic_unchecked_t rttest_event;
68576
68577 struct test_thread_data {
68578 int opcode;
fe2de317 68579@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
68580
68581 case RTTEST_LOCKCONT:
68582 td->mutexes[td->opdata] = 1;
68583- td->event = atomic_add_return(1, &rttest_event);
68584+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68585 return 0;
68586
68587 case RTTEST_RESET:
fe2de317 68588@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
68589 return 0;
68590
68591 case RTTEST_RESETEVENT:
68592- atomic_set(&rttest_event, 0);
68593+ atomic_set_unchecked(&rttest_event, 0);
68594 return 0;
68595
68596 default:
fe2de317 68597@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
68598 return ret;
68599
68600 td->mutexes[id] = 1;
68601- td->event = atomic_add_return(1, &rttest_event);
68602+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68603 rt_mutex_lock(&mutexes[id]);
68604- td->event = atomic_add_return(1, &rttest_event);
68605+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68606 td->mutexes[id] = 4;
68607 return 0;
68608
fe2de317 68609@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
68610 return ret;
68611
68612 td->mutexes[id] = 1;
68613- td->event = atomic_add_return(1, &rttest_event);
68614+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68615 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
68616- td->event = atomic_add_return(1, &rttest_event);
68617+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68618 td->mutexes[id] = ret ? 0 : 4;
68619 return ret ? -EINTR : 0;
68620
fe2de317 68621@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
8308f9c9
MT
68622 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
68623 return ret;
68624
68625- td->event = atomic_add_return(1, &rttest_event);
68626+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68627 rt_mutex_unlock(&mutexes[id]);
68628- td->event = atomic_add_return(1, &rttest_event);
68629+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68630 td->mutexes[id] = 0;
68631 return 0;
68632
fe2de317 68633@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
8308f9c9
MT
68634 break;
68635
68636 td->mutexes[dat] = 2;
68637- td->event = atomic_add_return(1, &rttest_event);
68638+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68639 break;
68640
66a7e928 68641 default:
fe2de317 68642@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
8308f9c9
MT
68643 return;
68644
68645 td->mutexes[dat] = 3;
68646- td->event = atomic_add_return(1, &rttest_event);
68647+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68648 break;
68649
68650 case RTTEST_LOCKNOWAIT:
fe2de317 68651@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
8308f9c9
MT
68652 return;
68653
68654 td->mutexes[dat] = 1;
68655- td->event = atomic_add_return(1, &rttest_event);
68656+ td->event = atomic_add_return_unchecked(1, &rttest_event);
68657 return;
68658
66a7e928 68659 default:
5e856224 68660diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
c6e2a6c8 68661index 0984a21..939f183 100644
5e856224
MT
68662--- a/kernel/sched/auto_group.c
68663+++ b/kernel/sched/auto_group.c
68664@@ -11,7 +11,7 @@
4c928ab7 68665
5e856224
MT
68666 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
68667 static struct autogroup autogroup_default;
68668-static atomic_t autogroup_seq_nr;
68669+static atomic_unchecked_t autogroup_seq_nr;
66a7e928 68670
5e856224
MT
68671 void __init autogroup_init(struct task_struct *init_task)
68672 {
68673@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
4c928ab7 68674
5e856224
MT
68675 kref_init(&ag->kref);
68676 init_rwsem(&ag->lock);
68677- ag->id = atomic_inc_return(&autogroup_seq_nr);
68678+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
68679 ag->tg = tg;
68680 #ifdef CONFIG_RT_GROUP_SCHED
68681 /*
68682diff --git a/kernel/sched/core.c b/kernel/sched/core.c
572b4308 68683index 817bf70..9099fb4 100644
5e856224
MT
68684--- a/kernel/sched/core.c
68685+++ b/kernel/sched/core.c
572b4308 68686@@ -4038,6 +4038,8 @@ int can_nice(const struct task_struct *p, const int nice)
58c5fc13
MT
68687 /* convert nice value [19,-20] to rlimit style value [1,40] */
68688 int nice_rlim = 20 - nice;
68689
68690+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
68691+
df50ba0c 68692 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
58c5fc13
MT
68693 capable(CAP_SYS_NICE));
68694 }
572b4308 68695@@ -4071,7 +4073,8 @@ SYSCALL_DEFINE1(nice, int, increment)
58c5fc13
MT
68696 if (nice > 19)
68697 nice = 19;
68698
68699- if (increment < 0 && !can_nice(current, nice))
68700+ if (increment < 0 && (!can_nice(current, nice) ||
68701+ gr_handle_chroot_nice()))
68702 return -EPERM;
68703
68704 retval = security_task_setnice(current, nice);
572b4308 68705@@ -4228,6 +4231,7 @@ recheck:
6892158b
MT
68706 unsigned long rlim_rtprio =
68707 task_rlimit(p, RLIMIT_RTPRIO);
58c5fc13 68708
6892158b 68709+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
df50ba0c
MT
68710 /* can't set/change the rt policy */
68711 if (policy != p->policy && !rlim_rtprio)
68712 return -EPERM;
5e856224 68713diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
c6e2a6c8 68714index e955364..eacd2a4 100644
5e856224
MT
68715--- a/kernel/sched/fair.c
68716+++ b/kernel/sched/fair.c
c6e2a6c8 68717@@ -5107,7 +5107,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
6892158b
MT
68718 * run_rebalance_domains is triggered when needed from the scheduler tick.
68719 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
df50ba0c
MT
68720 */
68721-static void run_rebalance_domains(struct softirq_action *h)
68722+static void run_rebalance_domains(void)
68723 {
68724 int this_cpu = smp_processor_id();
68725 struct rq *this_rq = cpu_rq(this_cpu);
fe2de317 68726diff --git a/kernel/signal.c b/kernel/signal.c
c6e2a6c8 68727index 17afcaf..4500b05 100644
fe2de317
MT
68728--- a/kernel/signal.c
68729+++ b/kernel/signal.c
c6e2a6c8 68730@@ -47,12 +47,12 @@ static struct kmem_cache *sigqueue_cachep;
df50ba0c
MT
68731
68732 int print_fatal_signals __read_mostly;
68733
68734-static void __user *sig_handler(struct task_struct *t, int sig)
68735+static __sighandler_t sig_handler(struct task_struct *t, int sig)
68736 {
68737 return t->sighand->action[sig - 1].sa.sa_handler;
68738 }
68739
68740-static int sig_handler_ignored(void __user *handler, int sig)
68741+static int sig_handler_ignored(__sighandler_t handler, int sig)
68742 {
68743 /* Is it explicitly or implicitly ignored? */
68744 return handler == SIG_IGN ||
5e856224 68745@@ -61,7 +61,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
c6e2a6c8
MT
68746
68747 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
df50ba0c
MT
68748 {
68749- void __user *handler;
68750+ __sighandler_t handler;
68751
68752 handler = sig_handler(t, sig);
68753
5e856224 68754@@ -365,6 +365,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
58c5fc13 68755 atomic_inc(&user->sigpending);
ae4e228f
MT
68756 rcu_read_unlock();
68757
58c5fc13
MT
68758+ if (!override_rlimit)
68759+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
ae4e228f 68760+
58c5fc13
MT
68761 if (override_rlimit ||
68762 atomic_read(&user->sigpending) <=
df50ba0c 68763 task_rlimit(t, RLIMIT_SIGPENDING)) {
5e856224 68764@@ -489,7 +492,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
df50ba0c
MT
68765
68766 int unhandled_signal(struct task_struct *tsk, int sig)
68767 {
68768- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
68769+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
68770 if (is_global_init(tsk))
68771 return 1;
68772 if (handler != SIG_IGN && handler != SIG_DFL)
5e856224 68773@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
58c5fc13
MT
68774 }
68775 }
68776
15a11c5b
MT
68777+ /* allow glibc communication via tgkill to other threads in our
68778+ thread group */
68779+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
68780+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
68781+ && gr_handle_signal(t, sig))
58c5fc13
MT
68782+ return -EPERM;
68783+
68784 return security_task_kill(t, info, sig, 0);
68785 }
68786
c6e2a6c8 68787@@ -1204,7 +1214,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
58c5fc13
MT
68788 return send_signal(sig, info, p, 1);
68789 }
68790
68791-static int
68792+int
68793 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
68794 {
68795 return send_signal(sig, info, t, 0);
c6e2a6c8 68796@@ -1241,6 +1251,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
c52201e0
MT
68797 unsigned long int flags;
68798 int ret, blocked, ignored;
68799 struct k_sigaction *action;
68800+ int is_unhandled = 0;
68801
68802 spin_lock_irqsave(&t->sighand->siglock, flags);
68803 action = &t->sighand->action[sig-1];
c6e2a6c8 68804@@ -1255,9 +1266,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
c52201e0
MT
68805 }
68806 if (action->sa.sa_handler == SIG_DFL)
68807 t->signal->flags &= ~SIGNAL_UNKILLABLE;
68808+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
68809+ is_unhandled = 1;
58c5fc13
MT
68810 ret = specific_send_sig_info(sig, info, t);
68811 spin_unlock_irqrestore(&t->sighand->siglock, flags);
68812
c52201e0
MT
68813+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
68814+ normal operation */
68815+ if (is_unhandled) {
68816+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
68817+ gr_handle_crash(t, sig);
68818+ }
58c5fc13
MT
68819+
68820 return ret;
68821 }
68822
c6e2a6c8 68823@@ -1324,8 +1344,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
57199397
MT
68824 ret = check_kill_permission(sig, info, p);
68825 rcu_read_unlock();
ae4e228f
MT
68826
68827- if (!ret && sig)
68828+ if (!ret && sig) {
68829 ret = do_send_sig_info(sig, info, p, true);
58c5fc13
MT
68830+ if (!ret)
68831+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
ae4e228f 68832+ }
58c5fc13
MT
68833
68834 return ret;
ae4e228f 68835 }
c6e2a6c8 68836@@ -2840,7 +2863,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
15a11c5b
MT
68837 int error = -ESRCH;
68838
68839 rcu_read_lock();
68840- p = find_task_by_vpid(pid);
68841+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68842+ /* allow glibc communication via tgkill to other threads in our
68843+ thread group */
68844+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
68845+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
68846+ p = find_task_by_vpid_unrestricted(pid);
68847+ else
68848+#endif
68849+ p = find_task_by_vpid(pid);
68850 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
68851 error = check_kill_permission(sig, info, p);
68852 /*
fe2de317 68853diff --git a/kernel/smp.c b/kernel/smp.c
c6e2a6c8 68854index 2f8b10e..a41bc14 100644
fe2de317
MT
68855--- a/kernel/smp.c
68856+++ b/kernel/smp.c
68857@@ -580,22 +580,22 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
ae4e228f
MT
68858 }
68859 EXPORT_SYMBOL(smp_call_function);
68860
68861-void ipi_call_lock(void)
68862+void ipi_call_lock(void) __acquires(call_function.lock)
68863 {
68864 raw_spin_lock(&call_function.lock);
68865 }
68866
68867-void ipi_call_unlock(void)
68868+void ipi_call_unlock(void) __releases(call_function.lock)
68869 {
68870 raw_spin_unlock(&call_function.lock);
68871 }
68872
68873-void ipi_call_lock_irq(void)
68874+void ipi_call_lock_irq(void) __acquires(call_function.lock)
68875 {
68876 raw_spin_lock_irq(&call_function.lock);
68877 }
68878
68879-void ipi_call_unlock_irq(void)
68880+void ipi_call_unlock_irq(void) __releases(call_function.lock)
68881 {
68882 raw_spin_unlock_irq(&call_function.lock);
68883 }
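
The smp.c hunk only adds sparse lock-context annotations to the ipi_call_lock helpers; behaviour is unchanged, but sparse can now verify that each function acquires or releases call_function.lock as claimed. The sketch below shows how such annotations are commonly defined and attached outside the kernel tree; the macro bodies mirror the kernel's compiler.h definitions but should be treated as an assumption here:

#include <pthread.h>
#include <stdio.h>

/* sparse-style context annotations: no-ops for a normal compiler,
 * checked when the code is run through sparse (which defines __CHECKER__) */
#ifdef __CHECKER__
# define __acquires(x)  __attribute__((context(x, 0, 1)))
# define __releases(x)  __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;

/* the annotations document (and let sparse verify) that these two
 * functions acquire and release the same lock in matched pairs */
static void ipi_call_lock(void) __acquires(call_lock);
static void ipi_call_unlock(void) __releases(call_lock);

static void ipi_call_lock(void)
{
        pthread_mutex_lock(&call_lock);
}

static void ipi_call_unlock(void)
{
        pthread_mutex_unlock(&call_lock);
}

int main(void)
{
        ipi_call_lock();
        printf("critical section\n");
        ipi_call_unlock();
        return 0;
}
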
fe2de317 68884diff --git a/kernel/softirq.c b/kernel/softirq.c
c6e2a6c8 68885index 671f959..91c51cb 100644
fe2de317
MT
68886--- a/kernel/softirq.c
68887+++ b/kernel/softirq.c
68888@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
ae4e228f 68889
66a7e928 68890 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
ae4e228f
MT
68891
68892-char *softirq_to_name[NR_SOFTIRQS] = {
68893+const char * const softirq_to_name[NR_SOFTIRQS] = {
68894 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
15a11c5b 68895 "TASKLET", "SCHED", "HRTIMER", "RCU"
ae4e228f 68896 };
bc901d79
MT
68897@@ -235,7 +235,7 @@ restart:
68898 kstat_incr_softirqs_this_cpu(vec_nr);
ae4e228f 68899
bc901d79 68900 trace_softirq_entry(vec_nr);
ae4e228f
MT
68901- h->action(h);
68902+ h->action();
bc901d79 68903 trace_softirq_exit(vec_nr);
ae4e228f 68904 if (unlikely(prev_count != preempt_count())) {
bc901d79 68905 printk(KERN_ERR "huh, entered softirq %u %s %p"
c6e2a6c8
MT
68906@@ -381,9 +381,11 @@ void __raise_softirq_irqoff(unsigned int nr)
68907 or_softirq_pending(1UL << nr);
ae4e228f
MT
68908 }
68909
68910-void open_softirq(int nr, void (*action)(struct softirq_action *))
68911+void open_softirq(int nr, void (*action)(void))
68912 {
15a11c5b
MT
68913- softirq_vec[nr].action = action;
68914+ pax_open_kernel();
68915+ *(void **)&softirq_vec[nr].action = action;
68916+ pax_close_kernel();
ae4e228f 68917 }
15a11c5b
MT
68918
68919 /*
c6e2a6c8 68920@@ -437,7 +439,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
ae4e228f
MT
68921
68922 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
68923
68924-static void tasklet_action(struct softirq_action *a)
68925+static void tasklet_action(void)
68926 {
68927 struct tasklet_struct *list;
68928
c6e2a6c8 68929@@ -472,7 +474,7 @@ static void tasklet_action(struct softirq_action *a)
ae4e228f
MT
68930 }
68931 }
68932
68933-static void tasklet_hi_action(struct softirq_action *a)
68934+static void tasklet_hi_action(void)
68935 {
68936 struct tasklet_struct *list;
68937
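
open_softirq() now takes a plain void (*action)(void) and writes the handler table through pax_open_kernel()/pax_close_kernel(), the PaX helpers that briefly permit writes to otherwise read-only kernel data. As a loose userspace analogy (not the PaX mechanism, which toggles write protection at the CPU/page-table level), the sketch keeps a page-aligned handler table read-only except inside an explicit open/close window, assuming 4 KiB pages and omitting error handling:

#include <stdio.h>
#include <sys/mman.h>

typedef void (*handler_fn)(void);

/* one full page of handler slots, page-aligned so it can be protected alone */
static handler_fn handler_vec[4096 / sizeof(handler_fn)]
        __attribute__((aligned(4096)));

static void open_table(void)    /* analogue of pax_open_kernel() */
{
        mprotect(handler_vec, sizeof(handler_vec), PROT_READ | PROT_WRITE);
}

static void close_table(void)   /* analogue of pax_close_kernel() */
{
        mprotect(handler_vec, sizeof(handler_vec), PROT_READ);
}

static void register_handler(int nr, handler_fn fn)
{
        if (nr < 0 || nr >= (int)(sizeof(handler_vec) / sizeof(handler_vec[0])))
                return;
        open_table();
        handler_vec[nr] = fn;   /* only writable inside the open/close window */
        close_table();
}

static void timer_handler(void) { puts("timer handler ran"); }

int main(void)
{
        close_table();                  /* table is read-only by default */
        register_handler(1, timer_handler);
        if (handler_vec[1])
                handler_vec[1]();       /* dispatch, like __do_softirq() */
        return 0;
}
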
fe2de317 68938diff --git a/kernel/sys.c b/kernel/sys.c
c6e2a6c8 68939index e7006eb..8fb7c51 100644
fe2de317
MT
68940--- a/kernel/sys.c
68941+++ b/kernel/sys.c
4c928ab7 68942@@ -158,6 +158,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
58c5fc13
MT
68943 error = -EACCES;
68944 goto out;
68945 }
68946+
68947+ if (gr_handle_chroot_setpriority(p, niceval)) {
68948+ error = -EACCES;
68949+ goto out;
68950+ }
68951+
68952 no_nice = security_task_setnice(p, niceval);
68953 if (no_nice) {
68954 error = no_nice;
c6e2a6c8 68955@@ -581,6 +587,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
58c5fc13
MT
68956 goto error;
68957 }
68958
68959+ if (gr_check_group_change(new->gid, new->egid, -1))
68960+ goto error;
68961+
68962 if (rgid != (gid_t) -1 ||
68963 (egid != (gid_t) -1 && egid != old->gid))
68964 new->sgid = new->egid;
c6e2a6c8 68965@@ -610,6 +619,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
57199397 68966 old = current_cred();
58c5fc13
MT
68967
68968 retval = -EPERM;
68969+
68970+ if (gr_check_group_change(gid, gid, gid))
68971+ goto error;
68972+
66a7e928 68973 if (nsown_capable(CAP_SETGID))
58c5fc13
MT
68974 new->gid = new->egid = new->sgid = new->fsgid = gid;
68975 else if (gid == old->gid || gid == old->sgid)
c6e2a6c8 68976@@ -627,7 +640,7 @@ error:
4c928ab7
MT
68977 /*
68978 * change the user struct in a credentials set to match the new UID
68979 */
68980-static int set_user(struct cred *new)
68981+int set_user(struct cred *new)
68982 {
68983 struct user_struct *new_user;
68984
c6e2a6c8 68985@@ -697,6 +710,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
58c5fc13
MT
68986 goto error;
68987 }
68988
68989+ if (gr_check_user_change(new->uid, new->euid, -1))
68990+ goto error;
68991+
68992 if (new->uid != old->uid) {
68993 retval = set_user(new);
68994 if (retval < 0)
c6e2a6c8 68995@@ -741,6 +757,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
57199397 68996 old = current_cred();
58c5fc13
MT
68997
68998 retval = -EPERM;
68999+
69000+ if (gr_check_crash_uid(uid))
69001+ goto error;
69002+ if (gr_check_user_change(uid, uid, uid))
69003+ goto error;
69004+
66a7e928 69005 if (nsown_capable(CAP_SETUID)) {
58c5fc13
MT
69006 new->suid = new->uid = uid;
69007 if (uid != old->uid) {
c6e2a6c8 69008@@ -795,6 +817,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
58c5fc13
MT
69009 goto error;
69010 }
69011
69012+ if (gr_check_user_change(ruid, euid, -1))
69013+ goto error;
69014+
69015 if (ruid != (uid_t) -1) {
69016 new->uid = ruid;
69017 if (ruid != old->uid) {
c6e2a6c8 69018@@ -859,6 +884,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
58c5fc13
MT
69019 goto error;
69020 }
69021
69022+ if (gr_check_group_change(rgid, egid, -1))
69023+ goto error;
69024+
69025 if (rgid != (gid_t) -1)
69026 new->gid = rgid;
69027 if (egid != (gid_t) -1)
c6e2a6c8 69028@@ -905,6 +933,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
57199397
MT
69029 old = current_cred();
69030 old_fsuid = old->fsuid;
58c5fc13
MT
69031
69032+ if (gr_check_user_change(-1, -1, uid))
69033+ goto error;
69034+
69035 if (uid == old->uid || uid == old->euid ||
69036 uid == old->suid || uid == old->fsuid ||
66a7e928 69037 nsown_capable(CAP_SETUID)) {
c6e2a6c8 69038@@ -915,6 +946,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
57199397
MT
69039 }
69040 }
69041
69042+error:
69043 abort_creds(new);
69044 return old_fsuid;
69045
c6e2a6c8 69046@@ -941,12 +973,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
58c5fc13
MT
69047 if (gid == old->gid || gid == old->egid ||
69048 gid == old->sgid || gid == old->fsgid ||
66a7e928 69049 nsown_capable(CAP_SETGID)) {
58c5fc13
MT
69050+ if (gr_check_group_change(-1, -1, gid))
69051+ goto error;
69052+
69053 if (gid != old_fsgid) {
69054 new->fsgid = gid;
69055 goto change_okay;
57199397
MT
69056 }
69057 }
69058
69059+error:
69060 abort_creds(new);
69061 return old_fsgid;
69062
c6e2a6c8 69063@@ -1198,7 +1234,10 @@ static int override_release(char __user *release, int len)
fe2de317
MT
69064 }
69065 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
69066 snprintf(buf, len, "2.6.%u%s", v, rest);
69067- ret = copy_to_user(release, buf, len);
69068+ if (len > sizeof(buf))
69069+ ret = -EFAULT;
69070+ else
69071+ ret = copy_to_user(release, buf, len);
69072 }
69073 return ret;
69074 }
c6e2a6c8 69075@@ -1252,19 +1291,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
6e9df6a3
MT
69076 return -EFAULT;
69077
69078 down_read(&uts_sem);
69079- error = __copy_to_user(&name->sysname, &utsname()->sysname,
69080+ error = __copy_to_user(name->sysname, &utsname()->sysname,
69081 __OLD_UTS_LEN);
69082 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
69083- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
69084+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
69085 __OLD_UTS_LEN);
69086 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
69087- error |= __copy_to_user(&name->release, &utsname()->release,
69088+ error |= __copy_to_user(name->release, &utsname()->release,
69089 __OLD_UTS_LEN);
69090 error |= __put_user(0, name->release + __OLD_UTS_LEN);
69091- error |= __copy_to_user(&name->version, &utsname()->version,
69092+ error |= __copy_to_user(name->version, &utsname()->version,
69093 __OLD_UTS_LEN);
69094 error |= __put_user(0, name->version + __OLD_UTS_LEN);
69095- error |= __copy_to_user(&name->machine, &utsname()->machine,
69096+ error |= __copy_to_user(name->machine, &utsname()->machine,
69097 __OLD_UTS_LEN);
69098 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
69099 up_read(&uts_sem);
c6e2a6c8 69100@@ -1847,7 +1886,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
58c5fc13
MT
69101 error = get_dumpable(me->mm);
69102 break;
69103 case PR_SET_DUMPABLE:
69104- if (arg2 < 0 || arg2 > 1) {
69105+ if (arg2 > 1) {
69106 error = -EINVAL;
69107 break;
69108 }
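
Among the sys.c changes above, override_release() gains an explicit bound: if the caller-supplied length exceeds the local buffer, the call fails with -EFAULT instead of copying past it. A userspace sketch of that clamp-before-copy shape, with memcpy standing in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* copy at most sizeof(buf) bytes; reject oversized requests instead of
 * reading past the local buffer (same shape as the patched override_release) */
static int copy_release(char *dst, size_t dst_len)
{
        char buf[64] = { 0 };
        int n = snprintf(buf, sizeof(buf), "2.6.%u%s", 40u, "-sketch");
        if (n < 0)
                return -EIO;

        if (dst_len > sizeof(buf))
                return -EFAULT;         /* the patch returns -EFAULT here too */

        memcpy(dst, buf, dst_len);
        return 0;
}

int main(void)
{
        char small[16];
        char big[128];

        printf("copy into 16 bytes:  %d\n", copy_release(small, sizeof(small)));
        printf("copy into 128 bytes: %d\n", copy_release(big, sizeof(big)));
        return 0;
}
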
fe2de317 69109diff --git a/kernel/sysctl.c b/kernel/sysctl.c
c6e2a6c8 69110index 4ab1187..0b75ced 100644
fe2de317
MT
69111--- a/kernel/sysctl.c
69112+++ b/kernel/sysctl.c
c6e2a6c8 69113@@ -91,7 +91,6 @@
ae4e228f 69114
58c5fc13
MT
69115
69116 #if defined(CONFIG_SYSCTL)
c6e2a6c8 69117-
58c5fc13 69118 /* External variables not in a header file. */
df50ba0c 69119 extern int sysctl_overcommit_memory;
c6e2a6c8
MT
69120 extern int sysctl_overcommit_ratio;
69121@@ -169,10 +168,8 @@ static int proc_taint(struct ctl_table *table, int write,
69122 void __user *buffer, size_t *lenp, loff_t *ppos);
69123 #endif
69124
69125-#ifdef CONFIG_PRINTK
69126 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
69127 void __user *buffer, size_t *lenp, loff_t *ppos);
69128-#endif
69129
69130 #ifdef CONFIG_MAGIC_SYSRQ
69131 /* Note: sysrq code uses it's own private copy */
69132@@ -196,6 +193,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
57199397 69133
58c5fc13 69134 #endif
58c5fc13 69135
c6e2a6c8
MT
69136+extern struct ctl_table grsecurity_table[];
69137+
69138 static struct ctl_table kern_table[];
69139 static struct ctl_table vm_table[];
69140 static struct ctl_table fs_table[];
69141@@ -210,6 +209,20 @@ extern struct ctl_table epoll_table[];
58c5fc13
MT
69142 int sysctl_legacy_va_layout;
69143 #endif
69144
69145+#ifdef CONFIG_PAX_SOFTMODE
69146+static ctl_table pax_table[] = {
69147+ {
58c5fc13
MT
69148+ .procname = "softmode",
69149+ .data = &pax_softmode,
69150+ .maxlen = sizeof(unsigned int),
69151+ .mode = 0600,
69152+ .proc_handler = &proc_dointvec,
69153+ },
69154+
ae4e228f 69155+ { }
58c5fc13
MT
69156+};
69157+#endif
69158+
df50ba0c 69159 /* The default sysctl tables: */
58c5fc13 69160
c6e2a6c8
MT
69161 static struct ctl_table sysctl_base_table[] = {
69162@@ -256,6 +269,22 @@ static int max_extfrag_threshold = 1000;
58c5fc13
MT
69163 #endif
69164
69165 static struct ctl_table kern_table[] = {
ae4e228f 69166+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
58c5fc13 69167+ {
58c5fc13
MT
69168+ .procname = "grsecurity",
69169+ .mode = 0500,
69170+ .child = grsecurity_table,
69171+ },
69172+#endif
69173+
69174+#ifdef CONFIG_PAX_SOFTMODE
69175+ {
58c5fc13
MT
69176+ .procname = "pax",
69177+ .mode = 0500,
69178+ .child = pax_table,
69179+ },
69180+#endif
69181+
58c5fc13 69182 {
ae4e228f
MT
69183 .procname = "sched_child_runs_first",
69184 .data = &sysctl_sched_child_runs_first,
c6e2a6c8 69185@@ -540,7 +569,7 @@ static struct ctl_table kern_table[] = {
bc901d79
MT
69186 .data = &modprobe_path,
69187 .maxlen = KMOD_PATH_LEN,
69188 .mode = 0644,
69189- .proc_handler = proc_dostring,
69190+ .proc_handler = proc_dostring_modpriv,
69191 },
69192 {
69193 .procname = "modules_disabled",
c6e2a6c8 69194@@ -707,16 +736,20 @@ static struct ctl_table kern_table[] = {
16454cff
MT
69195 .extra1 = &zero,
69196 .extra2 = &one,
69197 },
69198+#endif
69199 {
69200 .procname = "kptr_restrict",
69201 .data = &kptr_restrict,
69202 .maxlen = sizeof(int),
69203 .mode = 0644,
5e856224 69204 .proc_handler = proc_dointvec_minmax_sysadmin,
16454cff
MT
69205+#ifdef CONFIG_GRKERNSEC_HIDESYM
69206+ .extra1 = &two,
69207+#else
69208 .extra1 = &zero,
69209+#endif
69210 .extra2 = &two,
69211 },
69212-#endif
69213 {
69214 .procname = "ngroups_max",
69215 .data = &ngroups_max,
c6e2a6c8 69216@@ -1215,6 +1248,13 @@ static struct ctl_table vm_table[] = {
57199397
MT
69217 .proc_handler = proc_dointvec_minmax,
69218 .extra1 = &zero,
69219 },
69220+ {
69221+ .procname = "heap_stack_gap",
69222+ .data = &sysctl_heap_stack_gap,
69223+ .maxlen = sizeof(sysctl_heap_stack_gap),
69224+ .mode = 0644,
69225+ .proc_handler = proc_doulongvec_minmax,
69226+ },
69227 #else
69228 {
69229 .procname = "nr_trim_pages",
c6e2a6c8 69230@@ -1645,6 +1685,16 @@ int proc_dostring(struct ctl_table *table, int write,
bc901d79
MT
69231 buffer, lenp, ppos);
69232 }
69233
69234+int proc_dostring_modpriv(struct ctl_table *table, int write,
69235+ void __user *buffer, size_t *lenp, loff_t *ppos)
69236+{
69237+ if (write && !capable(CAP_SYS_MODULE))
69238+ return -EPERM;
69239+
69240+ return _proc_do_string(table->data, table->maxlen, write,
69241+ buffer, lenp, ppos);
69242+}
69243+
69244 static size_t proc_skip_spaces(char **buf)
69245 {
69246 size_t ret;
c6e2a6c8 69247@@ -1750,6 +1800,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
57199397
MT
69248 len = strlen(tmp);
69249 if (len > *size)
69250 len = *size;
69251+ if (len > sizeof(tmp))
69252+ len = sizeof(tmp);
69253 if (copy_to_user(*buf, tmp, len))
69254 return -EFAULT;
69255 *size -= len;
c6e2a6c8
MT
69256@@ -1942,7 +1994,6 @@ static int proc_taint(struct ctl_table *table, int write,
69257 return err;
69258 }
69259
69260-#ifdef CONFIG_PRINTK
69261 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
69262 void __user *buffer, size_t *lenp, loff_t *ppos)
69263 {
69264@@ -1951,7 +2002,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
69265
69266 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
69267 }
69268-#endif
69269
69270 struct do_proc_dointvec_minmax_conv_param {
69271 int *min;
69272@@ -2066,8 +2116,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
6892158b
MT
69273 *i = val;
69274 } else {
69275 val = convdiv * (*i) / convmul;
69276- if (!first)
69277+ if (!first) {
69278 err = proc_put_char(&buffer, &left, '\t');
69279+ if (err)
69280+ break;
69281+ }
69282 err = proc_put_long(&buffer, &left, val, false);
69283 if (err)
69284 break;
c6e2a6c8 69285@@ -2459,6 +2512,12 @@ int proc_dostring(struct ctl_table *table, int write,
bc901d79
MT
69286 return -ENOSYS;
69287 }
69288
69289+int proc_dostring_modpriv(struct ctl_table *table, int write,
69290+ void __user *buffer, size_t *lenp, loff_t *ppos)
69291+{
69292+ return -ENOSYS;
69293+}
69294+
69295 int proc_dointvec(struct ctl_table *table, int write,
69296 void __user *buffer, size_t *lenp, loff_t *ppos)
69297 {
c6e2a6c8 69298@@ -2515,5 +2574,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
bc901d79
MT
69299 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
69300 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
69301 EXPORT_SYMBOL(proc_dostring);
69302+EXPORT_SYMBOL(proc_dostring_modpriv);
69303 EXPORT_SYMBOL(proc_doulongvec_minmax);
69304 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
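
[Aside] The kern_table hunk above routes the kernel.modprobe entry through the new proc_dostring_modpriv(), so a write is refused with EPERM unless the writer holds CAP_SYS_MODULE, even if it is otherwise root. A small user-space probe of that behaviour, offered only as a sketch (it is meaningful only on a kernel carrying this patch; the value written back is just the stock default, so the probe is harmless):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char def[] = "/sbin/modprobe\n";   /* stock default of kernel.modprobe */
	int fd = open("/proc/sys/kernel/modprobe", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/sys/kernel/modprobe");
		return 1;
	}

	/* With proc_dostring_modpriv this fails with EPERM unless the caller
	 * has CAP_SYS_MODULE; on a stock kernel a root caller succeeds. */
	if (write(fd, def, sizeof(def) - 1) < 0)
		printf("write rejected: %s\n", strerror(errno));
	else
		printf("write accepted\n");

	close(fd);
	return 0;
}
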
fe2de317 69305diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
4c928ab7 69306index a650694..aaeeb20 100644
fe2de317
MT
69307--- a/kernel/sysctl_binary.c
69308+++ b/kernel/sysctl_binary.c
69309@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
69310 int i;
69311
69312 set_fs(KERNEL_DS);
69313- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69314+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69315 set_fs(old_fs);
69316 if (result < 0)
69317 goto out_kfree;
69318@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
69319 }
69320
69321 set_fs(KERNEL_DS);
69322- result = vfs_write(file, buffer, str - buffer, &pos);
69323+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69324 set_fs(old_fs);
69325 if (result < 0)
69326 goto out_kfree;
69327@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
69328 int i;
69329
69330 set_fs(KERNEL_DS);
69331- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
69332+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
69333 set_fs(old_fs);
69334 if (result < 0)
69335 goto out_kfree;
69336@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
69337 }
69338
69339 set_fs(KERNEL_DS);
69340- result = vfs_write(file, buffer, str - buffer, &pos);
69341+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
69342 set_fs(old_fs);
69343 if (result < 0)
69344 goto out_kfree;
69345@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
69346 int i;
69347
69348 set_fs(KERNEL_DS);
69349- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69350+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69351 set_fs(old_fs);
69352 if (result < 0)
69353 goto out;
69354@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69355 __le16 dnaddr;
69356
69357 set_fs(KERNEL_DS);
69358- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
69359+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
69360 set_fs(old_fs);
69361 if (result < 0)
69362 goto out;
69363@@ -1233,7 +1233,7 @@ static ssize_t bin_dn_node_address(struct file *file,
69364 le16_to_cpu(dnaddr) & 0x3ff);
69365
69366 set_fs(KERNEL_DS);
69367- result = vfs_write(file, buf, len, &pos);
69368+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
69369 set_fs(old_fs);
69370 if (result < 0)
69371 goto out;
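
[Aside] The casts in this sysctl_binary.c diff (and many similar ones later in the patch) only matter to the sparse static checker: the set_fs(KERNEL_DS) sequences deliberately pass kernel buffers through interfaces typed for user pointers, and grsecurity's tightened annotations require an explicit __force_user cast to say so. A stand-alone sketch of the idea; the macro definitions are simplified from the kernel's compiler.h and takes_user_ptr() is purely illustrative, not a kernel function:

#ifdef __CHECKER__                      /* defined when the file is run through sparse */
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

#include <stdio.h>

/* Stands in for an interface like vfs_read() that expects a user pointer. */
static void takes_user_ptr(const char __user *p) { (void)p; }

int main(void)
{
	char kernel_buf[8] = "abc";

	/* takes_user_ptr(kernel_buf);  <- sparse would warn: wrong address space */

	/* The patch's idiom: an explicit cast documents the intentional mix. */
	takes_user_ptr((const char __force __user *)kernel_buf);

	puts("builds cleanly; run sparse on this file to see the address-space check");
	return 0;
}
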
fe2de317
MT
69372diff --git a/kernel/taskstats.c b/kernel/taskstats.c
69373index e660464..c8b9e67 100644
69374--- a/kernel/taskstats.c
69375+++ b/kernel/taskstats.c
df50ba0c 69376@@ -27,9 +27,12 @@
58c5fc13
MT
69377 #include <linux/cgroup.h>
69378 #include <linux/fs.h>
69379 #include <linux/file.h>
69380+#include <linux/grsecurity.h>
69381 #include <net/genetlink.h>
6e9df6a3 69382 #include <linux/atomic.h>
58c5fc13
MT
69383
69384+extern int gr_is_taskstats_denied(int pid);
69385+
69386 /*
69387 * Maximum length of a cpumask that can be specified in
69388 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
6e9df6a3 69389@@ -556,6 +559,9 @@ err:
58c5fc13 69390
bc901d79
MT
69391 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
69392 {
58c5fc13
MT
69393+ if (gr_is_taskstats_denied(current->pid))
69394+ return -EACCES;
69395+
bc901d79
MT
69396 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
69397 return cmd_attr_register_cpumask(info);
69398 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
fe2de317 69399diff --git a/kernel/time.c b/kernel/time.c
c6e2a6c8 69400index ba744cf..267b7c5 100644
fe2de317
MT
69401--- a/kernel/time.c
69402+++ b/kernel/time.c
69403@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
69404 return error;
69405
69406 if (tz) {
69407+ /* we log in do_settimeofday called below, so don't log twice
69408+ */
69409+ if (!tv)
69410+ gr_log_timechange();
69411+
fe2de317
MT
69412 sys_tz = *tz;
69413 update_vsyscall_tz();
c6e2a6c8 69414 if (firsttime) {
fe2de317 69415diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
c6e2a6c8 69416index 8a538c5..def79d4 100644
fe2de317
MT
69417--- a/kernel/time/alarmtimer.c
69418+++ b/kernel/time/alarmtimer.c
c6e2a6c8 69419@@ -779,7 +779,7 @@ static int __init alarmtimer_init(void)
4c928ab7 69420 struct platform_device *pdev;
15a11c5b
MT
69421 int error = 0;
69422 int i;
69423- struct k_clock alarm_clock = {
69424+ static struct k_clock alarm_clock = {
69425 .clock_getres = alarm_clock_getres,
69426 .clock_get = alarm_clock_get,
69427 .timer_create = alarm_timer_create,
fe2de317 69428diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
c6e2a6c8 69429index f113755..ec24223 100644
fe2de317
MT
69430--- a/kernel/time/tick-broadcast.c
69431+++ b/kernel/time/tick-broadcast.c
69432@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
58c5fc13
MT
69433 * then clear the broadcast bit.
69434 */
69435 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
69436- int cpu = smp_processor_id();
69437+ cpu = smp_processor_id();
69438
69439 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
69440 tick_broadcast_clear_oneshot(cpu);
fe2de317 69441diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
572b4308 69442index 7c50de8..e29a94d 100644
fe2de317
MT
69443--- a/kernel/time/timekeeping.c
69444+++ b/kernel/time/timekeeping.c
bc901d79
MT
69445@@ -14,6 +14,7 @@
69446 #include <linux/init.h>
69447 #include <linux/mm.h>
69448 #include <linux/sched.h>
69449+#include <linux/grsecurity.h>
66a7e928 69450 #include <linux/syscore_ops.h>
bc901d79
MT
69451 #include <linux/clocksource.h>
69452 #include <linux/jiffies.h>
572b4308 69453@@ -388,6 +389,8 @@ int do_settimeofday(const struct timespec *tv)
bc901d79
MT
69454 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
69455 return -EINVAL;
69456
69457+ gr_log_timechange();
69458+
c6e2a6c8 69459 write_seqlock_irqsave(&timekeeper.lock, flags);
bc901d79
MT
69460
69461 timekeeping_forward_now();
fe2de317
MT
69462diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
69463index 3258455..f35227d 100644
69464--- a/kernel/time/timer_list.c
69465+++ b/kernel/time/timer_list.c
69466@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
57199397
MT
69467
69468 static void print_name_offset(struct seq_file *m, void *sym)
69469 {
69470+#ifdef CONFIG_GRKERNSEC_HIDESYM
69471+ SEQ_printf(m, "<%p>", NULL);
69472+#else
69473 char symname[KSYM_NAME_LEN];
69474
69475 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
16454cff 69476 SEQ_printf(m, "<%pK>", sym);
57199397
MT
69477 else
69478 SEQ_printf(m, "%s", symname);
69479+#endif
69480 }
69481
69482 static void
69483@@ -112,7 +116,11 @@ next_one:
69484 static void
69485 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
69486 {
69487+#ifdef CONFIG_GRKERNSEC_HIDESYM
69488+ SEQ_printf(m, " .base: %p\n", NULL);
69489+#else
16454cff 69490 SEQ_printf(m, " .base: %pK\n", base);
57199397
MT
69491+#endif
69492 SEQ_printf(m, " .index: %d\n",
69493 base->index);
69494 SEQ_printf(m, " .resolution: %Lu nsecs\n",
fe2de317 69495@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
57199397
MT
69496 {
69497 struct proc_dir_entry *pe;
69498
69499+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69500+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
69501+#else
69502 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
69503+#endif
69504 if (!pe)
69505 return -ENOMEM;
69506 return 0;
fe2de317 69507diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
4c928ab7 69508index 0b537f2..9e71eca 100644
fe2de317
MT
69509--- a/kernel/time/timer_stats.c
69510+++ b/kernel/time/timer_stats.c
8308f9c9
MT
69511@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
69512 static unsigned long nr_entries;
69513 static struct entry entries[MAX_ENTRIES];
69514
69515-static atomic_t overflow_count;
69516+static atomic_unchecked_t overflow_count;
69517
69518 /*
69519 * The entries are in a hash-table, for fast lookup:
69520@@ -140,7 +140,7 @@ static void reset_entries(void)
69521 nr_entries = 0;
69522 memset(entries, 0, sizeof(entries));
69523 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
69524- atomic_set(&overflow_count, 0);
69525+ atomic_set_unchecked(&overflow_count, 0);
69526 }
69527
69528 static struct entry *alloc_entry(void)
fe2de317 69529@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
8308f9c9
MT
69530 if (likely(entry))
69531 entry->count++;
69532 else
69533- atomic_inc(&overflow_count);
69534+ atomic_inc_unchecked(&overflow_count);
69535
69536 out_unlock:
69537 raw_spin_unlock_irqrestore(lock, flags);
fe2de317 69538@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
57199397
MT
69539
69540 static void print_name_offset(struct seq_file *m, unsigned long addr)
69541 {
69542+#ifdef CONFIG_GRKERNSEC_HIDESYM
69543+ seq_printf(m, "<%p>", NULL);
69544+#else
69545 char symname[KSYM_NAME_LEN];
69546
69547 if (lookup_symbol_name(addr, symname) < 0)
69548 seq_printf(m, "<%p>", (void *)addr);
69549 else
69550 seq_printf(m, "%s", symname);
69551+#endif
69552 }
69553
69554 static int tstats_show(struct seq_file *m, void *v)
fe2de317 69555@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
8308f9c9
MT
69556
69557 seq_puts(m, "Timer Stats Version: v0.2\n");
69558 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
69559- if (atomic_read(&overflow_count))
69560+ if (atomic_read_unchecked(&overflow_count))
69561 seq_printf(m, "Overflow: %d entries\n",
69562- atomic_read(&overflow_count));
69563+ atomic_read_unchecked(&overflow_count));
69564
69565 for (i = 0; i < nr_entries; i++) {
69566 entry = entries + i;
fe2de317 69567@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
57199397
MT
69568 {
69569 struct proc_dir_entry *pe;
69570
69571+#ifdef CONFIG_GRKERNSEC_PROC_ADD
69572+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
69573+#else
69574 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
69575+#endif
69576 if (!pe)
69577 return -ENOMEM;
69578 return 0;
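
[Aside] Both timer procfs files above are narrowed when GRKERNSEC_PROC_ADD is set (timer_list 0444 to 0400, timer_stats 0644 to 0600), and GRKERNSEC_HIDESYM additionally blanks their symbol pointers. A quick way to see which mode a running kernel chose, as a sketch only (note that /proc/timer_stats exists only when CONFIG_TIMER_STATS is enabled):

#include <stdio.h>
#include <sys/stat.h>

static void show_mode(const char *path)
{
	struct stat st;

	if (stat(path, &st) == 0)
		printf("%-18s mode %04o\n", path, st.st_mode & 07777);
	else
		perror(path);
}

int main(void)
{
	show_mode("/proc/timer_list");   /* 0400 with GRKERNSEC_PROC_ADD, else 0444 */
	show_mode("/proc/timer_stats");  /* 0600 with GRKERNSEC_PROC_ADD, else 0644 */
	return 0;
}
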
fe2de317 69579diff --git a/kernel/timer.c b/kernel/timer.c
5e856224 69580index a297ffc..5e16b0b 100644
fe2de317
MT
69581--- a/kernel/timer.c
69582+++ b/kernel/timer.c
5e856224 69583@@ -1354,7 +1354,7 @@ void update_process_times(int user_tick)
ae4e228f
MT
69584 /*
69585 * This function runs timers and the timer-tq in bottom half context.
69586 */
69587-static void run_timer_softirq(struct softirq_action *h)
69588+static void run_timer_softirq(void)
69589 {
16454cff 69590 struct tvec_base *base = __this_cpu_read(tvec_bases);
58c5fc13 69591
fe2de317 69592diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
c6e2a6c8 69593index c0bd030..62a1927 100644
fe2de317
MT
69594--- a/kernel/trace/blktrace.c
69595+++ b/kernel/trace/blktrace.c
c6e2a6c8 69596@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
8308f9c9
MT
69597 struct blk_trace *bt = filp->private_data;
69598 char buf[16];
69599
69600- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
69601+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
69602
69603 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
69604 }
c6e2a6c8 69605@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
8308f9c9
MT
69606 return 1;
69607
69608 bt = buf->chan->private_data;
69609- atomic_inc(&bt->dropped);
69610+ atomic_inc_unchecked(&bt->dropped);
69611 return 0;
69612 }
69613
c6e2a6c8 69614@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
8308f9c9
MT
69615
69616 bt->dir = dir;
69617 bt->dev = dev;
69618- atomic_set(&bt->dropped, 0);
69619+ atomic_set_unchecked(&bt->dropped, 0);
69620
69621 ret = -EIO;
69622 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
fe2de317 69623diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
c6e2a6c8 69624index 0fa92f6..89950b2 100644
fe2de317
MT
69625--- a/kernel/trace/ftrace.c
69626+++ b/kernel/trace/ftrace.c
c6e2a6c8 69627@@ -1800,12 +1800,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
15a11c5b
MT
69628 if (unlikely(ftrace_disabled))
69629 return 0;
ae4e228f
MT
69630
69631+ ret = ftrace_arch_code_modify_prepare();
69632+ FTRACE_WARN_ON(ret);
69633+ if (ret)
69634+ return 0;
69635+
69636 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
69637+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
69638 if (ret) {
69639 ftrace_bug(ret, ip);
ae4e228f
MT
69640- return 0;
69641 }
69642- return 1;
69643+ return ret ? 0 : 1;
58c5fc13
MT
69644 }
69645
ae4e228f 69646 /*
c6e2a6c8 69647@@ -2917,7 +2922,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
66a7e928
MT
69648
69649 int
69650 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
69651- void *data)
69652+ void *data)
69653 {
69654 struct ftrace_func_probe *entry;
69655 struct ftrace_page *pg;
fe2de317 69656diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
572b4308 69657index 55e4d4c..8c915ec 100644
fe2de317
MT
69658--- a/kernel/trace/trace.c
69659+++ b/kernel/trace/trace.c
572b4308 69660@@ -4316,10 +4316,9 @@ static const struct file_operations tracing_dyn_info_fops = {
ae4e228f
MT
69661 };
69662 #endif
58c5fc13 69663
ae4e228f
MT
69664-static struct dentry *d_tracer;
69665-
69666 struct dentry *tracing_init_dentry(void)
69667 {
69668+ static struct dentry *d_tracer;
69669 static int once;
69670
69671 if (d_tracer)
572b4308 69672@@ -4339,10 +4338,9 @@ struct dentry *tracing_init_dentry(void)
ae4e228f 69673 return d_tracer;
58c5fc13
MT
69674 }
69675
ae4e228f
MT
69676-static struct dentry *d_percpu;
69677-
69678 struct dentry *tracing_dentry_percpu(void)
69679 {
69680+ static struct dentry *d_percpu;
69681 static int once;
69682 struct dentry *d_tracer;
69683
fe2de317 69684diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
c6e2a6c8 69685index 29111da..d190fe2 100644
fe2de317
MT
69686--- a/kernel/trace/trace_events.c
69687+++ b/kernel/trace/trace_events.c
c6e2a6c8 69688@@ -1308,10 +1308,6 @@ static LIST_HEAD(ftrace_module_file_list);
bc901d79
MT
69689 struct ftrace_module_file_ops {
69690 struct list_head list;
69691 struct module *mod;
16454cff
MT
69692- struct file_operations id;
69693- struct file_operations enable;
69694- struct file_operations format;
69695- struct file_operations filter;
16454cff
MT
69696 };
69697
69698 static struct ftrace_module_file_ops *
c6e2a6c8 69699@@ -1332,17 +1328,12 @@ trace_create_file_ops(struct module *mod)
15a11c5b
MT
69700
69701 file_ops->mod = mod;
66a7e928 69702
15a11c5b
MT
69703- file_ops->id = ftrace_event_id_fops;
69704- file_ops->id.owner = mod;
69705-
69706- file_ops->enable = ftrace_enable_fops;
69707- file_ops->enable.owner = mod;
69708-
69709- file_ops->filter = ftrace_event_filter_fops;
69710- file_ops->filter.owner = mod;
69711-
69712- file_ops->format = ftrace_event_format_fops;
69713- file_ops->format.owner = mod;
69714+ pax_open_kernel();
69715+ *(void **)&mod->trace_id.owner = mod;
69716+ *(void **)&mod->trace_enable.owner = mod;
69717+ *(void **)&mod->trace_filter.owner = mod;
69718+ *(void **)&mod->trace_format.owner = mod;
69719+ pax_close_kernel();
69720
69721 list_add(&file_ops->list, &ftrace_module_file_list);
69722
c6e2a6c8 69723@@ -1366,8 +1357,8 @@ static void trace_module_add_events(struct module *mod)
15a11c5b
MT
69724
69725 for_each_event(call, start, end) {
69726 __trace_add_event_call(*call, mod,
69727- &file_ops->id, &file_ops->enable,
69728- &file_ops->filter, &file_ops->format);
69729+ &mod->trace_id, &mod->trace_enable,
69730+ &mod->trace_filter, &mod->trace_format);
69731 }
69732 }
69733
fe2de317 69734diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
c6e2a6c8 69735index 580a05e..9b31acb 100644
fe2de317
MT
69736--- a/kernel/trace/trace_kprobe.c
69737+++ b/kernel/trace/trace_kprobe.c
69738@@ -217,7 +217,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
6e9df6a3
MT
69739 long ret;
69740 int maxlen = get_rloc_len(*(u32 *)dest);
69741 u8 *dst = get_rloc_data(dest);
69742- u8 *src = addr;
69743+ const u8 __user *src = (const u8 __force_user *)addr;
69744 mm_segment_t old_fs = get_fs();
69745 if (!maxlen)
69746 return;
fe2de317 69747@@ -229,7 +229,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
6e9df6a3
MT
69748 pagefault_disable();
69749 do
69750 ret = __copy_from_user_inatomic(dst++, src++, 1);
69751- while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen);
69752+ while (dst[-1] && ret == 0 && src - (const u8 __force_user *)addr < maxlen);
69753 dst[-1] = '\0';
69754 pagefault_enable();
69755 set_fs(old_fs);
fe2de317 69756@@ -238,7 +238,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
6e9df6a3
MT
69757 ((u8 *)get_rloc_data(dest))[0] = '\0';
69758 *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest));
69759 } else
69760- *(u32 *)dest = make_data_rloc(src - (u8 *)addr,
69761+ *(u32 *)dest = make_data_rloc(src - (const u8 __force_user *)addr,
69762 get_rloc_offs(*(u32 *)dest));
69763 }
69764 /* Return the length of string -- including null terminal byte */
fe2de317 69765@@ -252,7 +252,7 @@ static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs,
6e9df6a3
MT
69766 set_fs(KERNEL_DS);
69767 pagefault_disable();
69768 do {
69769- ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
69770+ ret = __copy_from_user_inatomic(&c, (const u8 __force_user *)addr + len, 1);
69771 len++;
69772 } while (c && ret == 0 && len < MAX_STRING_SIZE);
69773 pagefault_enable();
fe2de317
MT
69774diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
69775index fd3c8aa..5f324a6 100644
69776--- a/kernel/trace/trace_mmiotrace.c
69777+++ b/kernel/trace/trace_mmiotrace.c
8308f9c9
MT
69778@@ -24,7 +24,7 @@ struct header_iter {
69779 static struct trace_array *mmio_trace_array;
69780 static bool overrun_detected;
69781 static unsigned long prev_overruns;
69782-static atomic_t dropped_count;
69783+static atomic_unchecked_t dropped_count;
69784
69785 static void mmio_reset_data(struct trace_array *tr)
69786 {
fe2de317 69787@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
8308f9c9
MT
69788
69789 static unsigned long count_overruns(struct trace_iterator *iter)
69790 {
69791- unsigned long cnt = atomic_xchg(&dropped_count, 0);
69792+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
69793 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
69794
69795 if (over > prev_overruns)
fe2de317 69796@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
8308f9c9
MT
69797 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
69798 sizeof(*entry), 0, pc);
69799 if (!event) {
69800- atomic_inc(&dropped_count);
69801+ atomic_inc_unchecked(&dropped_count);
69802 return;
69803 }
69804 entry = ring_buffer_event_data(event);
fe2de317 69805@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
8308f9c9
MT
69806 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
69807 sizeof(*entry), 0, pc);
69808 if (!event) {
69809- atomic_inc(&dropped_count);
69810+ atomic_inc_unchecked(&dropped_count);
69811 return;
69812 }
69813 entry = ring_buffer_event_data(event);
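
[Aside] dropped_count here, like bt->dropped and overflow_count earlier, is a pure statistic, which is why the patch converts it to atomic_unchecked_t: PaX's REFCOUNT hardening treats atomic_t overflow as a bug and traps on it, so counters that may legitimately wrap must opt out. The distinction, shown as a plain user-space analogy rather than the kernel mechanism (assumes a compiler providing __builtin_add_overflow, e.g. GCC 5+ or Clang):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	int refcount = INT_MAX;     /* a counter where wrapping would mean a bug */
	unsigned stat_counter = 0;  /* a statistic where wrapping is harmless    */
	int sum;

	/* "checked" add: the overflow is detected and can be treated as fatal */
	if (__builtin_add_overflow(refcount, 1, &sum))
		puts("refcount overflow detected - a REFCOUNT kernel would trap here");

	/* "unchecked" add: wrapping is expected and simply ignored */
	stat_counter = stat_counter - 1;  /* wraps to UINT_MAX, and that is fine */
	printf("stat counter after wrap: %u\n", stat_counter);

	return 0;
}
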
fe2de317 69814diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
c6e2a6c8 69815index df611a0..10d8b32 100644
fe2de317
MT
69816--- a/kernel/trace/trace_output.c
69817+++ b/kernel/trace/trace_output.c
c6e2a6c8 69818@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
ae4e228f 69819
58c5fc13
MT
69820 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
69821 if (!IS_ERR(p)) {
69822- p = mangle_path(s->buffer + s->len, p, "\n");
69823+ p = mangle_path(s->buffer + s->len, p, "\n\\");
69824 if (p) {
69825 s->len = p - s->buffer;
69826 return 1;
fe2de317 69827diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
5e856224 69828index d4545f4..a9010a1 100644
fe2de317
MT
69829--- a/kernel/trace/trace_stack.c
69830+++ b/kernel/trace/trace_stack.c
5e856224 69831@@ -53,7 +53,7 @@ static inline void check_stack(void)
ae4e228f 69832 return;
58c5fc13 69833
ae4e228f
MT
69834 /* we do not handle interrupt stacks yet */
69835- if (!object_is_on_stack(&this_size))
69836+ if (!object_starts_on_stack(&this_size))
69837 return;
69838
69839 local_irq_save(flags);
fe2de317
MT
69840diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
69841index 209b379..7f76423 100644
69842--- a/kernel/trace/trace_workqueue.c
69843+++ b/kernel/trace/trace_workqueue.c
71d190be
MT
69844@@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
69845 int cpu;
69846 pid_t pid;
69847 /* Can be inserted from interrupt or user context, need to be atomic */
69848- atomic_t inserted;
69849+ atomic_unchecked_t inserted;
69850 /*
69851 * Don't need to be atomic, works are serialized in a single workqueue thread
69852 * on a single CPU.
69853@@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
69854 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
69855 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
69856 if (node->pid == wq_thread->pid) {
69857- atomic_inc(&node->inserted);
69858+ atomic_inc_unchecked(&node->inserted);
69859 goto found;
69860 }
69861 }
fe2de317 69862@@ -210,7 +210,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
71d190be
MT
69863 tsk = get_pid_task(pid, PIDTYPE_PID);
69864 if (tsk) {
69865 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
69866- atomic_read(&cws->inserted), cws->executed,
69867+ atomic_read_unchecked(&cws->inserted), cws->executed,
69868 tsk->comm);
69869 put_task_struct(tsk);
69870 }
fe2de317 69871diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
c6e2a6c8 69872index 6777153..8519f60 100644
fe2de317
MT
69873--- a/lib/Kconfig.debug
69874+++ b/lib/Kconfig.debug
c6e2a6c8 69875@@ -1132,6 +1132,7 @@ config LATENCYTOP
fe2de317
MT
69876 depends on DEBUG_KERNEL
69877 depends on STACKTRACE_SUPPORT
69878 depends on PROC_FS
69879+ depends on !GRKERNSEC_HIDESYM
4c928ab7 69880 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
fe2de317
MT
69881 select KALLSYMS
69882 select KALLSYMS_ALL
69883diff --git a/lib/bitmap.c b/lib/bitmap.c
c6e2a6c8 69884index b5a8b6a..a69623c 100644
fe2de317
MT
69885--- a/lib/bitmap.c
69886+++ b/lib/bitmap.c
c6e2a6c8 69887@@ -421,7 +421,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
6e9df6a3
MT
69888 {
69889 int c, old_c, totaldigits, ndigits, nchunks, nbits;
69890 u32 chunk;
4c928ab7 69891- const char __user __force *ubuf = (const char __user __force *)buf;
6e9df6a3
MT
69892+ const char __user *ubuf = (const char __force_user *)buf;
69893
69894 bitmap_zero(maskp, nmaskbits);
69895
c6e2a6c8 69896@@ -506,7 +506,7 @@ int bitmap_parse_user(const char __user *ubuf,
6e9df6a3
MT
69897 {
69898 if (!access_ok(VERIFY_READ, ubuf, ulen))
69899 return -EFAULT;
4c928ab7
MT
69900- return __bitmap_parse((const char __force *)ubuf,
69901+ return __bitmap_parse((const char __force_kernel *)ubuf,
69902 ulen, 1, maskp, nmaskbits);
6e9df6a3 69903
4c928ab7 69904 }
c6e2a6c8 69905@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
6e9df6a3
MT
69906 {
69907 unsigned a, b;
69908 int c, old_c, totaldigits;
4c928ab7 69909- const char __user __force *ubuf = (const char __user __force *)buf;
6e9df6a3
MT
69910+ const char __user *ubuf = (const char __force_user *)buf;
69911 int exp_digit, in_range;
69912
69913 totaldigits = c = 0;
c6e2a6c8 69914@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
6e9df6a3
MT
69915 {
69916 if (!access_ok(VERIFY_READ, ubuf, ulen))
69917 return -EFAULT;
4c928ab7 69918- return __bitmap_parselist((const char __force *)ubuf,
6e9df6a3
MT
69919+ return __bitmap_parselist((const char __force_kernel *)ubuf,
69920 ulen, 1, maskp, nmaskbits);
69921 }
69922 EXPORT_SYMBOL(bitmap_parselist_user);
fe2de317 69923diff --git a/lib/bug.c b/lib/bug.c
5e856224 69924index a28c141..2bd3d95 100644
fe2de317
MT
69925--- a/lib/bug.c
69926+++ b/lib/bug.c
69927@@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
ae4e228f
MT
69928 return BUG_TRAP_TYPE_NONE;
69929
69930 bug = find_bug(bugaddr);
69931+ if (!bug)
69932+ return BUG_TRAP_TYPE_NONE;
69933
6892158b
MT
69934 file = NULL;
69935 line = 0;
fe2de317 69936diff --git a/lib/debugobjects.c b/lib/debugobjects.c
5e856224 69937index 0ab9ae8..f01ceca 100644
fe2de317
MT
69938--- a/lib/debugobjects.c
69939+++ b/lib/debugobjects.c
5e856224 69940@@ -288,7 +288,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
ae4e228f
MT
69941 if (limit > 4)
69942 return;
69943
69944- is_on_stack = object_is_on_stack(addr);
69945+ is_on_stack = object_starts_on_stack(addr);
69946 if (is_on_stack == onstack)
69947 return;
69948
fe2de317 69949diff --git a/lib/devres.c b/lib/devres.c
c6e2a6c8 69950index 80b9c76..9e32279 100644
fe2de317
MT
69951--- a/lib/devres.c
69952+++ b/lib/devres.c
6e9df6a3
MT
69953@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
69954 void devm_iounmap(struct device *dev, void __iomem *addr)
69955 {
69956 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
69957- (void *)addr));
69958+ (void __force *)addr));
69959 iounmap(addr);
69960 }
69961 EXPORT_SYMBOL(devm_iounmap);
5e856224 69962@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
6e9df6a3
MT
69963 {
69964 ioport_unmap(addr);
69965 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
69966- devm_ioport_map_match, (void *)addr));
69967+ devm_ioport_map_match, (void __force *)addr));
69968 }
69969 EXPORT_SYMBOL(devm_ioport_unmap);
69970
fe2de317 69971diff --git a/lib/dma-debug.c b/lib/dma-debug.c
c6e2a6c8 69972index 13ef233..5241683 100644
fe2de317
MT
69973--- a/lib/dma-debug.c
69974+++ b/lib/dma-debug.c
c6e2a6c8 69975@@ -924,7 +924,7 @@ out:
58c5fc13 69976
ae4e228f
MT
69977 static void check_for_stack(struct device *dev, void *addr)
69978 {
69979- if (object_is_on_stack(addr))
69980+ if (object_starts_on_stack(addr))
69981 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
69982 "stack [addr=%p]\n", addr);
69983 }
fe2de317
MT
69984diff --git a/lib/extable.c b/lib/extable.c
69985index 4cac81e..63e9b8f 100644
69986--- a/lib/extable.c
69987+++ b/lib/extable.c
15a11c5b
MT
69988@@ -13,6 +13,7 @@
69989 #include <linux/init.h>
69990 #include <linux/sort.h>
69991 #include <asm/uaccess.h>
69992+#include <asm/pgtable.h>
69993
69994 #ifndef ARCH_HAS_SORT_EXTABLE
69995 /*
fe2de317 69996@@ -36,8 +37,10 @@ static int cmp_ex(const void *a, const void *b)
15a11c5b
MT
69997 void sort_extable(struct exception_table_entry *start,
69998 struct exception_table_entry *finish)
69999 {
70000+ pax_open_kernel();
70001 sort(start, finish - start, sizeof(struct exception_table_entry),
70002 cmp_ex, NULL);
70003+ pax_close_kernel();
70004 }
70005
70006 #ifdef CONFIG_MODULES
fe2de317
MT
70007diff --git a/lib/inflate.c b/lib/inflate.c
70008index 013a761..c28f3fc 100644
70009--- a/lib/inflate.c
70010+++ b/lib/inflate.c
6892158b 70011@@ -269,7 +269,7 @@ static void free(void *where)
58c5fc13
MT
70012 malloc_ptr = free_mem_ptr;
70013 }
70014 #else
70015-#define malloc(a) kmalloc(a, GFP_KERNEL)
70016+#define malloc(a) kmalloc((a), GFP_KERNEL)
70017 #define free(a) kfree(a)
70018 #endif
70019
5e856224 70020diff --git a/lib/ioremap.c b/lib/ioremap.c
c6e2a6c8 70021index 0c9216c..863bd89 100644
5e856224
MT
70022--- a/lib/ioremap.c
70023+++ b/lib/ioremap.c
70024@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
70025 unsigned long next;
70026
70027 phys_addr -= addr;
70028- pmd = pmd_alloc(&init_mm, pud, addr);
70029+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
70030 if (!pmd)
70031 return -ENOMEM;
70032 do {
70033@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
70034 unsigned long next;
70035
70036 phys_addr -= addr;
70037- pud = pud_alloc(&init_mm, pgd, addr);
70038+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
70039 if (!pud)
70040 return -ENOMEM;
70041 do {
4c928ab7
MT
70042diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
70043index bd2bea9..6b3c95e 100644
70044--- a/lib/is_single_threaded.c
70045+++ b/lib/is_single_threaded.c
70046@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
70047 struct task_struct *p, *t;
70048 bool ret;
70049
70050+ if (!mm)
70051+ return true;
70052+
70053 if (atomic_read(&task->signal->live) != 1)
70054 return false;
70055
fe2de317 70056diff --git a/lib/radix-tree.c b/lib/radix-tree.c
c6e2a6c8 70057index 3ac50dc..240bb7e 100644
fe2de317
MT
70058--- a/lib/radix-tree.c
70059+++ b/lib/radix-tree.c
c6e2a6c8 70060@@ -79,7 +79,7 @@ struct radix_tree_preload {
58c5fc13
MT
70061 int nr;
70062 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
70063 };
70064-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
70065+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
70066
bc901d79 70067 static inline void *ptr_to_indirect(void *ptr)
58c5fc13 70068 {
fe2de317 70069diff --git a/lib/vsprintf.c b/lib/vsprintf.c
572b4308 70070index abbabec..d5eba6c 100644
fe2de317
MT
70071--- a/lib/vsprintf.c
70072+++ b/lib/vsprintf.c
bc901d79
MT
70073@@ -16,6 +16,9 @@
70074 * - scnprintf and vscnprintf
70075 */
70076
70077+#ifdef CONFIG_GRKERNSEC_HIDESYM
70078+#define __INCLUDED_BY_HIDESYM 1
70079+#endif
70080 #include <stdarg.h>
c6e2a6c8 70081 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
bc901d79 70082 #include <linux/types.h>
c6e2a6c8 70083@@ -433,7 +436,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
bc901d79 70084 char sym[KSYM_SYMBOL_LEN];
66a7e928
MT
70085 if (ext == 'B')
70086 sprint_backtrace(sym, value);
70087- else if (ext != 'f' && ext != 's')
70088+ else if (ext != 'f' && ext != 's' && ext != 'a')
bc901d79
MT
70089 sprint_symbol(sym, value);
70090 else
70091 kallsyms_lookup(value, NULL, NULL, NULL, sym);
c6e2a6c8 70092@@ -809,7 +812,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
5e856224 70093 return number(buf, end, *(const netdev_features_t *)addr, spec);
16454cff
MT
70094 }
70095
70096+#ifdef CONFIG_GRKERNSEC_HIDESYM
66a7e928 70097+int kptr_restrict __read_mostly = 2;
16454cff 70098+#else
66a7e928 70099 int kptr_restrict __read_mostly;
16454cff
MT
70100+#endif
70101
70102 /*
70103 * Show a '%p' thing. A kernel extension is that the '%p' is followed
c6e2a6c8 70104@@ -823,6 +830,8 @@ int kptr_restrict __read_mostly;
bc901d79
MT
70105 * - 'S' For symbolic direct pointers with offset
70106 * - 's' For symbolic direct pointers without offset
66a7e928 70107 * - 'B' For backtraced symbolic direct pointers with offset
bc901d79
MT
70108+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
70109+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
70110 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
70111 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
70112 * - 'M' For a 6-byte MAC address, it prints the address in the
572b4308
MT
70113@@ -866,14 +875,25 @@ static noinline_for_stack
70114 char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70115 struct printf_spec spec)
bc901d79 70116 {
572b4308
MT
70117+#ifdef CONFIG_GRKERNSEC_HIDESYM
70118+ /* 'P' = approved pointers to copy to userland,
70119+ as in the /proc/kallsyms case, as we make it display nothing
70120+ for non-root users, and the real contents for root users
70121+ */
70122+ if (ptr > TASK_SIZE && *fmt != 'P' && is_usercopy_object(buf)) {
70123+ ptr = NULL;
70124+ goto simple;
70125+ }
70126+#endif
70127+
66a7e928 70128 if (!ptr && *fmt != 'K') {
bc901d79
MT
70129 /*
70130- * Print (null) with the same width as a pointer so it makes
70131+ * Print (nil) with the same width as a pointer so it makes
70132 * tabular output look nice.
70133 */
70134 if (spec.field_width == -1)
70135 spec.field_width = 2 * sizeof(void *);
6892158b
MT
70136- return string(buf, end, "(null)", spec);
70137+ return string(buf, end, "(nil)", spec);
bc901d79 70138 }
6892158b
MT
70139
70140 switch (*fmt) {
572b4308 70141@@ -883,6 +903,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
bc901d79
MT
70142 /* Fallthrough */
70143 case 'S':
70144 case 's':
70145+#ifdef CONFIG_GRKERNSEC_HIDESYM
70146+ break;
70147+#else
70148+ return symbol_string(buf, end, ptr, spec, *fmt);
70149+#endif
70150+ case 'A':
70151+ case 'a':
66a7e928 70152 case 'B':
bc901d79
MT
70153 return symbol_string(buf, end, ptr, spec, *fmt);
70154 case 'R':
572b4308
MT
70155@@ -920,6 +947,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70156 va_end(va);
70157 return buf;
70158 }
70159+ case 'P':
70160+ break;
70161 case 'K':
70162 /*
70163 * %pK cannot be used in IRQ context because its test
70164@@ -942,6 +971,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
70165 }
70166 break;
70167 }
70168+#ifdef CONFIG_GRKERNSEC_HIDESYM
70169+simple:
70170+#endif
70171 spec.flags |= SMALL;
70172 if (spec.field_width == -1) {
70173 spec.field_width = 2 * sizeof(void *);
70174@@ -1653,11 +1685,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
bc901d79
MT
70175 typeof(type) value; \
70176 if (sizeof(type) == 8) { \
70177 args = PTR_ALIGN(args, sizeof(u32)); \
70178- *(u32 *)&value = *(u32 *)args; \
70179- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
70180+ *(u32 *)&value = *(const u32 *)args; \
70181+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
70182 } else { \
70183 args = PTR_ALIGN(args, sizeof(type)); \
70184- value = *(typeof(type) *)args; \
70185+ value = *(const typeof(type) *)args; \
70186 } \
70187 args += sizeof(type); \
70188 value; \
572b4308 70189@@ -1720,7 +1752,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
bc901d79
MT
70190 case FORMAT_TYPE_STR: {
70191 const char *str_arg = args;
70192 args += strlen(str_arg) + 1;
70193- str = string(str, end, (char *)str_arg, spec);
70194+ str = string(str, end, str_arg, spec);
70195 break;
70196 }
70197
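
[Aside] Several hunks in this vsprintf.c diff cooperate with the earlier sysctl change that defaults kptr_restrict to 2 under GRKERNSEC_HIDESYM: ordinary %p output headed for user space is nulled, and the new 'A'/'a'/'P' extensions mark the few call sites allowed to print real addresses. The visible effect can be checked from user space with nothing more than /proc/kallsyms; a sketch (on a hardened kernel an unprivileged run shows zeroed addresses):

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f) {
		perror("/proc/kallsyms");
		return 1;
	}

	/* Each line is: address, type, symbol name.  With kptr_restrict set
	 * (or GRKERNSEC_HIDESYM) the address column is all zeroes for
	 * unprivileged readers. */
	for (int i = 0; i < 3 && fgets(line, sizeof(line), f); i++)
		fputs(line, stdout);

	fclose(f);
	return 0;
}
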
fe2de317
MT
70198diff --git a/localversion-grsec b/localversion-grsec
70199new file mode 100644
70200index 0000000..7cd6065
70201--- /dev/null
70202+++ b/localversion-grsec
58c5fc13
MT
70203@@ -0,0 +1 @@
70204+-grsec
fe2de317 70205diff --git a/mm/Kconfig b/mm/Kconfig
c6e2a6c8 70206index e338407..4210331 100644
fe2de317
MT
70207--- a/mm/Kconfig
70208+++ b/mm/Kconfig
5e856224 70209@@ -247,10 +247,10 @@ config KSM
fe2de317 70210 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
15a11c5b 70211
fe2de317
MT
70212 config DEFAULT_MMAP_MIN_ADDR
70213- int "Low address space to protect from user allocation"
70214+ int "Low address space to protect from user allocation"
70215 depends on MMU
70216- default 4096
70217- help
70218+ default 65536
70219+ help
70220 This is the portion of low virtual memory which should be protected
70221 from userspace allocation. Keeping a user from writing to low pages
70222 can help reduce the impact of kernel NULL pointer bugs.
c6e2a6c8
MT
70223@@ -280,7 +280,7 @@ config MEMORY_FAILURE
70224
70225 config HWPOISON_INJECT
70226 tristate "HWPoison pages injector"
70227- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
70228+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
70229 select PROC_PAGE_MONITOR
70230
70231 config NOMMU_INITIAL_TRIM_EXCESS
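
[Aside] The Kconfig hunk above raises DEFAULT_MMAP_MIN_ADDR from 4096 to 65536, widening the unmappable low range that blunts kernel NULL-pointer dereference exploits. The runtime value is still the vm.mmap_min_addr sysctl, and the effect is easy to observe; a sketch (the exact errno can vary, EPERM is typical, and CAP_SYS_RAWIO bypasses the check):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/mmap_min_addr", "r");
	unsigned long min_addr = 0;

	if (f) {
		if (fscanf(f, "%lu", &min_addr) == 1)
			printf("vm.mmap_min_addr = %lu\n", min_addr);
		fclose(f);
	}

	/* Try to map one page well below 65536; without CAP_SYS_RAWIO this is
	 * refused on a kernel built with the larger default. */
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (p == MAP_FAILED)
		printf("mmap at 0x1000 refused: %s\n", strerror(errno));
	else
		printf("mmap at 0x1000 succeeded at %p\n", p);

	return 0;
}
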
fe2de317 70232diff --git a/mm/filemap.c b/mm/filemap.c
c6e2a6c8 70233index 79c4b2b..596b417 100644
fe2de317
MT
70234--- a/mm/filemap.c
70235+++ b/mm/filemap.c
c6e2a6c8 70236@@ -1762,7 +1762,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
58c5fc13
MT
70237 struct address_space *mapping = file->f_mapping;
70238
70239 if (!mapping->a_ops->readpage)
70240- return -ENOEXEC;
70241+ return -ENODEV;
70242 file_accessed(file);
70243 vma->vm_ops = &generic_file_vm_ops;
70244 vma->vm_flags |= VM_CAN_NONLINEAR;
c6e2a6c8 70245@@ -2168,6 +2168,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
58c5fc13
MT
70246 *pos = i_size_read(inode);
70247
70248 if (limit != RLIM_INFINITY) {
70249+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
70250 if (*pos >= limit) {
70251 send_sig(SIGXFSZ, current, 0);
70252 return -EFBIG;
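
[Aside] The generic_write_checks() hunk feeds the caller's file-size limit into grsecurity's resource learning (gr_learn_resource) just before the stock RLIMIT_FSIZE enforcement runs. The underlying limit behaviour is plain POSIX and can be seen in isolation; a sketch (the first write is truncated at the limit, the second starts past it and fails with EFBIG once SIGXFSZ is ignored; the path /tmp/fsize-demo is arbitrary):

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 4096, .rlim_max = 4096 };
	char buf[8192] = { 0 };
	ssize_t n;
	int fd;

	signal(SIGXFSZ, SIG_IGN);              /* otherwise the second write kills us */
	setrlimit(RLIMIT_FSIZE, &rl);

	fd = open("/tmp/fsize-demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	n = write(fd, buf, sizeof(buf));               /* stops at the 4 KiB limit */
	printf("first write: %zd of %zu bytes\n", n, sizeof(buf));

	n = write(fd, buf, sizeof(buf));               /* offset is now at the limit */
	if (n < 0)
		printf("second write: %s\n", strerror(errno));   /* EFBIG */

	close(fd);
	unlink("/tmp/fsize-demo");
	return 0;
}
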
fe2de317 70253diff --git a/mm/fremap.c b/mm/fremap.c
4c928ab7 70254index 9ed4fd4..c42648d 100644
fe2de317
MT
70255--- a/mm/fremap.c
70256+++ b/mm/fremap.c
4c928ab7 70257@@ -155,6 +155,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
58c5fc13
MT
70258 retry:
70259 vma = find_vma(mm, start);
70260
70261+#ifdef CONFIG_PAX_SEGMEXEC
70262+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
70263+ goto out;
70264+#endif
70265+
70266 /*
70267 * Make sure the vma is shared, that it supports prefaulting,
70268 * and that the remapped range is valid and fully within
fe2de317 70269diff --git a/mm/highmem.c b/mm/highmem.c
4c928ab7 70270index 57d82c6..e9e0552 100644
fe2de317
MT
70271--- a/mm/highmem.c
70272+++ b/mm/highmem.c
bc901d79 70273@@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
58c5fc13
MT
70274 * So no dangers, even with speculative execution.
70275 */
70276 page = pte_page(pkmap_page_table[i]);
ae4e228f 70277+ pax_open_kernel();
58c5fc13
MT
70278 pte_clear(&init_mm, (unsigned long)page_address(page),
70279 &pkmap_page_table[i]);
ae4e228f
MT
70280-
70281+ pax_close_kernel();
58c5fc13
MT
70282 set_page_address(page, NULL);
70283 need_flush = 1;
70284 }
bc901d79 70285@@ -186,9 +187,11 @@ start:
58c5fc13
MT
70286 }
70287 }
70288 vaddr = PKMAP_ADDR(last_pkmap_nr);
ae4e228f
MT
70289+
70290+ pax_open_kernel();
58c5fc13
MT
70291 set_pte_at(&init_mm, vaddr,
70292 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
ae4e228f
MT
70293-
70294+ pax_close_kernel();
58c5fc13
MT
70295 pkmap_count[last_pkmap_nr] = 1;
70296 set_page_address(page, (void *)vaddr);
58c5fc13 70297
fe2de317 70298diff --git a/mm/huge_memory.c b/mm/huge_memory.c
c6e2a6c8 70299index f0e5306..cb9398e 100644
fe2de317
MT
70300--- a/mm/huge_memory.c
70301+++ b/mm/huge_memory.c
5e856224 70302@@ -733,7 +733,7 @@ out:
66a7e928
MT
70303 * run pte_offset_map on the pmd, if an huge pmd could
70304 * materialize from under us from a different thread.
70305 */
70306- if (unlikely(__pte_alloc(mm, vma, pmd, address)))
70307+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
70308 return VM_FAULT_OOM;
70309 /* if an huge pmd materialized from under us just retry later */
70310 if (unlikely(pmd_trans_huge(*pmd)))
fe2de317 70311diff --git a/mm/hugetlb.c b/mm/hugetlb.c
c6e2a6c8 70312index 263e177..3f36aec 100644
fe2de317
MT
70313--- a/mm/hugetlb.c
70314+++ b/mm/hugetlb.c
c6e2a6c8 70315@@ -2446,6 +2446,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
70316 return 1;
70317 }
70318
70319+#ifdef CONFIG_PAX_SEGMEXEC
70320+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
70321+{
70322+ struct mm_struct *mm = vma->vm_mm;
70323+ struct vm_area_struct *vma_m;
70324+ unsigned long address_m;
70325+ pte_t *ptep_m;
70326+
70327+ vma_m = pax_find_mirror_vma(vma);
70328+ if (!vma_m)
70329+ return;
70330+
70331+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70332+ address_m = address + SEGMEXEC_TASK_SIZE;
70333+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
70334+ get_page(page_m);
6892158b 70335+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
58c5fc13
MT
70336+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
70337+}
70338+#endif
70339+
6892158b
MT
70340 /*
70341 * Hugetlb_cow() should be called with page lock of the original hugepage held.
5e856224 70342 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
c6e2a6c8 70343@@ -2558,6 +2579,11 @@ retry_avoidcopy:
58c5fc13 70344 make_huge_pte(vma, new_page, 1));
6892158b
MT
70345 page_remove_rmap(old_page);
70346 hugepage_add_new_anon_rmap(new_page, vma, address);
58c5fc13
MT
70347+
70348+#ifdef CONFIG_PAX_SEGMEXEC
70349+ pax_mirror_huge_pte(vma, address, new_page);
70350+#endif
70351+
70352 /* Make the old page be freed below */
70353 new_page = old_page;
6892158b 70354 mmu_notifier_invalidate_range_end(mm,
c6e2a6c8 70355@@ -2712,6 +2738,10 @@ retry:
58c5fc13
MT
70356 && (vma->vm_flags & VM_SHARED)));
70357 set_huge_pte_at(mm, address, ptep, new_pte);
70358
70359+#ifdef CONFIG_PAX_SEGMEXEC
70360+ pax_mirror_huge_pte(vma, address, page);
70361+#endif
70362+
70363 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
70364 /* Optimization, do the COW without a second fault */
70365 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
c6e2a6c8 70366@@ -2741,6 +2771,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
70367 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
70368 struct hstate *h = hstate_vma(vma);
70369
70370+#ifdef CONFIG_PAX_SEGMEXEC
70371+ struct vm_area_struct *vma_m;
6892158b 70372+#endif
58c5fc13 70373+
5e856224
MT
70374 address &= huge_page_mask(h);
70375
6892158b 70376 ptep = huge_pte_offset(mm, address);
c6e2a6c8 70377@@ -2754,6 +2788,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
bc901d79 70378 VM_FAULT_SET_HINDEX(h - hstates);
6892158b
MT
70379 }
70380
70381+#ifdef CONFIG_PAX_SEGMEXEC
58c5fc13
MT
70382+ vma_m = pax_find_mirror_vma(vma);
70383+ if (vma_m) {
70384+ unsigned long address_m;
70385+
70386+ if (vma->vm_start > vma_m->vm_start) {
70387+ address_m = address;
70388+ address -= SEGMEXEC_TASK_SIZE;
70389+ vma = vma_m;
70390+ h = hstate_vma(vma);
70391+ } else
70392+ address_m = address + SEGMEXEC_TASK_SIZE;
70393+
70394+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
70395+ return VM_FAULT_OOM;
70396+ address_m &= HPAGE_MASK;
70397+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
70398+ }
70399+#endif
70400+
70401 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
70402 if (!ptep)
70403 return VM_FAULT_OOM;
fe2de317
MT
70404diff --git a/mm/internal.h b/mm/internal.h
70405index 2189af4..f2ca332 100644
70406--- a/mm/internal.h
70407+++ b/mm/internal.h
70408@@ -95,6 +95,7 @@ extern void putback_lru_page(struct page *page);
15a11c5b
MT
70409 * in mm/page_alloc.c
70410 */
70411 extern void __free_pages_bootmem(struct page *page, unsigned int order);
70412+extern void free_compound_page(struct page *page);
70413 extern void prep_compound_page(struct page *page, unsigned long order);
70414 #ifdef CONFIG_MEMORY_FAILURE
70415 extern bool is_free_buddy_page(struct page *page);
fe2de317 70416diff --git a/mm/kmemleak.c b/mm/kmemleak.c
5e856224 70417index 45eb621..6ccd8ea 100644
fe2de317
MT
70418--- a/mm/kmemleak.c
70419+++ b/mm/kmemleak.c
5e856224 70420@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
bc901d79
MT
70421
70422 for (i = 0; i < object->trace_len; i++) {
70423 void *ptr = (void *)object->trace[i];
70424- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
70425+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
70426 }
70427 }
70428
fe2de317 70429diff --git a/mm/maccess.c b/mm/maccess.c
4c928ab7 70430index d53adf9..03a24bf 100644
fe2de317
MT
70431--- a/mm/maccess.c
70432+++ b/mm/maccess.c
70433@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
6e9df6a3
MT
70434 set_fs(KERNEL_DS);
70435 pagefault_disable();
70436 ret = __copy_from_user_inatomic(dst,
70437- (__force const void __user *)src, size);
70438+ (const void __force_user *)src, size);
70439 pagefault_enable();
70440 set_fs(old_fs);
70441
fe2de317 70442@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
6e9df6a3
MT
70443
70444 set_fs(KERNEL_DS);
70445 pagefault_disable();
70446- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
70447+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
70448 pagefault_enable();
70449 set_fs(old_fs);
70450
fe2de317 70451diff --git a/mm/madvise.c b/mm/madvise.c
572b4308 70452index 55f645c..cde5320 100644
fe2de317
MT
70453--- a/mm/madvise.c
70454+++ b/mm/madvise.c
572b4308 70455@@ -46,6 +46,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
58c5fc13 70456 pgoff_t pgoff;
ae4e228f 70457 unsigned long new_flags = vma->vm_flags;
58c5fc13
MT
70458
70459+#ifdef CONFIG_PAX_SEGMEXEC
70460+ struct vm_area_struct *vma_m;
70461+#endif
70462+
70463 switch (behavior) {
70464 case MADV_NORMAL:
70465 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
572b4308 70466@@ -117,6 +121,13 @@ success:
58c5fc13
MT
70467 /*
70468 * vm_flags is protected by the mmap_sem held in write mode.
70469 */
70470+
70471+#ifdef CONFIG_PAX_SEGMEXEC
70472+ vma_m = pax_find_mirror_vma(vma);
70473+ if (vma_m)
70474+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
70475+#endif
70476+
70477 vma->vm_flags = new_flags;
70478
70479 out:
572b4308 70480@@ -175,6 +186,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
ae4e228f
MT
70481 struct vm_area_struct ** prev,
70482 unsigned long start, unsigned long end)
70483 {
58c5fc13
MT
70484+
70485+#ifdef CONFIG_PAX_SEGMEXEC
ae4e228f
MT
70486+ struct vm_area_struct *vma_m;
70487+#endif
58c5fc13 70488+
ae4e228f
MT
70489 *prev = vma;
70490 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
70491 return -EINVAL;
572b4308 70492@@ -187,6 +203,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
ae4e228f
MT
70493 zap_page_range(vma, start, end - start, &details);
70494 } else
70495 zap_page_range(vma, start, end - start, NULL);
70496+
70497+#ifdef CONFIG_PAX_SEGMEXEC
70498+ vma_m = pax_find_mirror_vma(vma);
70499+ if (vma_m) {
70500+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
70501+ struct zap_details details = {
70502+ .nonlinear_vma = vma_m,
70503+ .last_index = ULONG_MAX,
70504+ };
70505+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
70506+ } else
70507+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
70508+ }
58c5fc13
MT
70509+#endif
70510+
ae4e228f
MT
70511 return 0;
70512 }
58c5fc13 70513
572b4308 70514@@ -394,6 +425,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
58c5fc13
MT
70515 if (end < start)
70516 goto out;
70517
70518+#ifdef CONFIG_PAX_SEGMEXEC
70519+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70520+ if (end > SEGMEXEC_TASK_SIZE)
70521+ goto out;
70522+ } else
70523+#endif
70524+
70525+ if (end > TASK_SIZE)
70526+ goto out;
70527+
70528 error = 0;
70529 if (end == start)
70530 goto out;
fe2de317 70531diff --git a/mm/memory-failure.c b/mm/memory-failure.c
c6e2a6c8 70532index 97cc273..6ed703f 100644
fe2de317
MT
70533--- a/mm/memory-failure.c
70534+++ b/mm/memory-failure.c
4c928ab7 70535@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
fe2de317
MT
70536
70537 int sysctl_memory_failure_recovery __read_mostly = 1;
70538
70539-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70540+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
70541
70542 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
70543
c6e2a6c8
MT
70544@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
70545 pfn, t->comm, t->pid);
fe2de317
MT
70546 si.si_signo = SIGBUS;
70547 si.si_errno = 0;
fe2de317
MT
70548- si.si_addr = (void *)addr;
70549+ si.si_addr = (void __user *)addr;
70550 #ifdef __ARCH_SI_TRAPNO
70551 si.si_trapno = trapno;
70552 #endif
c6e2a6c8 70553@@ -1036,7 +1036,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
fe2de317
MT
70554 }
70555
70556 nr_pages = 1 << compound_trans_order(hpage);
70557- atomic_long_add(nr_pages, &mce_bad_pages);
70558+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
70559
70560 /*
70561 * We need/can do nothing about count=0 pages.
c6e2a6c8 70562@@ -1066,7 +1066,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
fe2de317
MT
70563 if (!PageHWPoison(hpage)
70564 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
70565 || (p != hpage && TestSetPageHWPoison(hpage))) {
70566- atomic_long_sub(nr_pages, &mce_bad_pages);
70567+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70568 return 0;
70569 }
70570 set_page_hwpoison_huge_page(hpage);
c6e2a6c8 70571@@ -1124,7 +1124,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
fe2de317
MT
70572 }
70573 if (hwpoison_filter(p)) {
70574 if (TestClearPageHWPoison(p))
70575- atomic_long_sub(nr_pages, &mce_bad_pages);
70576+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70577 unlock_page(hpage);
70578 put_page(hpage);
70579 return 0;
c6e2a6c8 70580@@ -1319,7 +1319,7 @@ int unpoison_memory(unsigned long pfn)
fe2de317
MT
70581 return 0;
70582 }
70583 if (TestClearPageHWPoison(p))
70584- atomic_long_sub(nr_pages, &mce_bad_pages);
70585+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70586 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
70587 return 0;
70588 }
c6e2a6c8 70589@@ -1333,7 +1333,7 @@ int unpoison_memory(unsigned long pfn)
fe2de317
MT
70590 */
70591 if (TestClearPageHWPoison(page)) {
70592 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
70593- atomic_long_sub(nr_pages, &mce_bad_pages);
70594+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
70595 freeit = 1;
70596 if (PageHuge(page))
70597 clear_page_hwpoison_huge_page(page);
c6e2a6c8 70598@@ -1446,7 +1446,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
fe2de317
MT
70599 }
70600 done:
70601 if (!PageHWPoison(hpage))
70602- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
70603+ atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
70604 set_page_hwpoison_huge_page(hpage);
70605 dequeue_hwpoisoned_huge_page(hpage);
70606 /* keep elevated page count for bad page */
c6e2a6c8 70607@@ -1577,7 +1577,7 @@ int soft_offline_page(struct page *page, int flags)
fe2de317
MT
70608 return ret;
70609
70610 done:
70611- atomic_long_add(1, &mce_bad_pages);
70612+ atomic_long_add_unchecked(1, &mce_bad_pages);
70613 SetPageHWPoison(page);
70614 /* keep elevated page count for bad page */
70615 return ret;
70616diff --git a/mm/memory.c b/mm/memory.c
c6e2a6c8 70617index 6105f47..3363489 100644
fe2de317
MT
70618--- a/mm/memory.c
70619+++ b/mm/memory.c
c6e2a6c8 70620@@ -434,8 +434,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
df50ba0c
MT
70621 return;
70622
70623 pmd = pmd_offset(pud, start);
70624+
70625+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
70626 pud_clear(pud);
70627 pmd_free_tlb(tlb, pmd, start);
70628+#endif
70629+
70630 }
70631
70632 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
c6e2a6c8 70633@@ -466,9 +470,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
6892158b 70634 if (end - 1 > ceiling - 1)
df50ba0c
MT
70635 return;
70636
df50ba0c 70637+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
6892158b 70638 pud = pud_offset(pgd, start);
df50ba0c
MT
70639 pgd_clear(pgd);
70640 pud_free_tlb(tlb, pud, start);
70641+#endif
70642+
70643 }
70644
70645 /*
c6e2a6c8 70646@@ -1597,12 +1604,6 @@ no_page_table:
71d190be
MT
70647 return page;
70648 }
70649
70650-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
70651-{
66a7e928
MT
70652- return stack_guard_page_start(vma, addr) ||
70653- stack_guard_page_end(vma, addr+PAGE_SIZE);
71d190be
MT
70654-}
70655-
66a7e928
MT
70656 /**
70657 * __get_user_pages() - pin user pages in memory
70658 * @tsk: task_struct of target task
c6e2a6c8 70659@@ -1675,10 +1676,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
ae4e228f 70660 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
58c5fc13
MT
70661 i = 0;
70662
70663- do {
70664+ while (nr_pages) {
70665 struct vm_area_struct *vma;
58c5fc13
MT
70666
70667- vma = find_extend_vma(mm, start);
70668+ vma = find_vma(mm, start);
66a7e928 70669 if (!vma && in_gate_area(mm, start)) {
58c5fc13 70670 unsigned long pg = start & PAGE_MASK;
71d190be 70671 pgd_t *pgd;
c6e2a6c8 70672@@ -1726,7 +1727,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
71d190be 70673 goto next_page;
58c5fc13
MT
70674 }
70675
70676- if (!vma ||
70677+ if (!vma || start < vma->vm_start ||
70678 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
ae4e228f 70679 !(vm_flags & vma->vm_flags))
58c5fc13 70680 return i ? : -EFAULT;
c6e2a6c8 70681@@ -1753,11 +1754,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
66a7e928
MT
70682 int ret;
70683 unsigned int fault_flags = 0;
70684
70685- /* For mlock, just skip the stack guard page. */
70686- if (foll_flags & FOLL_MLOCK) {
70687- if (stack_guard_page(vma, start))
70688- goto next_page;
70689- }
70690 if (foll_flags & FOLL_WRITE)
70691 fault_flags |= FAULT_FLAG_WRITE;
70692 if (nonblocking)
c6e2a6c8 70693@@ -1831,7 +1827,7 @@ next_page:
58c5fc13
MT
70694 start += PAGE_SIZE;
70695 nr_pages--;
70696 } while (nr_pages && start < vma->vm_end);
70697- } while (nr_pages);
70698+ }
70699 return i;
70700 }
66a7e928 70701 EXPORT_SYMBOL(__get_user_pages);
c6e2a6c8 70702@@ -2038,6 +2034,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
6892158b
MT
70703 page_add_file_rmap(page);
70704 set_pte_at(mm, addr, pte, mk_pte(page, prot));
70705
70706+#ifdef CONFIG_PAX_SEGMEXEC
70707+ pax_mirror_file_pte(vma, addr, page, ptl);
70708+#endif
70709+
70710 retval = 0;
70711 pte_unmap_unlock(pte, ptl);
70712 return retval;
c6e2a6c8 70713@@ -2072,10 +2072,22 @@ out:
6892158b
MT
70714 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
70715 struct page *page)
70716 {
70717+
70718+#ifdef CONFIG_PAX_SEGMEXEC
70719+ struct vm_area_struct *vma_m;
70720+#endif
70721+
70722 if (addr < vma->vm_start || addr >= vma->vm_end)
70723 return -EFAULT;
70724 if (!page_count(page))
70725 return -EINVAL;
70726+
70727+#ifdef CONFIG_PAX_SEGMEXEC
70728+ vma_m = pax_find_mirror_vma(vma);
70729+ if (vma_m)
70730+ vma_m->vm_flags |= VM_INSERTPAGE;
70731+#endif
70732+
70733 vma->vm_flags |= VM_INSERTPAGE;
70734 return insert_page(vma, addr, page, vma->vm_page_prot);
70735 }
c6e2a6c8 70736@@ -2161,6 +2173,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
6892158b
MT
70737 unsigned long pfn)
70738 {
70739 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
70740+ BUG_ON(vma->vm_mirror);
70741
70742 if (addr < vma->vm_start || addr >= vma->vm_end)
70743 return -EFAULT;
c6e2a6c8 70744@@ -2368,7 +2381,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
5e856224
MT
70745
70746 BUG_ON(pud_huge(*pud));
70747
70748- pmd = pmd_alloc(mm, pud, addr);
70749+ pmd = (mm == &init_mm) ?
70750+ pmd_alloc_kernel(mm, pud, addr) :
70751+ pmd_alloc(mm, pud, addr);
70752 if (!pmd)
70753 return -ENOMEM;
70754 do {
c6e2a6c8 70755@@ -2388,7 +2403,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
5e856224
MT
70756 unsigned long next;
70757 int err;
70758
70759- pud = pud_alloc(mm, pgd, addr);
70760+ pud = (mm == &init_mm) ?
70761+ pud_alloc_kernel(mm, pgd, addr) :
70762+ pud_alloc(mm, pgd, addr);
70763 if (!pud)
70764 return -ENOMEM;
70765 do {
c6e2a6c8 70766@@ -2476,6 +2493,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
70767 copy_user_highpage(dst, src, va, vma);
70768 }
70769
70770+#ifdef CONFIG_PAX_SEGMEXEC
70771+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
70772+{
70773+ struct mm_struct *mm = vma->vm_mm;
70774+ spinlock_t *ptl;
70775+ pte_t *pte, entry;
70776+
70777+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
70778+ entry = *pte;
70779+ if (!pte_present(entry)) {
70780+ if (!pte_none(entry)) {
70781+ BUG_ON(pte_file(entry));
70782+ free_swap_and_cache(pte_to_swp_entry(entry));
70783+ pte_clear_not_present_full(mm, address, pte, 0);
70784+ }
70785+ } else {
70786+ struct page *page;
70787+
70788+ flush_cache_page(vma, address, pte_pfn(entry));
70789+ entry = ptep_clear_flush(vma, address, pte);
70790+ BUG_ON(pte_dirty(entry));
70791+ page = vm_normal_page(vma, address, entry);
70792+ if (page) {
70793+ update_hiwater_rss(mm);
70794+ if (PageAnon(page))
df50ba0c 70795+ dec_mm_counter_fast(mm, MM_ANONPAGES);
58c5fc13 70796+ else
df50ba0c 70797+ dec_mm_counter_fast(mm, MM_FILEPAGES);
70798+ page_remove_rmap(page);
70799+ page_cache_release(page);
70800+ }
70801+ }
70802+ pte_unmap_unlock(pte, ptl);
70803+}
70804+
70805+/* PaX: if vma is mirrored, synchronize the mirror's PTE
70806+ *
70807+ * the ptl of the lower mapped page is held on entry and is not released on exit
70808+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
70809+ */
70810+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70811+{
70812+ struct mm_struct *mm = vma->vm_mm;
70813+ unsigned long address_m;
70814+ spinlock_t *ptl_m;
70815+ struct vm_area_struct *vma_m;
70816+ pmd_t *pmd_m;
70817+ pte_t *pte_m, entry_m;
70818+
70819+ BUG_ON(!page_m || !PageAnon(page_m));
70820+
70821+ vma_m = pax_find_mirror_vma(vma);
70822+ if (!vma_m)
70823+ return;
70824+
70825+ BUG_ON(!PageLocked(page_m));
70826+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70827+ address_m = address + SEGMEXEC_TASK_SIZE;
70828+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
bc901d79 70829+ pte_m = pte_offset_map(pmd_m, address_m);
70830+ ptl_m = pte_lockptr(mm, pmd_m);
70831+ if (ptl != ptl_m) {
70832+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70833+ if (!pte_none(*pte_m))
70834+ goto out;
70835+ }
70836+
70837+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70838+ page_cache_get(page_m);
70839+ page_add_anon_rmap(page_m, vma_m, address_m);
df50ba0c 70840+ inc_mm_counter_fast(mm, MM_ANONPAGES);
70841+ set_pte_at(mm, address_m, pte_m, entry_m);
70842+ update_mmu_cache(vma_m, address_m, entry_m);
70843+out:
70844+ if (ptl != ptl_m)
70845+ spin_unlock(ptl_m);
bc901d79 70846+ pte_unmap(pte_m);
70847+ unlock_page(page_m);
70848+}
70849+
70850+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
70851+{
70852+ struct mm_struct *mm = vma->vm_mm;
70853+ unsigned long address_m;
70854+ spinlock_t *ptl_m;
70855+ struct vm_area_struct *vma_m;
70856+ pmd_t *pmd_m;
70857+ pte_t *pte_m, entry_m;
70858+
70859+ BUG_ON(!page_m || PageAnon(page_m));
70860+
70861+ vma_m = pax_find_mirror_vma(vma);
70862+ if (!vma_m)
70863+ return;
70864+
70865+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70866+ address_m = address + SEGMEXEC_TASK_SIZE;
70867+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
bc901d79 70868+ pte_m = pte_offset_map(pmd_m, address_m);
70869+ ptl_m = pte_lockptr(mm, pmd_m);
70870+ if (ptl != ptl_m) {
70871+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70872+ if (!pte_none(*pte_m))
70873+ goto out;
70874+ }
70875+
70876+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
70877+ page_cache_get(page_m);
70878+ page_add_file_rmap(page_m);
df50ba0c 70879+ inc_mm_counter_fast(mm, MM_FILEPAGES);
70880+ set_pte_at(mm, address_m, pte_m, entry_m);
70881+ update_mmu_cache(vma_m, address_m, entry_m);
70882+out:
70883+ if (ptl != ptl_m)
70884+ spin_unlock(ptl_m);
bc901d79 70885+ pte_unmap(pte_m);
70886+}
70887+
70888+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
70889+{
70890+ struct mm_struct *mm = vma->vm_mm;
70891+ unsigned long address_m;
70892+ spinlock_t *ptl_m;
70893+ struct vm_area_struct *vma_m;
70894+ pmd_t *pmd_m;
70895+ pte_t *pte_m, entry_m;
70896+
70897+ vma_m = pax_find_mirror_vma(vma);
70898+ if (!vma_m)
70899+ return;
70900+
70901+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
70902+ address_m = address + SEGMEXEC_TASK_SIZE;
70903+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
bc901d79 70904+ pte_m = pte_offset_map(pmd_m, address_m);
70905+ ptl_m = pte_lockptr(mm, pmd_m);
70906+ if (ptl != ptl_m) {
70907+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
70908+ if (!pte_none(*pte_m))
70909+ goto out;
70910+ }
70911+
70912+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
70913+ set_pte_at(mm, address_m, pte_m, entry_m);
70914+out:
70915+ if (ptl != ptl_m)
70916+ spin_unlock(ptl_m);
bc901d79 70917+ pte_unmap(pte_m);
70918+}
70919+
70920+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
70921+{
70922+ struct page *page_m;
70923+ pte_t entry;
70924+
70925+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
70926+ goto out;
70927+
70928+ entry = *pte;
70929+ page_m = vm_normal_page(vma, address, entry);
70930+ if (!page_m)
70931+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
70932+ else if (PageAnon(page_m)) {
70933+ if (pax_find_mirror_vma(vma)) {
70934+ pte_unmap_unlock(pte, ptl);
70935+ lock_page(page_m);
70936+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
70937+ if (pte_same(entry, *pte))
70938+ pax_mirror_anon_pte(vma, address, page_m, ptl);
70939+ else
70940+ unlock_page(page_m);
70941+ }
70942+ } else
70943+ pax_mirror_file_pte(vma, address, page_m, ptl);
70944+
70945+out:
70946+ pte_unmap_unlock(pte, ptl);
70947+}
70948+#endif
70949+
70950 /*
70951 * This routine handles present pages, when users try to write
70952 * to a shared page. It is done by copying the page to a new address
c6e2a6c8 70953@@ -2687,6 +2884,12 @@ gotten:
70954 */
70955 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
70956 if (likely(pte_same(*page_table, orig_pte))) {
70957+
70958+#ifdef CONFIG_PAX_SEGMEXEC
70959+ if (pax_find_mirror_vma(vma))
70960+ BUG_ON(!trylock_page(new_page));
70961+#endif
70962+
70963 if (old_page) {
70964 if (!PageAnon(old_page)) {
df50ba0c 70965 dec_mm_counter_fast(mm, MM_FILEPAGES);
c6e2a6c8 70966@@ -2738,6 +2941,10 @@ gotten:
70967 page_remove_rmap(old_page);
70968 }
70969
70970+#ifdef CONFIG_PAX_SEGMEXEC
70971+ pax_mirror_anon_pte(vma, address, new_page, ptl);
70972+#endif
70973+
70974 /* Free the old page.. */
70975 new_page = old_page;
70976 ret |= VM_FAULT_WRITE;
c6e2a6c8 70977@@ -3017,6 +3224,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70978 swap_free(entry);
70979 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
70980 try_to_free_swap(page);
70981+
70982+#ifdef CONFIG_PAX_SEGMEXEC
70983+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
70984+#endif
70985+
70986 unlock_page(page);
70987 if (swapcache) {
70988 /*
c6e2a6c8 70989@@ -3040,6 +3252,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
70990
70991 /* No need to invalidate - it was non-present before */
df50ba0c 70992 update_mmu_cache(vma, address, page_table);
70993+
70994+#ifdef CONFIG_PAX_SEGMEXEC
70995+ pax_mirror_anon_pte(vma, address, page, ptl);
70996+#endif
70997+
70998 unlock:
70999 pte_unmap_unlock(page_table, ptl);
71000 out:
c6e2a6c8 71001@@ -3059,40 +3276,6 @@ out_release:
71002 }
71003
71004 /*
71005- * This is like a special single-page "expand_{down|up}wards()",
71006- * except we must first make sure that 'address{-|+}PAGE_SIZE'
57199397 71007- * doesn't hit another vma.
71008- */
71009-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
71010-{
71011- address &= PAGE_MASK;
71012- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
71013- struct vm_area_struct *prev = vma->vm_prev;
71014-
71015- /*
71016- * Is there a mapping abutting this one below?
71017- *
71018- * That's only ok if it's the same stack mapping
71019- * that has gotten split..
71020- */
71021- if (prev && prev->vm_end == address)
71022- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
71023-
15a11c5b 71024- expand_downwards(vma, address - PAGE_SIZE);
57199397 71025- }
71026- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
71027- struct vm_area_struct *next = vma->vm_next;
71028-
71029- /* As VM_GROWSDOWN but s/below/above/ */
71030- if (next && next->vm_start == address + PAGE_SIZE)
71031- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
71032-
71033- expand_upwards(vma, address + PAGE_SIZE);
71034- }
71035- return 0;
71036-}
71037-
71038-/*
71039 * We enter with non-exclusive mmap_sem (to exclude vma changes,
71040 * but allow concurrent faults), and pte mapped but not yet locked.
71041 * We return with mmap_sem still held, but pte unmapped and unlocked.
c6e2a6c8 71042@@ -3101,27 +3284,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71043 unsigned long address, pte_t *page_table, pmd_t *pmd,
71044 unsigned int flags)
71045 {
71046- struct page *page;
71047+ struct page *page = NULL;
71048 spinlock_t *ptl;
71049 pte_t entry;
71050
71051- pte_unmap(page_table);
71052-
71053- /* Check if we need to add a guard page to the stack */
71054- if (check_stack_guard_page(vma, address) < 0)
71055- return VM_FAULT_SIGBUS;
71056-
71057- /* Use the zero-page for reads */
71058 if (!(flags & FAULT_FLAG_WRITE)) {
71059 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
71060 vma->vm_page_prot));
71061- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
71062+ ptl = pte_lockptr(mm, pmd);
71063+ spin_lock(ptl);
71064 if (!pte_none(*page_table))
71065 goto unlock;
71066 goto setpte;
71067 }
71068
71069 /* Allocate our own private page. */
71070+ pte_unmap(page_table);
71071+
71072 if (unlikely(anon_vma_prepare(vma)))
71073 goto oom;
71074 page = alloc_zeroed_user_highpage_movable(vma, address);
c6e2a6c8 71075@@ -3140,6 +3319,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
71076 if (!pte_none(*page_table))
71077 goto release;
ae4e228f 71078
71079+#ifdef CONFIG_PAX_SEGMEXEC
71080+ if (pax_find_mirror_vma(vma))
71081+ BUG_ON(!trylock_page(page));
71082+#endif
71083+
df50ba0c 71084 inc_mm_counter_fast(mm, MM_ANONPAGES);
58c5fc13 71085 page_add_new_anon_rmap(page, vma, address);
ae4e228f 71086 setpte:
c6e2a6c8 71087@@ -3147,6 +3331,12 @@ setpte:
71088
71089 /* No need to invalidate - it was non-present before */
df50ba0c 71090 update_mmu_cache(vma, address, page_table);
71091+
71092+#ifdef CONFIG_PAX_SEGMEXEC
71093+ if (page)
71094+ pax_mirror_anon_pte(vma, address, page, ptl);
71095+#endif
71096+
71097 unlock:
71098 pte_unmap_unlock(page_table, ptl);
71099 return 0;
c6e2a6c8 71100@@ -3290,6 +3480,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71101 */
71102 /* Only go through if we didn't race with anybody else... */
71103 if (likely(pte_same(*page_table, orig_pte))) {
71104+
71105+#ifdef CONFIG_PAX_SEGMEXEC
71106+ if (anon && pax_find_mirror_vma(vma))
71107+ BUG_ON(!trylock_page(page));
71108+#endif
71109+
71110 flush_icache_page(vma, page);
71111 entry = mk_pte(page, vma->vm_page_prot);
71112 if (flags & FAULT_FLAG_WRITE)
c6e2a6c8 71113@@ -3309,6 +3505,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71114
71115 /* no need to invalidate: a not-present page won't be cached */
df50ba0c 71116 update_mmu_cache(vma, address, page_table);
71117+
71118+#ifdef CONFIG_PAX_SEGMEXEC
71119+ if (anon)
71120+ pax_mirror_anon_pte(vma, address, page, ptl);
71121+ else
71122+ pax_mirror_file_pte(vma, address, page, ptl);
71123+#endif
71124+
71125 } else {
71126 if (cow_page)
71127 mem_cgroup_uncharge_page(cow_page);
c6e2a6c8 71128@@ -3462,6 +3666,12 @@ int handle_pte_fault(struct mm_struct *mm,
58c5fc13 71129 if (flags & FAULT_FLAG_WRITE)
bc901d79 71130 flush_tlb_fix_spurious_fault(vma, address);
71131 }
71132+
71133+#ifdef CONFIG_PAX_SEGMEXEC
71134+ pax_mirror_pte(vma, address, pte, pmd, ptl);
71135+ return 0;
71136+#endif
71137+
71138 unlock:
71139 pte_unmap_unlock(pte, ptl);
71140 return 0;
c6e2a6c8 71141@@ -3478,6 +3688,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71142 pmd_t *pmd;
71143 pte_t *pte;
71144
71145+#ifdef CONFIG_PAX_SEGMEXEC
71146+ struct vm_area_struct *vma_m;
71147+#endif
71148+
71149 __set_current_state(TASK_RUNNING);
71150
71151 count_vm_event(PGFAULT);
c6e2a6c8 71152@@ -3489,6 +3703,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71153 if (unlikely(is_vm_hugetlb_page(vma)))
71154 return hugetlb_fault(mm, vma, address, flags);
71155
71156+#ifdef CONFIG_PAX_SEGMEXEC
71157+ vma_m = pax_find_mirror_vma(vma);
71158+ if (vma_m) {
71159+ unsigned long address_m;
71160+ pgd_t *pgd_m;
71161+ pud_t *pud_m;
71162+ pmd_t *pmd_m;
71163+
71164+ if (vma->vm_start > vma_m->vm_start) {
71165+ address_m = address;
71166+ address -= SEGMEXEC_TASK_SIZE;
71167+ vma = vma_m;
71168+ } else
71169+ address_m = address + SEGMEXEC_TASK_SIZE;
71170+
71171+ pgd_m = pgd_offset(mm, address_m);
71172+ pud_m = pud_alloc(mm, pgd_m, address_m);
71173+ if (!pud_m)
71174+ return VM_FAULT_OOM;
71175+ pmd_m = pmd_alloc(mm, pud_m, address_m);
71176+ if (!pmd_m)
71177+ return VM_FAULT_OOM;
16454cff 71178+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
71179+ return VM_FAULT_OOM;
71180+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
71181+ }
71182+#endif
71183+
71184 pgd = pgd_offset(mm, address);
71185 pud = pud_alloc(mm, pgd, address);
71186 if (!pud)
c6e2a6c8 71187@@ -3518,7 +3760,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
71188 * run pte_offset_map on the pmd, if an huge pmd could
71189 * materialize from under us from a different thread.
71190 */
71191- if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
71192+ if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
71193 return VM_FAULT_OOM;
71194 /* if an huge pmd materialized from under us just retry later */
71195 if (unlikely(pmd_trans_huge(*pmd)))
c6e2a6c8 71196@@ -3555,6 +3797,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
71197 spin_unlock(&mm->page_table_lock);
71198 return 0;
71199 }
71200+
71201+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
71202+{
71203+ pud_t *new = pud_alloc_one(mm, address);
71204+ if (!new)
71205+ return -ENOMEM;
71206+
71207+ smp_wmb(); /* See comment in __pte_alloc */
71208+
71209+ spin_lock(&mm->page_table_lock);
71210+ if (pgd_present(*pgd)) /* Another has populated it */
71211+ pud_free(mm, new);
71212+ else
71213+ pgd_populate_kernel(mm, pgd, new);
71214+ spin_unlock(&mm->page_table_lock);
71215+ return 0;
71216+}
71217 #endif /* __PAGETABLE_PUD_FOLDED */
71218
71219 #ifndef __PAGETABLE_PMD_FOLDED
c6e2a6c8 71220@@ -3585,6 +3844,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
71221 spin_unlock(&mm->page_table_lock);
71222 return 0;
71223 }
71224+
71225+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
71226+{
71227+ pmd_t *new = pmd_alloc_one(mm, address);
71228+ if (!new)
71229+ return -ENOMEM;
71230+
71231+ smp_wmb(); /* See comment in __pte_alloc */
71232+
71233+ spin_lock(&mm->page_table_lock);
71234+#ifndef __ARCH_HAS_4LEVEL_HACK
71235+ if (pud_present(*pud)) /* Another has populated it */
71236+ pmd_free(mm, new);
71237+ else
71238+ pud_populate_kernel(mm, pud, new);
71239+#else
71240+ if (pgd_present(*pud)) /* Another has populated it */
71241+ pmd_free(mm, new);
71242+ else
71243+ pgd_populate_kernel(mm, pud, new);
71244+#endif /* __ARCH_HAS_4LEVEL_HACK */
71245+ spin_unlock(&mm->page_table_lock);
71246+ return 0;
71247+}
71248 #endif /* __PAGETABLE_PMD_FOLDED */
71249
71250 int make_pages_present(unsigned long addr, unsigned long end)
c6e2a6c8 71251@@ -3622,7 +3905,7 @@ static int __init gate_vma_init(void)
71252 gate_vma.vm_start = FIXADDR_USER_START;
71253 gate_vma.vm_end = FIXADDR_USER_END;
71254 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
71255- gate_vma.vm_page_prot = __P101;
71256+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
71257
71258 return 0;
71259 }
fe2de317 71260diff --git a/mm/mempolicy.c b/mm/mempolicy.c
c6e2a6c8 71261index bf5b485..e44c2cb 100644
71262--- a/mm/mempolicy.c
71263+++ b/mm/mempolicy.c
c6e2a6c8 71264@@ -619,6 +619,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71265 unsigned long vmstart;
71266 unsigned long vmend;
71267
71268+#ifdef CONFIG_PAX_SEGMEXEC
71269+ struct vm_area_struct *vma_m;
71270+#endif
71271+
5e856224 71272 vma = find_vma(mm, start);
71273 if (!vma || vma->vm_start > start)
71274 return -EFAULT;
71275@@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
71276 if (err)
71277 goto out;
71278 }
71279+
71280+#ifdef CONFIG_PAX_SEGMEXEC
71281+ vma_m = pax_find_mirror_vma(vma);
71282+ if (vma_m && vma_m->vm_ops && vma_m->vm_ops->set_policy) {
71283+ err = vma_m->vm_ops->set_policy(vma_m, new_pol);
58c5fc13 71284+ if (err)
df50ba0c 71285+ goto out;
71286+ }
71287+#endif
71288+
71289 }
71290
71291 out:
c6e2a6c8 71292@@ -1105,6 +1119,17 @@ static long do_mbind(unsigned long start, unsigned long len,
71293
71294 if (end < start)
71295 return -EINVAL;
71296+
71297+#ifdef CONFIG_PAX_SEGMEXEC
71298+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
71299+ if (end > SEGMEXEC_TASK_SIZE)
71300+ return -EINVAL;
71301+ } else
71302+#endif
71303+
71304+ if (end > TASK_SIZE)
71305+ return -EINVAL;
71306+
71307 if (end == start)
71308 return 0;
71309
71310@@ -1328,8 +1353,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
71311 */
71312 tcred = __task_cred(task);
71313 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
71314- cred->uid != tcred->suid && cred->uid != tcred->uid &&
71315- !capable(CAP_SYS_NICE)) {
71316+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
71317 rcu_read_unlock();
71318 err = -EPERM;
71319 goto out_put;
71320@@ -1360,6 +1384,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
58c5fc13 71321 goto out;
c6e2a6c8 71322 }
71323
71324+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71325+ if (mm != current->mm &&
71326+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
c6e2a6c8 71327+ mmput(mm);
71328+ err = -EPERM;
71329+ goto out;
71330+ }
71331+#endif
71332+
71333 err = do_migrate_pages(mm, old, new,
71334 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
71335
fe2de317 71336diff --git a/mm/mlock.c b/mm/mlock.c
572b4308 71337index ef726e8..cd7f1ec 100644
71338--- a/mm/mlock.c
71339+++ b/mm/mlock.c
71340@@ -13,6 +13,7 @@
71341 #include <linux/pagemap.h>
71342 #include <linux/mempolicy.h>
71343 #include <linux/syscalls.h>
71344+#include <linux/security.h>
71345 #include <linux/sched.h>
4c928ab7 71346 #include <linux/export.h>
58c5fc13 71347 #include <linux/rmap.h>
71348@@ -376,7 +377,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
71349 {
71350 unsigned long nstart, end, tmp;
71351 struct vm_area_struct * vma, * prev;
71352- int error;
71353+ int error = 0;
71354
71355 VM_BUG_ON(start & ~PAGE_MASK);
71356 VM_BUG_ON(len != PAGE_ALIGN(len));
4c928ab7 71357@@ -385,6 +386,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
71358 return -EINVAL;
71359 if (end == start)
71360 return 0;
71361+ if (end > TASK_SIZE)
71362+ return -EINVAL;
71363+
5e856224 71364 vma = find_vma(current->mm, start);
71365 if (!vma || vma->vm_start > start)
71366 return -ENOMEM;
5e856224 71367@@ -396,6 +400,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
57199397 71368 for (nstart = start ; ; ) {
15a11c5b 71369 vm_flags_t newflags;
71370
71371+#ifdef CONFIG_PAX_SEGMEXEC
71372+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71373+ break;
71374+#endif
71375+
71376 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
71377
71378 newflags = vma->vm_flags | VM_LOCKED;
5e856224 71379@@ -501,6 +510,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
71380 lock_limit >>= PAGE_SHIFT;
71381
71382 /* check against resource limits */
71383+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
71384 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
71385 error = do_mlock(start, len, 1);
71386 up_write(&current->mm->mmap_sem);
5e856224 71387@@ -524,17 +534,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
71388 static int do_mlockall(int flags)
71389 {
71390 struct vm_area_struct * vma, * prev = NULL;
71391- unsigned int def_flags = 0;
71392
71393 if (flags & MCL_FUTURE)
71394- def_flags = VM_LOCKED;
71395- current->mm->def_flags = def_flags;
71396+ current->mm->def_flags |= VM_LOCKED;
71397+ else
71398+ current->mm->def_flags &= ~VM_LOCKED;
71399 if (flags == MCL_FUTURE)
71400 goto out;
58c5fc13 71401
57199397 71402 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
71403 vm_flags_t newflags;
71404
71405+#ifdef CONFIG_PAX_SEGMEXEC
71406+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
71407+ break;
71408+#endif
15a11c5b 71409+
71410+ BUG_ON(vma->vm_end > TASK_SIZE);
71411 newflags = vma->vm_flags | VM_LOCKED;
71412 if (!(flags & MCL_CURRENT))
71413 newflags &= ~VM_LOCKED;
5e856224 71414@@ -567,6 +583,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
71415 lock_limit >>= PAGE_SHIFT;
71416
71417 ret = -ENOMEM;
57199397 71418+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
71419 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
71420 capable(CAP_IPC_LOCK))
71421 ret = do_mlockall(flags);
fe2de317 71422diff --git a/mm/mmap.c b/mm/mmap.c
c6e2a6c8 71423index 848ef52..d2b586c 100644
71424--- a/mm/mmap.c
71425+++ b/mm/mmap.c
16454cff 71426@@ -46,6 +46,16 @@
71427 #define arch_rebalance_pgtables(addr, len) (addr)
71428 #endif
71429
71430+static inline void verify_mm_writelocked(struct mm_struct *mm)
71431+{
71432+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
71433+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
71434+ up_read(&mm->mmap_sem);
71435+ BUG();
71436+ }
71437+#endif
71438+}
71439+
71440 static void unmap_region(struct mm_struct *mm,
71441 struct vm_area_struct *vma, struct vm_area_struct *prev,
71442 unsigned long start, unsigned long end);
fe2de317 71443@@ -71,22 +81,32 @@ static void unmap_region(struct mm_struct *mm,
71444 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
71445 *
71446 */
71447-pgprot_t protection_map[16] = {
71448+pgprot_t protection_map[16] __read_only = {
71449 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
71450 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
71451 };
71452
71453-pgprot_t vm_get_page_prot(unsigned long vm_flags)
71454+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
71455 {
71456- return __pgprot(pgprot_val(protection_map[vm_flags &
71457+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
71458 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
71459 pgprot_val(arch_vm_get_page_prot(vm_flags)));
71460+
71461+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
ae4e228f 71462+ if (!(__supported_pte_mask & _PAGE_NX) &&
71463+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
71464+ (vm_flags & (VM_READ | VM_WRITE)))
71465+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
71466+#endif
71467+
71468+ return prot;
71469 }
71470 EXPORT_SYMBOL(vm_get_page_prot);
71471
71472 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
71473 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
71474 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
71475+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
57199397 71476 /*
71477 * Make sure vm_committed_as in one cacheline and not cacheline shared with
71478 * other variables. It can be updated by several CPUs frequently.
fe2de317 71479@@ -228,6 +248,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
71480 struct vm_area_struct *next = vma->vm_next;
71481
71482 might_sleep();
71483+ BUG_ON(vma->vm_mirror);
71484 if (vma->vm_ops && vma->vm_ops->close)
71485 vma->vm_ops->close(vma);
71486 if (vma->vm_file) {
c6e2a6c8 71487@@ -274,6 +295,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
71488 * not page aligned -Ram Gupta
71489 */
df50ba0c 71490 rlim = rlimit(RLIMIT_DATA);
71491+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
71492 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
71493 (mm->end_data - mm->start_data) > rlim)
71494 goto out;
c6e2a6c8 71495@@ -690,6 +712,12 @@ static int
71496 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
71497 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71498 {
71499+
71500+#ifdef CONFIG_PAX_SEGMEXEC
71501+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
71502+ return 0;
71503+#endif
71504+
71505 if (is_mergeable_vma(vma, file, vm_flags) &&
15a11c5b 71506 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
58c5fc13 71507 if (vma->vm_pgoff == vm_pgoff)
c6e2a6c8 71508@@ -709,6 +737,12 @@ static int
71509 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71510 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
71511 {
71512+
71513+#ifdef CONFIG_PAX_SEGMEXEC
71514+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
71515+ return 0;
71516+#endif
71517+
71518 if (is_mergeable_vma(vma, file, vm_flags) &&
15a11c5b 71519 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
58c5fc13 71520 pgoff_t vm_pglen;
c6e2a6c8 71521@@ -751,13 +785,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
71522 struct vm_area_struct *vma_merge(struct mm_struct *mm,
71523 struct vm_area_struct *prev, unsigned long addr,
71524 unsigned long end, unsigned long vm_flags,
71525- struct anon_vma *anon_vma, struct file *file,
71526+ struct anon_vma *anon_vma, struct file *file,
71527 pgoff_t pgoff, struct mempolicy *policy)
71528 {
71529 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
71530 struct vm_area_struct *area, *next;
df50ba0c 71531 int err;
71532
71533+#ifdef CONFIG_PAX_SEGMEXEC
71534+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
71535+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
71536+
71537+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
71538+#endif
71539+
71540 /*
71541 * We later require that vma->vm_flags == vm_flags,
71542 * so this tests vma->vm_flags & VM_SPECIAL, too.
c6e2a6c8 71543@@ -773,6 +814,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71544 if (next && next->vm_end == end) /* cases 6, 7, 8 */
71545 next = next->vm_next;
71546
71547+#ifdef CONFIG_PAX_SEGMEXEC
71548+ if (prev)
71549+ prev_m = pax_find_mirror_vma(prev);
71550+ if (area)
71551+ area_m = pax_find_mirror_vma(area);
71552+ if (next)
71553+ next_m = pax_find_mirror_vma(next);
71554+#endif
71555+
71556 /*
71557 * Can it merge with the predecessor?
71558 */
c6e2a6c8 71559@@ -792,9 +842,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
58c5fc13 71560 /* cases 1, 6 */
df50ba0c 71561 err = vma_adjust(prev, prev->vm_start,
71562 next->vm_end, prev->vm_pgoff, NULL);
71563- } else /* cases 2, 5, 7 */
71564+
71565+#ifdef CONFIG_PAX_SEGMEXEC
71566+ if (!err && prev_m)
71567+ err = vma_adjust(prev_m, prev_m->vm_start,
71568+ next_m->vm_end, prev_m->vm_pgoff, NULL);
71569+#endif
71570+
71571+ } else { /* cases 2, 5, 7 */
df50ba0c 71572 err = vma_adjust(prev, prev->vm_start,
71573 end, prev->vm_pgoff, NULL);
71574+
71575+#ifdef CONFIG_PAX_SEGMEXEC
71576+ if (!err && prev_m)
71577+ err = vma_adjust(prev_m, prev_m->vm_start,
71578+ end_m, prev_m->vm_pgoff, NULL);
71579+#endif
71580+
71581+ }
71582 if (err)
71583 return NULL;
16454cff 71584 khugepaged_enter_vma_merge(prev);
c6e2a6c8 71585@@ -808,12 +873,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
71586 mpol_equal(policy, vma_policy(next)) &&
71587 can_vma_merge_before(next, vm_flags,
71588 anon_vma, file, pgoff+pglen)) {
71589- if (prev && addr < prev->vm_end) /* case 4 */
71590+ if (prev && addr < prev->vm_end) { /* case 4 */
df50ba0c 71591 err = vma_adjust(prev, prev->vm_start,
71592 addr, prev->vm_pgoff, NULL);
71593- else /* cases 3, 8 */
71594+
71595+#ifdef CONFIG_PAX_SEGMEXEC
71596+ if (!err && prev_m)
71597+ err = vma_adjust(prev_m, prev_m->vm_start,
71598+ addr_m, prev_m->vm_pgoff, NULL);
71599+#endif
71600+
71601+ } else { /* cases 3, 8 */
df50ba0c 71602 err = vma_adjust(area, addr, next->vm_end,
71603 next->vm_pgoff - pglen, NULL);
71604+
71605+#ifdef CONFIG_PAX_SEGMEXEC
71606+ if (!err && area_m)
71607+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
71608+ next_m->vm_pgoff - pglen, NULL);
71609+#endif
71610+
71611+ }
71612 if (err)
71613 return NULL;
16454cff 71614 khugepaged_enter_vma_merge(area);
c6e2a6c8 71615@@ -922,14 +1002,11 @@ none:
71616 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
71617 struct file *file, long pages)
71618 {
71619- const unsigned long stack_flags
71620- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
71621-
71622 if (file) {
71623 mm->shared_vm += pages;
71624 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
71625 mm->exec_vm += pages;
71626- } else if (flags & stack_flags)
71627+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
71628 mm->stack_vm += pages;
71629 if (flags & (VM_RESERVED|VM_IO))
71630 mm->reserved_vm += pages;
c6e2a6c8 71631@@ -969,7 +1046,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71632 * (the exception is when the underlying filesystem is noexec
71633 * mounted, in which case we dont add PROT_EXEC.)
71634 */
71635- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
71636+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
71637 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
71638 prot |= PROT_EXEC;
71639
c6e2a6c8 71640@@ -995,7 +1072,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71641 /* Obtain the address to map to. we verify (or select) it and ensure
71642 * that it represents a valid section of the address space.
71643 */
71644- addr = get_unmapped_area(file, addr, len, pgoff, flags);
71645+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
71646 if (addr & ~PAGE_MASK)
71647 return addr;
71648
c6e2a6c8 71649@@ -1006,6 +1083,36 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71650 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
71651 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
71652
58c5fc13 71653+#ifdef CONFIG_PAX_MPROTECT
57199397 71654+ if (mm->pax_flags & MF_PAX_MPROTECT) {
c52201e0 71655+#ifndef CONFIG_PAX_MPROTECT_COMPAT
71656+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
71657+ gr_log_rwxmmap(file);
71658+
71659+#ifdef CONFIG_PAX_EMUPLT
71660+ vm_flags &= ~VM_EXEC;
71661+#else
71662+ return -EPERM;
71663+#endif
71664+
71665+ }
71666+
71667+ if (!(vm_flags & VM_EXEC))
71668+ vm_flags &= ~VM_MAYEXEC;
71669+#else
71670+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
71671+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
71672+#endif
71673+ else
71674+ vm_flags &= ~VM_MAYWRITE;
71675+ }
71676+#endif
71677+
71678+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71679+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
71680+ vm_flags &= ~VM_PAGEEXEC;
71681+#endif
71682+
ae4e228f 71683 if (flags & MAP_LOCKED)
71684 if (!can_do_mlock())
71685 return -EPERM;
c6e2a6c8 71686@@ -1017,6 +1124,7 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
58c5fc13 71687 locked += mm->locked_vm;
df50ba0c 71688 lock_limit = rlimit(RLIMIT_MEMLOCK);
71689 lock_limit >>= PAGE_SHIFT;
71690+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
71691 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
71692 return -EAGAIN;
71693 }
c6e2a6c8 71694@@ -1087,6 +1195,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
71695 if (error)
71696 return error;
71697
71698+ if (!gr_acl_handle_mmap(file, prot))
71699+ return -EACCES;
71700+
71701 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
71702 }
71703
71704@@ -1192,7 +1303,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
15a11c5b 71705 vm_flags_t vm_flags = vma->vm_flags;
71706
71707 /* If it was private or non-writable, the write bit is already clear */
71708- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
71709+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
71710 return 0;
71711
71712 /* The backer wishes to know when pages are first written to? */
c6e2a6c8 71713@@ -1241,14 +1352,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
71714 unsigned long charged = 0;
71715 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
71716
71717+#ifdef CONFIG_PAX_SEGMEXEC
71718+ struct vm_area_struct *vma_m = NULL;
71719+#endif
71720+
71721+ /*
71722+ * mm->mmap_sem is required to protect against another thread
71723+ * changing the mappings in case we sleep.
71724+ */
71725+ verify_mm_writelocked(mm);
71726+
71727 /* Clear old maps */
71728 error = -ENOMEM;
71729-munmap_back:
71730 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71731 if (vma && vma->vm_start < addr + len) {
71732 if (do_munmap(mm, addr, len))
71733 return -ENOMEM;
71734- goto munmap_back;
71735+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
71736+ BUG_ON(vma && vma->vm_start < addr + len);
71737 }
71738
71739 /* Check against address space limit. */
c6e2a6c8 71740@@ -1297,6 +1418,16 @@ munmap_back:
71741 goto unacct_error;
71742 }
71743
71744+#ifdef CONFIG_PAX_SEGMEXEC
71745+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
71746+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
71747+ if (!vma_m) {
71748+ error = -ENOMEM;
71749+ goto free_vma;
71750+ }
71751+ }
71752+#endif
71753+
71754 vma->vm_mm = mm;
71755 vma->vm_start = addr;
71756 vma->vm_end = addr + len;
c6e2a6c8 71757@@ -1321,6 +1452,19 @@ munmap_back:
71758 error = file->f_op->mmap(file, vma);
71759 if (error)
71760 goto unmap_and_free_vma;
71761+
71762+#ifdef CONFIG_PAX_SEGMEXEC
71763+ if (vma_m && (vm_flags & VM_EXECUTABLE))
71764+ added_exe_file_vma(mm);
71765+#endif
71766+
71767+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
71768+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
71769+ vma->vm_flags |= VM_PAGEEXEC;
71770+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
71771+ }
71772+#endif
71773+
71774 if (vm_flags & VM_EXECUTABLE)
71775 added_exe_file_vma(mm);
ae4e228f 71776
c6e2a6c8 71777@@ -1358,6 +1502,11 @@ munmap_back:
71778 vma_link(mm, vma, prev, rb_link, rb_parent);
71779 file = vma->vm_file;
71780
71781+#ifdef CONFIG_PAX_SEGMEXEC
71782+ if (vma_m)
df50ba0c 71783+ BUG_ON(pax_mirror_vma(vma_m, vma));
71784+#endif
71785+
71786 /* Once vma denies write, undo our temporary denial count */
71787 if (correct_wcount)
71788 atomic_inc(&inode->i_writecount);
c6e2a6c8 71789@@ -1366,6 +1515,7 @@ out:
71790
71791 mm->total_vm += len >> PAGE_SHIFT;
71792 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
71793+ track_exec_limit(mm, addr, addr + len, vm_flags);
71794 if (vm_flags & VM_LOCKED) {
71795 if (!mlock_vma_pages_range(vma, addr, addr + len))
71796 mm->locked_vm += (len >> PAGE_SHIFT);
c6e2a6c8 71797@@ -1383,6 +1533,12 @@ unmap_and_free_vma:
71798 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
71799 charged = 0;
71800 free_vma:
71801+
71802+#ifdef CONFIG_PAX_SEGMEXEC
71803+ if (vma_m)
71804+ kmem_cache_free(vm_area_cachep, vma_m);
71805+#endif
71806+
71807 kmem_cache_free(vm_area_cachep, vma);
71808 unacct_error:
71809 if (charged)
c6e2a6c8 71810@@ -1390,6 +1546,44 @@ unacct_error:
71811 return error;
71812 }
71813
16454cff 71814+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
71815+{
71816+ if (!vma) {
71817+#ifdef CONFIG_STACK_GROWSUP
71818+ if (addr > sysctl_heap_stack_gap)
71819+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
71820+ else
71821+ vma = find_vma(current->mm, 0);
71822+ if (vma && (vma->vm_flags & VM_GROWSUP))
71823+ return false;
71824+#endif
71825+ return true;
71826+ }
71827+
71828+ if (addr + len > vma->vm_start)
71829+ return false;
71830+
71831+ if (vma->vm_flags & VM_GROWSDOWN)
71832+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
71833+#ifdef CONFIG_STACK_GROWSUP
71834+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
71835+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
71836+#endif
71837+
71838+ return true;
71839+}
71840+
71841+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
71842+{
71843+ if (vma->vm_start < len)
71844+ return -ENOMEM;
71845+ if (!(vma->vm_flags & VM_GROWSDOWN))
71846+ return vma->vm_start - len;
71847+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
71848+ return vma->vm_start - len - sysctl_heap_stack_gap;
71849+ return -ENOMEM;
71850+}
71851+
71852 /* Get an address range which is currently unmapped.
71853 * For shmat() with addr=0.
71854 *
c6e2a6c8 71855@@ -1416,18 +1610,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
71856 if (flags & MAP_FIXED)
71857 return addr;
71858
71859+#ifdef CONFIG_PAX_RANDMMAP
71860+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71861+#endif
71862+
71863 if (addr) {
71864 addr = PAGE_ALIGN(addr);
71865- vma = find_vma(mm, addr);
71866- if (TASK_SIZE - len >= addr &&
71867- (!vma || addr + len <= vma->vm_start))
71868- return addr;
71869+ if (TASK_SIZE - len >= addr) {
71870+ vma = find_vma(mm, addr);
71871+ if (check_heap_stack_gap(vma, addr, len))
71872+ return addr;
71873+ }
71874 }
71875 if (len > mm->cached_hole_size) {
71876- start_addr = addr = mm->free_area_cache;
71877+ start_addr = addr = mm->free_area_cache;
71878 } else {
71879- start_addr = addr = TASK_UNMAPPED_BASE;
71880- mm->cached_hole_size = 0;
71881+ start_addr = addr = mm->mmap_base;
71882+ mm->cached_hole_size = 0;
71883 }
71884
71885 full_search:
c6e2a6c8 71886@@ -1438,34 +1637,40 @@ full_search:
71887 * Start a new search - just in case we missed
71888 * some holes.
71889 */
71890- if (start_addr != TASK_UNMAPPED_BASE) {
71891- addr = TASK_UNMAPPED_BASE;
71892- start_addr = addr;
71893+ if (start_addr != mm->mmap_base) {
71894+ start_addr = addr = mm->mmap_base;
71895 mm->cached_hole_size = 0;
71896 goto full_search;
71897 }
71898 return -ENOMEM;
71899 }
71900- if (!vma || addr + len <= vma->vm_start) {
71901- /*
71902- * Remember the place where we stopped the search:
71903- */
71904- mm->free_area_cache = addr + len;
71905- return addr;
71906- }
71907+ if (check_heap_stack_gap(vma, addr, len))
71908+ break;
71909 if (addr + mm->cached_hole_size < vma->vm_start)
71910 mm->cached_hole_size = vma->vm_start - addr;
71911 addr = vma->vm_end;
71912 }
71913+
71914+ /*
71915+ * Remember the place where we stopped the search:
71916+ */
71917+ mm->free_area_cache = addr + len;
71918+ return addr;
71919 }
71920 #endif
71921
71922 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
71923 {
71924+
71925+#ifdef CONFIG_PAX_SEGMEXEC
71926+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
71927+ return;
71928+#endif
71929+
71930 /*
71931 * Is this a new hole at the lowest possible address?
71932 */
71933- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
71934+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
58c5fc13 71935 mm->free_area_cache = addr;
71936 }
71937
71938@@ -1481,7 +1686,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71939 {
71940 struct vm_area_struct *vma;
71941 struct mm_struct *mm = current->mm;
71942- unsigned long addr = addr0, start_addr;
71943+ unsigned long base = mm->mmap_base, addr = addr0, start_addr;
71944
71945 /* requested length too big for entire address space */
71946 if (len > TASK_SIZE)
c6e2a6c8 71947@@ -1490,13 +1695,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
71948 if (flags & MAP_FIXED)
71949 return addr;
71950
71951+#ifdef CONFIG_PAX_RANDMMAP
71952+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
71953+#endif
71954+
71955 /* requesting a specific address */
71956 if (addr) {
71957 addr = PAGE_ALIGN(addr);
71958- vma = find_vma(mm, addr);
71959- if (TASK_SIZE - len >= addr &&
71960- (!vma || addr + len <= vma->vm_start))
71961- return addr;
71962+ if (TASK_SIZE - len >= addr) {
71963+ vma = find_vma(mm, addr);
71964+ if (check_heap_stack_gap(vma, addr, len))
71965+ return addr;
71966+ }
71967 }
71968
71969 /* check if free_area_cache is useful for us */
c6e2a6c8 71970@@ -1520,7 +1730,7 @@ try_again:
71971 * return with success:
71972 */
71973 vma = find_vma(mm, addr);
71974- if (!vma || addr+len <= vma->vm_start)
71975+ if (check_heap_stack_gap(vma, addr, len))
71976 /* remember the address as a hint for next time */
71977 return (mm->free_area_cache = addr);
71978
c6e2a6c8 71979@@ -1529,8 +1739,8 @@ try_again:
71980 mm->cached_hole_size = vma->vm_start - addr;
71981
71982 /* try just below the current vma->vm_start */
71983- addr = vma->vm_start-len;
71984- } while (len < vma->vm_start);
71985+ addr = skip_heap_stack_gap(vma, len);
71986+ } while (!IS_ERR_VALUE(addr));
71987
c6e2a6c8 71988 fail:
16454cff 71989 /*
c6e2a6c8 71990@@ -1553,13 +1763,21 @@ fail:
71991 * can happen with large stack limits and large mmap()
71992 * allocations.
71993 */
71994+ mm->mmap_base = TASK_UNMAPPED_BASE;
71995+
71996+#ifdef CONFIG_PAX_RANDMMAP
71997+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71998+ mm->mmap_base += mm->delta_mmap;
71999+#endif
72000+
72001+ mm->free_area_cache = mm->mmap_base;
72002 mm->cached_hole_size = ~0UL;
72003- mm->free_area_cache = TASK_UNMAPPED_BASE;
72004 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
72005 /*
72006 * Restore the topdown base:
72007 */
72008- mm->free_area_cache = mm->mmap_base;
72009+ mm->mmap_base = base;
72010+ mm->free_area_cache = base;
72011 mm->cached_hole_size = ~0UL;
72012
72013 return addr;
c6e2a6c8 72014@@ -1568,6 +1786,12 @@ fail:
72015
72016 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72017 {
72018+
72019+#ifdef CONFIG_PAX_SEGMEXEC
72020+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
72021+ return;
72022+#endif
72023+
72024 /*
72025 * Is this a new hole at the highest possible address?
72026 */
c6e2a6c8 72027@@ -1575,8 +1799,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
72028 mm->free_area_cache = addr;
72029
72030 /* dont allow allocations above current base */
72031- if (mm->free_area_cache > mm->mmap_base)
72032+ if (mm->free_area_cache > mm->mmap_base) {
72033 mm->free_area_cache = mm->mmap_base;
72034+ mm->cached_hole_size = ~0UL;
72035+ }
72036 }
72037
72038 unsigned long
c6e2a6c8 72039@@ -1672,6 +1898,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
72040 return vma;
72041 }
4c928ab7 72042
72043+#ifdef CONFIG_PAX_SEGMEXEC
72044+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
72045+{
72046+ struct vm_area_struct *vma_m;
5e856224 72047+
72048+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
72049+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
72050+ BUG_ON(vma->vm_mirror);
72051+ return NULL;
72052+ }
72053+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
72054+ vma_m = vma->vm_mirror;
72055+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
72056+ BUG_ON(vma->vm_file != vma_m->vm_file);
72057+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
57199397 72058+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
72059+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
72060+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
58c5fc13 72061+ return vma_m;
5e856224 72062+}
58c5fc13 72063+#endif
5e856224 72064+
72065 /*
72066 * Verify that the stack growth is acceptable and
5e856224 72067 * update accounting. This is shared with both the
c6e2a6c8 72068@@ -1688,6 +1936,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72069 return -ENOMEM;
72070
72071 /* Stack limit test */
72072+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
df50ba0c 72073 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
72074 return -ENOMEM;
72075
c6e2a6c8 72076@@ -1698,6 +1947,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
58c5fc13 72077 locked = mm->locked_vm + grow;
72078 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
72079 limit >>= PAGE_SHIFT;
72080+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
72081 if (locked > limit && !capable(CAP_IPC_LOCK))
72082 return -ENOMEM;
72083 }
c6e2a6c8 72084@@ -1728,37 +1978,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
72085 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
72086 * vma is the last one with address > vma->vm_end. Have to extend vma.
72087 */
72088+#ifndef CONFIG_IA64
72089+static
72090+#endif
72091 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
72092 {
72093 int error;
72094+ bool locknext;
72095
72096 if (!(vma->vm_flags & VM_GROWSUP))
72097 return -EFAULT;
72098
72099+ /* Also guard against wrapping around to address 0. */
72100+ if (address < PAGE_ALIGN(address+1))
72101+ address = PAGE_ALIGN(address+1);
72102+ else
72103+ return -ENOMEM;
72104+
72105 /*
72106 * We must make sure the anon_vma is allocated
72107 * so that the anon_vma locking is not a noop.
72108 */
72109 if (unlikely(anon_vma_prepare(vma)))
72110 return -ENOMEM;
72111+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
57199397 72112+ if (locknext && anon_vma_prepare(vma->vm_next))
58c5fc13 72113+ return -ENOMEM;
6892158b 72114 vma_lock_anon_vma(vma);
58c5fc13 72115+ if (locknext)
6892158b 72116+ vma_lock_anon_vma(vma->vm_next);
72117
72118 /*
72119 * vma->vm_start/vm_end cannot change under us because the caller
72120 * is required to hold the mmap_sem in read mode. We need the
72121- * anon_vma lock to serialize against concurrent expand_stacks.
72122- * Also guard against wrapping around to address 0.
72123+ * anon_vma locks to serialize against concurrent expand_stacks
72124+ * and expand_upwards.
72125 */
72126- if (address < PAGE_ALIGN(address+4))
72127- address = PAGE_ALIGN(address+4);
72128- else {
6892158b 72129- vma_unlock_anon_vma(vma);
72130- return -ENOMEM;
72131- }
72132 error = 0;
72133
72134 /* Somebody else might have raced and expanded it already */
72135- if (address > vma->vm_end) {
72136+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
72137+ error = -ENOMEM;
72138+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
72139 unsigned long size, grow;
72140
72141 size = address - vma->vm_start;
c6e2a6c8 72142@@ -1773,6 +2034,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
66a7e928 72143 }
6892158b 72144 }
72145 }
72146+ if (locknext)
72147+ vma_unlock_anon_vma(vma->vm_next);
72148 vma_unlock_anon_vma(vma);
16454cff 72149 khugepaged_enter_vma_merge(vma);
58c5fc13 72150 return error;
c6e2a6c8 72151@@ -1786,6 +2049,8 @@ int expand_downwards(struct vm_area_struct *vma,
72152 unsigned long address)
72153 {
72154 int error;
72155+ bool lockprev = false;
57199397 72156+ struct vm_area_struct *prev;
72157
72158 /*
72159 * We must make sure the anon_vma is allocated
c6e2a6c8 72160@@ -1799,6 +2064,15 @@ int expand_downwards(struct vm_area_struct *vma,
72161 if (error)
72162 return error;
72163
57199397 72164+ prev = vma->vm_prev;
58c5fc13 72165+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
72166+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
72167+#endif
57199397 72168+ if (lockprev && anon_vma_prepare(prev))
72169+ return -ENOMEM;
72170+ if (lockprev)
6892158b 72171+ vma_lock_anon_vma(prev);
58c5fc13 72172+
6892158b 72173 vma_lock_anon_vma(vma);
72174
72175 /*
c6e2a6c8 72176@@ -1808,9 +2082,17 @@ int expand_downwards(struct vm_area_struct *vma,
72177 */
72178
72179 /* Somebody else might have raced and expanded it already */
72180- if (address < vma->vm_start) {
72181+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
72182+ error = -ENOMEM;
72183+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
72184 unsigned long size, grow;
72185
72186+#ifdef CONFIG_PAX_SEGMEXEC
72187+ struct vm_area_struct *vma_m;
72188+
72189+ vma_m = pax_find_mirror_vma(vma);
72190+#endif
72191+
72192 size = vma->vm_end - address;
72193 grow = (vma->vm_start - address) >> PAGE_SHIFT;
72194
c6e2a6c8 72195@@ -1820,11 +2102,22 @@ int expand_downwards(struct vm_area_struct *vma,
72196 if (!error) {
72197 vma->vm_start = address;
72198 vma->vm_pgoff -= grow;
72199+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
72200+
72201+#ifdef CONFIG_PAX_SEGMEXEC
72202+ if (vma_m) {
72203+ vma_m->vm_start -= grow << PAGE_SHIFT;
72204+ vma_m->vm_pgoff -= grow;
72205+ }
72206+#endif
72207+
72208 perf_event_mmap(vma);
72209 }
72210 }
72211 }
6892158b 72212 vma_unlock_anon_vma(vma);
58c5fc13 72213+ if (lockprev)
6892158b 72214+ vma_unlock_anon_vma(prev);
16454cff 72215 khugepaged_enter_vma_merge(vma);
72216 return error;
72217 }
c6e2a6c8 72218@@ -1894,6 +2187,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
72219 do {
72220 long nrpages = vma_pages(vma);
72221
72222+#ifdef CONFIG_PAX_SEGMEXEC
72223+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
72224+ vma = remove_vma(vma);
72225+ continue;
72226+ }
72227+#endif
72228+
72229 mm->total_vm -= nrpages;
72230 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
72231 vma = remove_vma(vma);
c6e2a6c8 72232@@ -1939,6 +2239,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13 72233 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
57199397 72234 vma->vm_prev = NULL;
72235 do {
72236+
72237+#ifdef CONFIG_PAX_SEGMEXEC
72238+ if (vma->vm_mirror) {
72239+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
72240+ vma->vm_mirror->vm_mirror = NULL;
72241+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
72242+ vma->vm_mirror = NULL;
72243+ }
72244+#endif
72245+
72246 rb_erase(&vma->vm_rb, &mm->mm_rb);
72247 mm->map_count--;
72248 tail_vma = vma;
c6e2a6c8 72249@@ -1967,14 +2277,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
ae4e228f 72250 struct vm_area_struct *new;
df50ba0c 72251 int err = -ENOMEM;
ae4e228f 72252
58c5fc13 72253+#ifdef CONFIG_PAX_SEGMEXEC
ae4e228f 72254+ struct vm_area_struct *vma_m, *new_m = NULL;
58c5fc13 72255+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
ae4e228f 72256+#endif
58c5fc13 72257+
72258 if (is_vm_hugetlb_page(vma) && (addr &
72259 ~(huge_page_mask(hstate_vma(vma)))))
72260 return -EINVAL;
72261
72262+#ifdef CONFIG_PAX_SEGMEXEC
58c5fc13 72263+ vma_m = pax_find_mirror_vma(vma);
ae4e228f 72264+#endif
58c5fc13 72265+
72266 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72267 if (!new)
df50ba0c 72268 goto out_err;
72269
72270+#ifdef CONFIG_PAX_SEGMEXEC
72271+ if (vma_m) {
72272+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
72273+ if (!new_m) {
72274+ kmem_cache_free(vm_area_cachep, new);
df50ba0c 72275+ goto out_err;
72276+ }
72277+ }
ae4e228f 72278+#endif
58c5fc13 72279+
72280 /* most fields are the same, copy all, and then fixup */
72281 *new = *vma;
72282
c6e2a6c8 72283@@ -1987,6 +2316,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
72284 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
72285 }
72286
72287+#ifdef CONFIG_PAX_SEGMEXEC
72288+ if (vma_m) {
72289+ *new_m = *vma_m;
df50ba0c 72290+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
72291+ new_m->vm_mirror = new;
72292+ new->vm_mirror = new_m;
72293+
72294+ if (new_below)
72295+ new_m->vm_end = addr_m;
72296+ else {
72297+ new_m->vm_start = addr_m;
72298+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
72299+ }
72300+ }
72301+#endif
72302+
72303 pol = mpol_dup(vma_policy(vma));
72304 if (IS_ERR(pol)) {
df50ba0c 72305 err = PTR_ERR(pol);
c6e2a6c8 72306@@ -2012,6 +2357,42 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
ae4e228f 72307 else
df50ba0c 72308 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
72309
72310+#ifdef CONFIG_PAX_SEGMEXEC
72311+ if (!err && vma_m) {
72312+ if (anon_vma_clone(new_m, vma_m))
72313+ goto out_free_mpol;
72314+
72315+ mpol_get(pol);
72316+ vma_set_policy(new_m, pol);
72317+
72318+ if (new_m->vm_file) {
72319+ get_file(new_m->vm_file);
72320+ if (vma_m->vm_flags & VM_EXECUTABLE)
72321+ added_exe_file_vma(mm);
72322+ }
72323+
72324+ if (new_m->vm_ops && new_m->vm_ops->open)
72325+ new_m->vm_ops->open(new_m);
72326+
72327+ if (new_below)
df50ba0c 72328+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
72329+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
72330+ else
df50ba0c
MT
72331+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
72332+
72333+ if (err) {
72334+ if (new_m->vm_ops && new_m->vm_ops->close)
72335+ new_m->vm_ops->close(new_m);
72336+ if (new_m->vm_file) {
72337+ if (vma_m->vm_flags & VM_EXECUTABLE)
72338+ removed_exe_file_vma(mm);
72339+ fput(new_m->vm_file);
72340+ }
72341+ mpol_put(pol);
72342+ }
58c5fc13 72343+ }
ae4e228f 72344+#endif
58c5fc13 72345+
df50ba0c
MT
72346 /* Success. */
72347 if (!err)
72348 return 0;
c6e2a6c8 72349@@ -2024,10 +2405,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
6892158b
MT
72350 removed_exe_file_vma(mm);
72351 fput(new->vm_file);
72352 }
72353- unlink_anon_vmas(new);
df50ba0c
MT
72354 out_free_mpol:
72355 mpol_put(pol);
72356 out_free_vma:
72357+
72358+#ifdef CONFIG_PAX_SEGMEXEC
72359+ if (new_m) {
72360+ unlink_anon_vmas(new_m);
72361+ kmem_cache_free(vm_area_cachep, new_m);
72362+ }
72363+#endif
72364+
72365+ unlink_anon_vmas(new);
72366 kmem_cache_free(vm_area_cachep, new);
72367 out_err:
72368 return err;
c6e2a6c8 72369@@ -2040,6 +2429,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
ae4e228f
MT
72370 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
72371 unsigned long addr, int new_below)
72372 {
72373+
72374+#ifdef CONFIG_PAX_SEGMEXEC
72375+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
72376+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
72377+ if (mm->map_count >= sysctl_max_map_count-1)
72378+ return -ENOMEM;
72379+ } else
58c5fc13 72380+#endif
ae4e228f
MT
72381+
72382 if (mm->map_count >= sysctl_max_map_count)
72383 return -ENOMEM;
58c5fc13 72384
c6e2a6c8 72385@@ -2051,11 +2449,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
58c5fc13
MT
72386 * work. This now handles partial unmappings.
72387 * Jeremy Fitzhardinge <jeremy@goop.org>
72388 */
72389+#ifdef CONFIG_PAX_SEGMEXEC
15a11c5b
MT
72390 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72391 {
58c5fc13
MT
72392+ int ret = __do_munmap(mm, start, len);
72393+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
72394+ return ret;
72395+
72396+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
72397+}
72398+
72399+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
72400+#else
15a11c5b 72401+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
58c5fc13 72402+#endif
15a11c5b 72403+{
58c5fc13
MT
72404 unsigned long end;
72405 struct vm_area_struct *vma, *prev, *last;
72406
72407+ /*
72408+ * mm->mmap_sem is required to protect against another thread
72409+ * changing the mappings in case we sleep.
72410+ */
72411+ verify_mm_writelocked(mm);
72412+
72413 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
72414 return -EINVAL;
72415
c6e2a6c8 72416@@ -2130,6 +2547,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
58c5fc13
MT
72417 /* Fix up all other VM information */
72418 remove_vma_list(mm, vma);
72419
72420+ track_exec_limit(mm, start, end, 0UL);
72421+
72422 return 0;
72423 }
c6e2a6c8
MT
72424 EXPORT_SYMBOL(do_munmap);
72425@@ -2139,6 +2558,13 @@ int vm_munmap(unsigned long start, size_t len)
72426 int ret;
72427 struct mm_struct *mm = current->mm;
58c5fc13 72428
c6e2a6c8 72429+
58c5fc13
MT
72430+#ifdef CONFIG_PAX_SEGMEXEC
72431+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
c6e2a6c8 72432+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
58c5fc13
MT
72433+ return -EINVAL;
72434+#endif
72435+
72436 down_write(&mm->mmap_sem);
c6e2a6c8 72437 ret = do_munmap(mm, start, len);
58c5fc13 72438 up_write(&mm->mmap_sem);
c6e2a6c8
MT
72439@@ -2152,16 +2578,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
72440 return vm_munmap(addr, len);
58c5fc13
MT
72441 }
72442
72443-static inline void verify_mm_writelocked(struct mm_struct *mm)
72444-{
72445-#ifdef CONFIG_DEBUG_VM
72446- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
72447- WARN_ON(1);
72448- up_read(&mm->mmap_sem);
72449- }
72450-#endif
72451-}
72452-
72453 /*
72454 * this is really a simplified "do_mmap". it only handles
72455 * anonymous maps. eventually we may be able to do some
c6e2a6c8 72456@@ -2175,6 +2591,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
58c5fc13
MT
72457 struct rb_node ** rb_link, * rb_parent;
72458 pgoff_t pgoff = addr >> PAGE_SHIFT;
72459 int error;
72460+ unsigned long charged;
58c5fc13
MT
72461
72462 len = PAGE_ALIGN(len);
72463 if (!len)
c6e2a6c8 72464@@ -2186,16 +2603,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
58c5fc13
MT
72465
72466 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
72467
72468+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
72469+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
72470+ flags &= ~VM_EXEC;
72471+
72472+#ifdef CONFIG_PAX_MPROTECT
72473+ if (mm->pax_flags & MF_PAX_MPROTECT)
72474+ flags &= ~VM_MAYEXEC;
72475+#endif
72476+
72477+ }
72478+#endif
72479+
ae4e228f
MT
72480 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
72481 if (error & ~PAGE_MASK)
58c5fc13
MT
72482 return error;
72483
72484+ charged = len >> PAGE_SHIFT;
72485+
72486 /*
72487 * mlock MCL_FUTURE?
72488 */
72489 if (mm->def_flags & VM_LOCKED) {
72490 unsigned long locked, lock_limit;
72491- locked = len >> PAGE_SHIFT;
72492+ locked = charged;
72493 locked += mm->locked_vm;
df50ba0c 72494 lock_limit = rlimit(RLIMIT_MEMLOCK);
58c5fc13 72495 lock_limit >>= PAGE_SHIFT;
c6e2a6c8 72496@@ -2212,22 +2643,22 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
58c5fc13
MT
72497 /*
72498 * Clear old maps. this also does some error checking for us
72499 */
72500- munmap_back:
72501 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72502 if (vma && vma->vm_start < addr + len) {
72503 if (do_munmap(mm, addr, len))
72504 return -ENOMEM;
72505- goto munmap_back;
c6e2a6c8 72506- }
58c5fc13
MT
72507+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
72508+ BUG_ON(vma && vma->vm_start < addr + len);
c6e2a6c8 72509+ }
58c5fc13
MT
72510
72511 /* Check against address space limits *after* clearing old maps... */
72512- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
72513+ if (!may_expand_vm(mm, charged))
72514 return -ENOMEM;
72515
72516 if (mm->map_count > sysctl_max_map_count)
72517 return -ENOMEM;
72518
c6e2a6c8
MT
72519- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
72520+ if (security_vm_enough_memory_mm(mm, charged))
58c5fc13
MT
72521 return -ENOMEM;
72522
72523 /* Can we just expand an old private anonymous mapping? */
c6e2a6c8 72524@@ -2241,7 +2672,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
58c5fc13
MT
72525 */
72526 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72527 if (!vma) {
72528- vm_unacct_memory(len >> PAGE_SHIFT);
72529+ vm_unacct_memory(charged);
72530 return -ENOMEM;
72531 }
72532
c6e2a6c8 72533@@ -2255,11 +2686,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
58c5fc13
MT
72534 vma_link(mm, vma, prev, rb_link, rb_parent);
72535 out:
6892158b 72536 perf_event_mmap(vma);
58c5fc13
MT
72537- mm->total_vm += len >> PAGE_SHIFT;
72538+ mm->total_vm += charged;
72539 if (flags & VM_LOCKED) {
72540 if (!mlock_vma_pages_range(vma, addr, addr + len))
72541- mm->locked_vm += (len >> PAGE_SHIFT);
72542+ mm->locked_vm += charged;
72543 }
72544+ track_exec_limit(mm, addr, addr + len, flags);
72545 return addr;
72546 }
72547
c6e2a6c8 72548@@ -2315,8 +2747,10 @@ void exit_mmap(struct mm_struct *mm)
58c5fc13
MT
72549 * Walk the list again, actually closing and freeing it,
72550 * with preemption enabled, without holding any MM locks.
72551 */
72552- while (vma)
72553+ while (vma) {
72554+ vma->vm_mirror = NULL;
72555 vma = remove_vma(vma);
72556+ }
72557
72558 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
72559 }
c6e2a6c8 72560@@ -2330,6 +2764,13 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
58c5fc13
MT
72561 struct vm_area_struct * __vma, * prev;
72562 struct rb_node ** rb_link, * rb_parent;
72563
72564+#ifdef CONFIG_PAX_SEGMEXEC
72565+ struct vm_area_struct *vma_m = NULL;
72566+#endif
bc901d79
MT
72567+
72568+ if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
72569+ return -EPERM;
58c5fc13
MT
72570+
72571 /*
72572 * The vm_pgoff of a purely anonymous vma should be irrelevant
72573 * until its first write fault, when page's anon_vma and index
c6e2a6c8 72574@@ -2352,7 +2793,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
58c5fc13
MT
72575 if ((vma->vm_flags & VM_ACCOUNT) &&
72576 security_vm_enough_memory_mm(mm, vma_pages(vma)))
72577 return -ENOMEM;
72578+
72579+#ifdef CONFIG_PAX_SEGMEXEC
72580+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
72581+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72582+ if (!vma_m)
72583+ return -ENOMEM;
72584+ }
72585+#endif
72586+
72587 vma_link(mm, vma, prev, rb_link, rb_parent);
72588+
72589+#ifdef CONFIG_PAX_SEGMEXEC
72590+ if (vma_m)
df50ba0c 72591+ BUG_ON(pax_mirror_vma(vma_m, vma));
58c5fc13
MT
72592+#endif
72593+
72594 return 0;
72595 }
72596
c6e2a6c8 72597@@ -2371,6 +2827,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
58c5fc13 72598 struct mempolicy *pol;
5e856224 72599 bool faulted_in_anon_vma = true;
58c5fc13
MT
72600
72601+ BUG_ON(vma->vm_mirror);
72602+
72603 /*
72604 * If anonymous vma has not yet been faulted, update new pgoff
72605 * to match new location, to increase its chance of merging.
c6e2a6c8 72606@@ -2438,6 +2896,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
df50ba0c 72607 return NULL;
58c5fc13 72608 }
15a11c5b 72609
58c5fc13 72610+#ifdef CONFIG_PAX_SEGMEXEC
df50ba0c 72611+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
58c5fc13
MT
72612+{
72613+ struct vm_area_struct *prev_m;
72614+ struct rb_node **rb_link_m, *rb_parent_m;
72615+ struct mempolicy *pol_m;
72616+
72617+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
72618+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
72619+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
72620+ *vma_m = *vma;
df50ba0c
MT
72621+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
72622+ if (anon_vma_clone(vma_m, vma))
72623+ return -ENOMEM;
58c5fc13
MT
72624+ pol_m = vma_policy(vma_m);
72625+ mpol_get(pol_m);
72626+ vma_set_policy(vma_m, pol_m);
72627+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
72628+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
72629+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
72630+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
72631+ if (vma_m->vm_file)
72632+ get_file(vma_m->vm_file);
72633+ if (vma_m->vm_ops && vma_m->vm_ops->open)
72634+ vma_m->vm_ops->open(vma_m);
72635+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
72636+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
72637+ vma_m->vm_mirror = vma;
72638+ vma->vm_mirror = vma_m;
df50ba0c 72639+ return 0;
58c5fc13
MT
72640+}
72641+#endif
15a11c5b 72642+
58c5fc13
MT
72643 /*
72644 * Return true if the calling process may expand its vm space by the passed
15a11c5b 72645 * number of pages
c6e2a6c8 72646@@ -2449,6 +2940,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
58c5fc13 72647
df50ba0c 72648 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
5e856224
MT
72649
72650+#ifdef CONFIG_PAX_RANDMMAP
72651+ if (mm->pax_flags & MF_PAX_RANDMMAP)
72652+ cur -= mm->brk_gap;
72653+#endif
72654+
58c5fc13
MT
72655+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
72656 if (cur + npages > lim)
72657 return 0;
72658 return 1;
c6e2a6c8 72659@@ -2519,6 +3016,22 @@ int install_special_mapping(struct mm_struct *mm,
58c5fc13
MT
72660 vma->vm_start = addr;
72661 vma->vm_end = addr + len;
72662
72663+#ifdef CONFIG_PAX_MPROTECT
72664+ if (mm->pax_flags & MF_PAX_MPROTECT) {
c52201e0 72665+#ifndef CONFIG_PAX_MPROTECT_COMPAT
57199397
MT
72666+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
72667+ return -EPERM;
72668+ if (!(vm_flags & VM_EXEC))
72669+ vm_flags &= ~VM_MAYEXEC;
c52201e0
MT
72670+#else
72671+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
72672+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
72673+#endif
58c5fc13 72674+ else
57199397 72675+ vm_flags &= ~VM_MAYWRITE;
58c5fc13
MT
72676+ }
72677+#endif
72678+
72679 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
72680 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
72681
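
The mm/mmap.c hunks above split do_munmap() into a thin wrapper plus __do_munmap() so that, for a SEGMEXEC task, the mirror mapping SEGMEXEC_TASK_SIZE above the original is torn down in the same call. A minimal standalone sketch of that control flow, with made-up userspace stand-ins (unmap_range() and the SEGMEXEC_TASK_SIZE value are placeholders, not kernel code):

/* Toy model of the SEGMEXEC double-unmap idea: every unmap is repeated
 * SEGMEXEC_TASK_SIZE higher so the executable mirror goes away together
 * with the original mapping. */
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE (1UL << 30)   /* placeholder; the real value is arch-specific */

static int unmap_range(unsigned long start, unsigned long len)
{
        printf("unmap [%#lx, %#lx)\n", start, start + len);
        return 0;
}

static int do_munmap_model(int segmexec, unsigned long start, unsigned long len)
{
        int ret = unmap_range(start, len);
        if (ret || !segmexec)
                return ret;
        return unmap_range(start + SEGMEXEC_TASK_SIZE, len);
}

int main(void)
{
        return do_munmap_model(1, 0x100000UL, 0x2000UL);
}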
fe2de317 72682diff --git a/mm/mprotect.c b/mm/mprotect.c
c6e2a6c8 72683index a409926..8b32e6d 100644
fe2de317
MT
72684--- a/mm/mprotect.c
72685+++ b/mm/mprotect.c
c6e2a6c8 72686@@ -23,10 +23,17 @@
58c5fc13
MT
72687 #include <linux/mmu_notifier.h>
72688 #include <linux/migrate.h>
ae4e228f 72689 #include <linux/perf_event.h>
58c5fc13
MT
72690+
72691+#ifdef CONFIG_PAX_MPROTECT
72692+#include <linux/elf.h>
c6e2a6c8 72693+#include <linux/binfmts.h>
58c5fc13
MT
72694+#endif
72695+
72696 #include <asm/uaccess.h>
72697 #include <asm/pgtable.h>
72698 #include <asm/cacheflush.h>
72699 #include <asm/tlbflush.h>
72700+#include <asm/mmu_context.h>
72701
72702 #ifndef pgprot_modify
72703 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
c6e2a6c8 72704@@ -141,6 +148,48 @@ static void change_protection(struct vm_area_struct *vma,
58c5fc13
MT
72705 flush_tlb_range(vma, start, end);
72706 }
72707
72708+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
72709+/* called while holding the mmap semaphore for writing, except for stack expansion */
72710+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
72711+{
72712+ unsigned long oldlimit, newlimit = 0UL;
72713+
ae4e228f 72714+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
58c5fc13
MT
72715+ return;
72716+
72717+ spin_lock(&mm->page_table_lock);
72718+ oldlimit = mm->context.user_cs_limit;
72719+ if ((prot & VM_EXEC) && oldlimit < end)
72720+ /* USER_CS limit moved up */
72721+ newlimit = end;
72722+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
72723+ /* USER_CS limit moved down */
72724+ newlimit = start;
72725+
72726+ if (newlimit) {
72727+ mm->context.user_cs_limit = newlimit;
72728+
72729+#ifdef CONFIG_SMP
72730+ wmb();
72731+ cpus_clear(mm->context.cpu_user_cs_mask);
72732+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
72733+#endif
72734+
72735+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
72736+ }
72737+ spin_unlock(&mm->page_table_lock);
72738+ if (newlimit == end) {
72739+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
72740+
72741+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
72742+ if (is_vm_hugetlb_page(vma))
72743+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
72744+ else
72745+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
72746+ }
72747+}
72748+#endif
72749+
72750 int
72751 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
72752 unsigned long start, unsigned long end, unsigned long newflags)
c6e2a6c8 72753@@ -153,11 +202,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
58c5fc13
MT
72754 int error;
72755 int dirty_accountable = 0;
72756
72757+#ifdef CONFIG_PAX_SEGMEXEC
72758+ struct vm_area_struct *vma_m = NULL;
72759+ unsigned long start_m, end_m;
72760+
72761+ start_m = start + SEGMEXEC_TASK_SIZE;
72762+ end_m = end + SEGMEXEC_TASK_SIZE;
72763+#endif
72764+
72765 if (newflags == oldflags) {
72766 *pprev = vma;
72767 return 0;
57199397
MT
72768 }
72769
72770+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
72771+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
72772+
72773+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
72774+ return -ENOMEM;
72775+
72776+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
72777+ return -ENOMEM;
72778+ }
72779+
72780 /*
72781 * If we make a private mapping writable we increase our commit;
72782 * but (without finer accounting) cannot reduce our commit if we
c6e2a6c8 72783@@ -174,6 +241,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
58c5fc13
MT
72784 }
72785 }
72786
72787+#ifdef CONFIG_PAX_SEGMEXEC
72788+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
72789+ if (start != vma->vm_start) {
72790+ error = split_vma(mm, vma, start, 1);
72791+ if (error)
72792+ goto fail;
72793+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
72794+ *pprev = (*pprev)->vm_next;
72795+ }
72796+
72797+ if (end != vma->vm_end) {
72798+ error = split_vma(mm, vma, end, 0);
72799+ if (error)
72800+ goto fail;
72801+ }
72802+
72803+ if (pax_find_mirror_vma(vma)) {
72804+ error = __do_munmap(mm, start_m, end_m - start_m);
72805+ if (error)
72806+ goto fail;
72807+ } else {
72808+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
72809+ if (!vma_m) {
72810+ error = -ENOMEM;
72811+ goto fail;
72812+ }
72813+ vma->vm_flags = newflags;
df50ba0c
MT
72814+ error = pax_mirror_vma(vma_m, vma);
72815+ if (error) {
72816+ vma->vm_flags = oldflags;
72817+ goto fail;
72818+ }
58c5fc13
MT
72819+ }
72820+ }
72821+#endif
72822+
72823 /*
72824 * First try to merge with previous and/or next vma.
72825 */
c6e2a6c8 72826@@ -204,9 +307,21 @@ success:
df50ba0c 72827 * vm_flags and vm_page_prot are protected by the mmap_sem
58c5fc13
MT
72828 * held in write mode.
72829 */
df50ba0c
MT
72830+
72831+#ifdef CONFIG_PAX_SEGMEXEC
72832+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
72833+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
72834+#endif
72835+
58c5fc13
MT
72836 vma->vm_flags = newflags;
72837+
72838+#ifdef CONFIG_PAX_MPROTECT
ae4e228f
MT
72839+ if (mm->binfmt && mm->binfmt->handle_mprotect)
72840+ mm->binfmt->handle_mprotect(vma, newflags);
58c5fc13
MT
72841+#endif
72842+
72843 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
72844- vm_get_page_prot(newflags));
72845+ vm_get_page_prot(vma->vm_flags));
72846
72847 if (vma_wants_writenotify(vma)) {
72848 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
c6e2a6c8 72849@@ -248,6 +363,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
58c5fc13
MT
72850 end = start + len;
72851 if (end <= start)
72852 return -ENOMEM;
72853+
72854+#ifdef CONFIG_PAX_SEGMEXEC
72855+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
72856+ if (end > SEGMEXEC_TASK_SIZE)
72857+ return -EINVAL;
72858+ } else
72859+#endif
72860+
72861+ if (end > TASK_SIZE)
72862+ return -EINVAL;
72863+
72864 if (!arch_validate_prot(prot))
72865 return -EINVAL;
72866
c6e2a6c8 72867@@ -255,7 +381,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
58c5fc13
MT
72868 /*
72869 * Does the application expect PROT_READ to imply PROT_EXEC:
72870 */
72871- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
72872+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
72873 prot |= PROT_EXEC;
72874
72875 vm_flags = calc_vm_prot_bits(prot);
c6e2a6c8 72876@@ -288,6 +414,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
58c5fc13
MT
72877 if (start > vma->vm_start)
72878 prev = vma;
72879
58c5fc13 72880+#ifdef CONFIG_PAX_MPROTECT
ae4e228f
MT
72881+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
72882+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
58c5fc13
MT
72883+#endif
72884+
72885 for (nstart = start ; ; ) {
72886 unsigned long newflags;
72887
c6e2a6c8 72888@@ -297,6 +428,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
6892158b
MT
72889
72890 /* newflags >> 4 shift VM_MAY% in place of VM_% */
72891 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
72892+ if (prot & (PROT_WRITE | PROT_EXEC))
72893+ gr_log_rwxmprotect(vma->vm_file);
72894+
72895+ error = -EACCES;
72896+ goto out;
72897+ }
72898+
72899+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
72900 error = -EACCES;
72901 goto out;
72902 }
c6e2a6c8 72903@@ -311,6 +450,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
bc901d79 72904 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
58c5fc13
MT
72905 if (error)
72906 goto out;
58c5fc13
MT
72907+
72908+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
72909+
72910 nstart = tmp;
72911
72912 if (nstart < prev->vm_end)
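
The track_exec_limit() hunk above raises mm->context.user_cs_limit when executable permission appears above the current limit and lowers it when the range holding the limit loses VM_EXEC. The decision itself is plain arithmetic; a standalone sketch under those assumptions (driver values invented, only the comparisons come from the patch):

/* Standalone model of the user_cs limit arithmetic; kernel types and
 * locking omitted. */
#include <stdio.h>

static unsigned long pick_new_limit(unsigned long oldlimit, unsigned long start,
                                    unsigned long end, int vm_exec)
{
        if (vm_exec && oldlimit < end)
                return end;        /* executable range grew past the limit: move it up */
        if (!vm_exec && start < oldlimit && oldlimit <= end)
                return start;      /* range holding the limit lost VM_EXEC: move it down */
        return 0;                  /* 0 means "leave the limit alone", as in the patch */
}

int main(void)
{
        printf("%#lx\n", pick_new_limit(0x08048000UL, 0x08048000UL, 0x08100000UL, 1));
        printf("%#lx\n", pick_new_limit(0x08100000UL, 0x08048000UL, 0x08100000UL, 0));
        return 0;
}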
fe2de317 72913diff --git a/mm/mremap.c b/mm/mremap.c
c6e2a6c8 72914index db8d983..76506cb 100644
fe2de317
MT
72915--- a/mm/mremap.c
72916+++ b/mm/mremap.c
4c928ab7 72917@@ -106,6 +106,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
58c5fc13 72918 continue;
4c928ab7 72919 pte = ptep_get_and_clear(mm, old_addr, old_pte);
58c5fc13
MT
72920 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
72921+
72922+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
ae4e228f 72923+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
58c5fc13
MT
72924+ pte = pte_exprotect(pte);
72925+#endif
72926+
72927 set_pte_at(mm, new_addr, new_pte, pte);
72928 }
72929
5e856224 72930@@ -299,6 +305,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
ae4e228f
MT
72931 if (is_vm_hugetlb_page(vma))
72932 goto Einval;
72933
72934+#ifdef CONFIG_PAX_SEGMEXEC
72935+ if (pax_find_mirror_vma(vma))
72936+ goto Einval;
72937+#endif
72938+
72939 /* We can't remap across vm area boundaries */
72940 if (old_len > vma->vm_end - addr)
72941 goto Efault;
5e856224 72942@@ -355,20 +366,25 @@ static unsigned long mremap_to(unsigned long addr,
ae4e228f
MT
72943 unsigned long ret = -EINVAL;
72944 unsigned long charged = 0;
72945 unsigned long map_flags;
72946+ unsigned long pax_task_size = TASK_SIZE;
72947
72948 if (new_addr & ~PAGE_MASK)
72949 goto out;
72950
72951- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
72952+#ifdef CONFIG_PAX_SEGMEXEC
72953+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
72954+ pax_task_size = SEGMEXEC_TASK_SIZE;
72955+#endif
72956+
6892158b
MT
72957+ pax_task_size -= PAGE_SIZE;
72958+
ae4e228f
MT
72959+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
72960 goto out;
72961
72962 /* Check if the location we're moving into overlaps the
72963 * old location at all, and fail if it does.
72964 */
72965- if ((new_addr <= addr) && (new_addr+new_len) > addr)
72966- goto out;
72967-
72968- if ((addr <= new_addr) && (addr+old_len) > new_addr)
72969+ if (addr + old_len > new_addr && new_addr + new_len > addr)
72970 goto out;
72971
72972 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
5e856224 72973@@ -440,6 +456,7 @@ unsigned long do_mremap(unsigned long addr,
58c5fc13
MT
72974 struct vm_area_struct *vma;
72975 unsigned long ret = -EINVAL;
72976 unsigned long charged = 0;
72977+ unsigned long pax_task_size = TASK_SIZE;
72978
72979 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
72980 goto out;
5e856224 72981@@ -458,6 +475,17 @@ unsigned long do_mremap(unsigned long addr,
58c5fc13
MT
72982 if (!new_len)
72983 goto out;
72984
72985+#ifdef CONFIG_PAX_SEGMEXEC
ae4e228f 72986+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
58c5fc13
MT
72987+ pax_task_size = SEGMEXEC_TASK_SIZE;
72988+#endif
72989+
6892158b
MT
72990+ pax_task_size -= PAGE_SIZE;
72991+
58c5fc13
MT
72992+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
72993+ old_len > pax_task_size || addr > pax_task_size-old_len)
72994+ goto out;
72995+
58c5fc13 72996 if (flags & MREMAP_FIXED) {
ae4e228f
MT
72997 if (flags & MREMAP_MAYMOVE)
72998 ret = mremap_to(addr, old_len, new_addr, new_len);
5e856224 72999@@ -507,6 +535,7 @@ unsigned long do_mremap(unsigned long addr,
58c5fc13
MT
73000 addr + new_len);
73001 }
73002 ret = addr;
73003+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
73004 goto out;
73005 }
73006 }
5e856224 73007@@ -533,7 +562,13 @@ unsigned long do_mremap(unsigned long addr,
ae4e228f
MT
73008 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
73009 if (ret)
73010 goto out;
73011+
58c5fc13
MT
73012+ map_flags = vma->vm_flags;
73013 ret = move_vma(vma, addr, old_len, new_len, new_addr);
73014+ if (!(ret & ~PAGE_MASK)) {
73015+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
73016+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
73017+ }
73018 }
73019 out:
73020 if (ret & ~PAGE_MASK)
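
The mremap_to() hunk above folds the two one-sided overlap tests into a single interval check. A tiny self-contained snippet exercising that predicate with invented ranges:

/* Two ranges [addr, addr+old_len) and [new_addr, new_addr+new_len) overlap
 * exactly when each starts before the other ends. */
#include <assert.h>

static int ranges_overlap(unsigned long addr, unsigned long old_len,
                          unsigned long new_addr, unsigned long new_len)
{
        return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
        assert(ranges_overlap(0x1000, 0x1000, 0x1800, 0x1000));    /* partial overlap */
        assert(!ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000));   /* adjacent: no overlap */
        assert(ranges_overlap(0x1000, 0x4000, 0x2000, 0x1000));    /* new range inside old */
        return 0;
}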
fe2de317 73021diff --git a/mm/nommu.c b/mm/nommu.c
c6e2a6c8 73022index bb8f4f0..40d3e02 100644
fe2de317
MT
73023--- a/mm/nommu.c
73024+++ b/mm/nommu.c
73025@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
57199397
MT
73026 int sysctl_overcommit_ratio = 50; /* default is 50% */
73027 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
73028 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
73029-int heap_stack_gap = 0;
73030
73031 atomic_long_t mmap_pages_allocated;
73032
4c928ab7 73033@@ -827,15 +826,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
58c5fc13
MT
73034 EXPORT_SYMBOL(find_vma);
73035
73036 /*
73037- * find a VMA
73038- * - we don't extend stack VMAs under NOMMU conditions
73039- */
73040-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
73041-{
73042- return find_vma(mm, addr);
73043-}
73044-
73045-/*
73046 * expand a stack to a given address
73047 * - not supported under NOMMU conditions
73048 */
c6e2a6c8 73049@@ -1580,6 +1570,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
df50ba0c
MT
73050
73051 /* most fields are the same, copy all, and then fixup */
73052 *new = *vma;
73053+ INIT_LIST_HEAD(&new->anon_vma_chain);
73054 *region = *vma->vm_region;
73055 new->vm_region = region;
73056
fe2de317 73057diff --git a/mm/page_alloc.c b/mm/page_alloc.c
c6e2a6c8 73058index 918330f..ae99ae1 100644
fe2de317
MT
73059--- a/mm/page_alloc.c
73060+++ b/mm/page_alloc.c
5e856224 73061@@ -335,7 +335,7 @@ out:
15a11c5b
MT
73062 * This usage means that zero-order pages may not be compound.
73063 */
73064
73065-static void free_compound_page(struct page *page)
73066+void free_compound_page(struct page *page)
73067 {
73068 __free_pages_ok(page, compound_order(page));
73069 }
5e856224 73070@@ -692,6 +692,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
57199397 73071 int i;
58c5fc13 73072 int bad = 0;
58c5fc13
MT
73073
73074+#ifdef CONFIG_PAX_MEMORY_SANITIZE
73075+ unsigned long index = 1UL << order;
73076+#endif
73077+
5e856224 73078 trace_mm_page_free(page, order);
58c5fc13
MT
73079 kmemcheck_free_shadow(page, order);
73080
5e856224 73081@@ -707,6 +711,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
58c5fc13
MT
73082 debug_check_no_obj_freed(page_address(page),
73083 PAGE_SIZE << order);
73084 }
73085+
73086+#ifdef CONFIG_PAX_MEMORY_SANITIZE
73087+ for (; index; --index)
73088+ sanitize_highpage(page + index - 1);
73089+#endif
73090+
73091 arch_free_page(page, order);
73092 kernel_map_pages(page, 1 << order, 0);
73093
5e856224 73094@@ -830,8 +840,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
58c5fc13
MT
73095 arch_alloc_page(page, order);
73096 kernel_map_pages(page, 1 << order, 1);
73097
73098+#ifndef CONFIG_PAX_MEMORY_SANITIZE
73099 if (gfp_flags & __GFP_ZERO)
73100 prep_zero_page(page, order, gfp_flags);
73101+#endif
73102
73103 if (order && (gfp_flags & __GFP_COMP))
73104 prep_compound_page(page, order);
c6e2a6c8 73105@@ -3523,7 +3535,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
6e9df6a3
MT
73106 unsigned long pfn;
73107
73108 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
73109+#ifdef CONFIG_X86_32
73110+ /* boot failures were seen in VMware 8 on 32bit vanilla since this
73111+ change, so keep the full pfn_valid() check here */
73112+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
73113+#else
73114 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
73115+#endif
73116 return 1;
73117 }
73118 return 0;
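
The page_alloc.c hunks above wipe pages in free_pages_prepare() and drop the __GFP_ZERO zeroing at allocation time, on the assumption that freed pages are already clean. A rough userspace analogy of sanitize-on-free, built on a hypothetical malloc() wrapper rather than the kernel page allocator (a hardened version would use explicit_bzero() so the wipe cannot be optimized away):

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct sized_buf {
        size_t len;
        unsigned char data[];
};

static void *alloc_buf(size_t len)
{
        struct sized_buf *b = malloc(sizeof(*b) + len);
        if (!b)
                return NULL;
        b->len = len;
        return b->data;
}

static void free_sanitized(void *p)
{
        struct sized_buf *b;

        if (!p)
                return;
        b = (struct sized_buf *)((unsigned char *)p - offsetof(struct sized_buf, data));
        memset(b->data, 0, b->len);     /* analogous to sanitize_highpage() on each freed page */
        free(b);
}

int main(void)
{
        char *p = alloc_buf(64);
        free_sanitized(p);
        return 0;
}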
fe2de317 73119diff --git a/mm/percpu.c b/mm/percpu.c
c6e2a6c8 73120index bb4be74..a43ea85 100644
fe2de317
MT
73121--- a/mm/percpu.c
73122+++ b/mm/percpu.c
5e856224 73123@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
4c928ab7 73124 static unsigned int pcpu_high_unit_cpu __read_mostly;
58c5fc13
MT
73125
73126 /* the address of the first chunk which starts with the kernel static area */
73127-void *pcpu_base_addr __read_mostly;
73128+void *pcpu_base_addr __read_only;
73129 EXPORT_SYMBOL_GPL(pcpu_base_addr);
73130
ae4e228f 73131 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
4c928ab7 73132diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
5e856224 73133index c20ff48..137702a 100644
4c928ab7
MT
73134--- a/mm/process_vm_access.c
73135+++ b/mm/process_vm_access.c
73136@@ -13,6 +13,7 @@
73137 #include <linux/uio.h>
73138 #include <linux/sched.h>
73139 #include <linux/highmem.h>
73140+#include <linux/security.h>
73141 #include <linux/ptrace.h>
73142 #include <linux/slab.h>
73143 #include <linux/syscalls.h>
73144@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
73145 size_t iov_l_curr_offset = 0;
73146 ssize_t iov_len;
73147
73148+ return -ENOSYS; // PaX: until properly audited
73149+
73150 /*
73151 * Work out how many pages of struct pages we're going to need
73152 * when eventually calling get_user_pages
73153 */
73154 for (i = 0; i < riovcnt; i++) {
73155 iov_len = rvec[i].iov_len;
73156- if (iov_len > 0) {
73157- nr_pages_iov = ((unsigned long)rvec[i].iov_base
73158- + iov_len)
73159- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
73160- / PAGE_SIZE + 1;
73161- nr_pages = max(nr_pages, nr_pages_iov);
73162- }
73163+ if (iov_len <= 0)
73164+ continue;
73165+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
73166+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
73167+ nr_pages = max(nr_pages, nr_pages_iov);
73168 }
73169
73170 if (nr_pages == 0)
5e856224 73171@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
4c928ab7
MT
73172 goto free_proc_pages;
73173 }
73174
4c928ab7 73175+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
5e856224
MT
73176+ rc = -EPERM;
73177+ goto put_task_struct;
73178+ }
73179+
73180 mm = mm_access(task, PTRACE_MODE_ATTACH);
73181 if (!mm || IS_ERR(mm)) {
73182 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
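
Because the process_vm_access.c hunk above makes process_vm_rw_core() return -ENOSYS until the code is audited, userspace callers on such a kernel see ENOSYS and need a fallback (for example ptrace() or /proc/<pid>/mem). A minimal self-read example using the regular glibc wrapper:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char src[] = "hello", dst[sizeof(src)] = "";
        struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
        struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };
        ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);

        if (n < 0) {
                if (errno == ENOSYS)
                        fprintf(stderr, "process_vm_readv disabled by this kernel\n");
                else
                        perror("process_vm_readv");
                return 1;
        }
        printf("copied %zd bytes: %s\n", n, dst);
        return 0;
}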
fe2de317 73183diff --git a/mm/rmap.c b/mm/rmap.c
c6e2a6c8 73184index 5b5ad58..0f77903 100644
fe2de317
MT
73185--- a/mm/rmap.c
73186+++ b/mm/rmap.c
c6e2a6c8 73187@@ -167,6 +167,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
df50ba0c
MT
73188 struct anon_vma *anon_vma = vma->anon_vma;
73189 struct anon_vma_chain *avc;
73190
73191+#ifdef CONFIG_PAX_SEGMEXEC
73192+ struct anon_vma_chain *avc_m = NULL;
73193+#endif
73194+
73195 might_sleep();
73196 if (unlikely(!anon_vma)) {
58c5fc13 73197 struct mm_struct *mm = vma->vm_mm;
c6e2a6c8 73198@@ -176,6 +180,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
df50ba0c
MT
73199 if (!avc)
73200 goto out_enomem;
73201
73202+#ifdef CONFIG_PAX_SEGMEXEC
15a11c5b 73203+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
df50ba0c
MT
73204+ if (!avc_m)
73205+ goto out_enomem_free_avc;
73206+#endif
58c5fc13
MT
73207+
73208 anon_vma = find_mergeable_anon_vma(vma);
73209 allocated = NULL;
73210 if (!anon_vma) {
c6e2a6c8 73211@@ -189,6 +199,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
57199397
MT
73212 /* page_table_lock to protect against threads */
73213 spin_lock(&mm->page_table_lock);
73214 if (likely(!vma->anon_vma)) {
58c5fc13
MT
73215+
73216+#ifdef CONFIG_PAX_SEGMEXEC
57199397
MT
73217+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
73218+
58c5fc13 73219+ if (vma_m) {
df50ba0c 73220+ BUG_ON(vma_m->anon_vma);
58c5fc13 73221+ vma_m->anon_vma = anon_vma;
c6e2a6c8 73222+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
df50ba0c 73223+ avc_m = NULL;
58c5fc13
MT
73224+ }
73225+#endif
73226+
57199397 73227 vma->anon_vma = anon_vma;
c6e2a6c8
MT
73228 anon_vma_chain_link(vma, avc, anon_vma);
73229 allocated = NULL;
73230@@ -199,12 +221,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
df50ba0c
MT
73231
73232 if (unlikely(allocated))
66a7e928 73233 put_anon_vma(allocated);
df50ba0c
MT
73234+
73235+#ifdef CONFIG_PAX_SEGMEXEC
73236+ if (unlikely(avc_m))
73237+ anon_vma_chain_free(avc_m);
73238+#endif
73239+
73240 if (unlikely(avc))
73241 anon_vma_chain_free(avc);
73242 }
73243 return 0;
73244
73245 out_enomem_free_avc:
73246+
73247+#ifdef CONFIG_PAX_SEGMEXEC
73248+ if (avc_m)
73249+ anon_vma_chain_free(avc_m);
73250+#endif
73251+
73252 anon_vma_chain_free(avc);
73253 out_enomem:
73254 return -ENOMEM;
c6e2a6c8 73255@@ -240,7 +274,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
57199397
MT
73256 * Attach the anon_vmas from src to dst.
73257 * Returns 0 on success, -ENOMEM on failure.
73258 */
73259-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
73260+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
73261 {
73262 struct anon_vma_chain *avc, *pavc;
15a11c5b 73263 struct anon_vma *root = NULL;
c6e2a6c8 73264@@ -318,7 +352,7 @@ void anon_vma_moveto_tail(struct vm_area_struct *dst)
57199397
MT
73265 * the corresponding VMA in the parent process is attached to.
73266 * Returns 0 on success, non-zero on failure.
73267 */
73268-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
73269+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
73270 {
73271 struct anon_vma_chain *avc;
73272 struct anon_vma *anon_vma;
fe2de317 73273diff --git a/mm/shmem.c b/mm/shmem.c
572b4308 73274index 9d65a02..7c877e7 100644
fe2de317
MT
73275--- a/mm/shmem.c
73276+++ b/mm/shmem.c
6892158b 73277@@ -31,7 +31,7 @@
4c928ab7 73278 #include <linux/export.h>
58c5fc13 73279 #include <linux/swap.h>
58c5fc13
MT
73280
73281-static struct vfsmount *shm_mnt;
73282+struct vfsmount *shm_mnt;
73283
73284 #ifdef CONFIG_SHMEM
73285 /*
6e9df6a3
MT
73286@@ -74,7 +74,7 @@ static struct vfsmount *shm_mnt;
73287 #define BOGO_DIRENT_SIZE 20
73288
73289 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
73290-#define SHORT_SYMLINK_LEN 128
73291+#define SHORT_SYMLINK_LEN 64
73292
73293 struct shmem_xattr {
73294 struct list_head list; /* anchored by shmem_inode_info->xattr_list */
572b4308 73295@@ -2236,8 +2236,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
66a7e928
MT
73296 int err = -ENOMEM;
73297
73298 /* Round up to L1_CACHE_BYTES to resist false sharing */
73299- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
73300- L1_CACHE_BYTES), GFP_KERNEL);
73301+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
73302 if (!sbinfo)
73303 return -ENOMEM;
73304
fe2de317 73305diff --git a/mm/slab.c b/mm/slab.c
572b4308 73306index e901a36..ca479fc 100644
fe2de317
MT
73307--- a/mm/slab.c
73308+++ b/mm/slab.c
5e856224 73309@@ -153,7 +153,7 @@
71d190be
MT
73310
73311 /* Legal flag mask for kmem_cache_create(). */
73312 #if DEBUG
73313-# define CREATE_MASK (SLAB_RED_ZONE | \
73314+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
73315 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
73316 SLAB_CACHE_DMA | \
73317 SLAB_STORE_USER | \
5e856224 73318@@ -161,7 +161,7 @@
71d190be
MT
73319 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
73320 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
73321 #else
73322-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
73323+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
73324 SLAB_CACHE_DMA | \
73325 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
73326 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
5e856224 73327@@ -290,7 +290,7 @@ struct kmem_list3 {
58c5fc13
MT
73328 * Need this for bootstrapping a per node allocator.
73329 */
73330 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
16454cff
MT
73331-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
73332+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
58c5fc13
MT
73333 #define CACHE_CACHE 0
73334 #define SIZE_AC MAX_NUMNODES
73335 #define SIZE_L3 (2 * MAX_NUMNODES)
5e856224 73336@@ -391,10 +391,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
8308f9c9
MT
73337 if ((x)->max_freeable < i) \
73338 (x)->max_freeable = i; \
73339 } while (0)
73340-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
73341-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
73342-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
73343-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
73344+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
73345+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
73346+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
73347+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
73348 #else
73349 #define STATS_INC_ACTIVE(x) do { } while (0)
73350 #define STATS_DEC_ACTIVE(x) do { } while (0)
5e856224 73351@@ -542,7 +542,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
58c5fc13
MT
73352 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
73353 */
73354 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
73355- const struct slab *slab, void *obj)
73356+ const struct slab *slab, const void *obj)
73357 {
73358 u32 offset = (obj - slab->s_mem);
73359 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
572b4308
MT
73360@@ -563,12 +563,13 @@ EXPORT_SYMBOL(malloc_sizes);
73361 struct cache_names {
73362 char *name;
73363 char *name_dma;
73364+ char *name_usercopy;
73365 };
73366
58c5fc13 73367 static struct cache_names __initdata cache_names[] = {
572b4308
MT
73368-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
73369+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
58c5fc13
MT
73370 #include <linux/kmalloc_sizes.h>
73371- {NULL,}
71d190be 73372+ {NULL}
58c5fc13
MT
73373 #undef CACHE
73374 };
73375
572b4308
MT
73376@@ -756,6 +757,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
73377 if (unlikely(gfpflags & GFP_DMA))
73378 return csizep->cs_dmacachep;
73379 #endif
73380+
73381+#ifdef CONFIG_PAX_USERCOPY_SLABS
73382+ if (unlikely(gfpflags & GFP_USERCOPY))
73383+ return csizep->cs_usercopycachep;
73384+#endif
73385+
73386 return csizep->cs_cachep;
73387 }
73388
73389@@ -1588,7 +1595,7 @@ void __init kmem_cache_init(void)
71d190be
MT
73390 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
73391 sizes[INDEX_AC].cs_size,
73392 ARCH_KMALLOC_MINALIGN,
73393- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73394+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73395 NULL);
73396
73397 if (INDEX_AC != INDEX_L3) {
572b4308 73398@@ -1596,7 +1603,7 @@ void __init kmem_cache_init(void)
71d190be
MT
73399 kmem_cache_create(names[INDEX_L3].name,
73400 sizes[INDEX_L3].cs_size,
73401 ARCH_KMALLOC_MINALIGN,
73402- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73403+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73404 NULL);
73405 }
73406
572b4308 73407@@ -1614,7 +1621,7 @@ void __init kmem_cache_init(void)
71d190be
MT
73408 sizes->cs_cachep = kmem_cache_create(names->name,
73409 sizes->cs_size,
73410 ARCH_KMALLOC_MINALIGN,
73411- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
73412+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73413 NULL);
73414 }
73415 #ifdef CONFIG_ZONE_DMA
572b4308
MT
73416@@ -1626,6 +1633,16 @@ void __init kmem_cache_init(void)
73417 SLAB_PANIC,
73418 NULL);
73419 #endif
73420+
73421+#ifdef CONFIG_PAX_USERCOPY_SLABS
73422+ sizes->cs_usercopycachep = kmem_cache_create(
73423+ names->name_usercopy,
73424+ sizes->cs_size,
73425+ ARCH_KMALLOC_MINALIGN,
73426+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
73427+ NULL);
73428+#endif
73429+
73430 sizes++;
73431 names++;
73432 }
73433@@ -4390,10 +4407,10 @@ static int s_show(struct seq_file *m, void *p)
8308f9c9
MT
73434 }
73435 /* cpu stats */
73436 {
73437- unsigned long allochit = atomic_read(&cachep->allochit);
73438- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
73439- unsigned long freehit = atomic_read(&cachep->freehit);
73440- unsigned long freemiss = atomic_read(&cachep->freemiss);
73441+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
73442+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
73443+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
73444+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
73445
73446 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
73447 allochit, allocmiss, freehit, freemiss);
572b4308 73448@@ -4652,13 +4669,68 @@ static int __init slab_proc_init(void)
58c5fc13 73449 {
4c928ab7 73450 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
df50ba0c
MT
73451 #ifdef CONFIG_DEBUG_SLAB_LEAK
73452- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4c928ab7 73453+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
df50ba0c
MT
73454 #endif
73455 return 0;
73456 }
58c5fc13
MT
73457 module_init(slab_proc_init);
73458 #endif
73459
572b4308 73460+bool is_usercopy_object(const void *ptr)
58c5fc13 73461+{
572b4308
MT
73462+ struct page *page;
73463+ struct kmem_cache *cachep;
73464+
73465+ if (ZERO_OR_NULL_PTR(ptr))
73466+ return false;
73467+
73468+ if (!virt_addr_valid(ptr))
73469+ return false;
73470+
73471+ page = virt_to_head_page(ptr);
73472+
73473+ if (!PageSlab(page))
73474+ return false;
73475+
73476+ cachep = page_get_cache(page);
73477+ return cachep->flags & SLAB_USERCOPY;
73478+}
58c5fc13
MT
73479+
73480+#ifdef CONFIG_PAX_USERCOPY
572b4308
MT
73481+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
73482+{
58c5fc13 73483+ struct page *page;
572b4308 73484+ struct kmem_cache *cachep;
71d190be 73485+ struct slab *slabp;
58c5fc13
MT
73486+ unsigned int objnr;
73487+ unsigned long offset;
58c5fc13
MT
73488+
73489+ if (ZERO_OR_NULL_PTR(ptr))
572b4308 73490+ return "<null>";
58c5fc13
MT
73491+
73492+ if (!virt_addr_valid(ptr))
572b4308 73493+ return NULL;
58c5fc13
MT
73494+
73495+ page = virt_to_head_page(ptr);
73496+
572b4308
MT
73497+ if (!PageSlab(page))
73498+ return NULL;
58c5fc13
MT
73499+
73500+ cachep = page_get_cache(page);
71d190be 73501+ if (!(cachep->flags & SLAB_USERCOPY))
572b4308 73502+ return cachep->name;
71d190be 73503+
58c5fc13
MT
73504+ slabp = page_get_slab(page);
73505+ objnr = obj_to_index(cachep, slabp, ptr);
73506+ BUG_ON(objnr >= cachep->num);
73507+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
73508+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
572b4308 73509+ return NULL;
58c5fc13 73510+
572b4308 73511+ return cachep->name;
58c5fc13 73512+}
572b4308 73513+#endif
58c5fc13
MT
73514+
73515 /**
73516 * ksize - get the actual amount of memory allocated for a given object
73517 * @objp: Pointer to the object
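
The check_heap_object() helper added to mm/slab.c above rejects a usercopy whose span leaves its slab object; the core test is the offset/length comparison at the end. A standalone model of just that bounds check, with invented sizes:

#include <assert.h>

/* A copy of n bytes starting at offset into an object of size objsize is
 * allowed only if it stays inside that single object. */
static int copy_within_object(unsigned long offset, unsigned long n,
                              unsigned long objsize)
{
        return offset <= objsize && n <= objsize - offset;
}

int main(void)
{
        assert(copy_within_object(0, 64, 64));     /* whole object: allowed */
        assert(copy_within_object(16, 32, 64));    /* inside the object: allowed */
        assert(!copy_within_object(32, 64, 64));   /* overflows into the next object: rejected */
        return 0;
}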
fe2de317 73518diff --git a/mm/slob.c b/mm/slob.c
572b4308 73519index 8105be4..3c15e57 100644
fe2de317
MT
73520--- a/mm/slob.c
73521+++ b/mm/slob.c
58c5fc13
MT
73522@@ -29,7 +29,7 @@
73523 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
73524 * alloc_pages() directly, allocating compound pages so the page order
73525 * does not have to be separately tracked, and also stores the exact
73526- * allocation size in page->private so that it can be used to accurately
73527+ * allocation size in slob_page->size so that it can be used to accurately
73528 * provide ksize(). These objects are detected in kfree() because slob_page()
73529 * is false for them.
73530 *
73531@@ -58,6 +58,7 @@
73532 */
73533
73534 #include <linux/kernel.h>
73535+#include <linux/sched.h>
73536 #include <linux/slab.h>
73537 #include <linux/mm.h>
73538 #include <linux/swap.h> /* struct reclaim_state */
6892158b 73539@@ -102,7 +103,8 @@ struct slob_page {
58c5fc13
MT
73540 unsigned long flags; /* mandatory */
73541 atomic_t _count; /* mandatory */
73542 slobidx_t units; /* free units left in page */
73543- unsigned long pad[2];
73544+ unsigned long pad[1];
73545+ unsigned long size; /* size when >=PAGE_SIZE */
73546 slob_t *free; /* first free slob_t in page */
73547 struct list_head list; /* linked list of free pages */
73548 };
6892158b 73549@@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
58c5fc13
MT
73550 */
73551 static inline int is_slob_page(struct slob_page *sp)
73552 {
73553- return PageSlab((struct page *)sp);
73554+ return PageSlab((struct page *)sp) && !sp->size;
73555 }
73556
73557 static inline void set_slob_page(struct slob_page *sp)
fe2de317 73558@@ -150,7 +152,7 @@ static inline void clear_slob_page(struct slob_page *sp)
58c5fc13
MT
73559
73560 static inline struct slob_page *slob_page(const void *addr)
73561 {
73562- return (struct slob_page *)virt_to_page(addr);
73563+ return (struct slob_page *)virt_to_head_page(addr);
73564 }
73565
73566 /*
fe2de317 73567@@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
58c5fc13
MT
73568 /*
73569 * Return the size of a slob block.
73570 */
73571-static slobidx_t slob_units(slob_t *s)
73572+static slobidx_t slob_units(const slob_t *s)
73573 {
73574 if (s->units > 0)
73575 return s->units;
6892158b 73576@@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
58c5fc13
MT
73577 /*
73578 * Return the next free slob block pointer after this one.
73579 */
73580-static slob_t *slob_next(slob_t *s)
73581+static slob_t *slob_next(const slob_t *s)
73582 {
73583 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
73584 slobidx_t next;
6892158b 73585@@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
58c5fc13
MT
73586 /*
73587 * Returns true if s is the last free block in its page.
73588 */
73589-static int slob_last(slob_t *s)
73590+static int slob_last(const slob_t *s)
73591 {
73592 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
73593 }
fe2de317 73594@@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
58c5fc13
MT
73595 if (!page)
73596 return NULL;
73597
73598+ set_slob_page(page);
73599 return page_address(page);
73600 }
73601
fe2de317 73602@@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
58c5fc13
MT
73603 if (!b)
73604 return NULL;
73605 sp = slob_page(b);
73606- set_slob_page(sp);
73607
73608 spin_lock_irqsave(&slob_lock, flags);
73609 sp->units = SLOB_UNITS(PAGE_SIZE);
73610 sp->free = b;
73611+ sp->size = 0;
73612 INIT_LIST_HEAD(&sp->list);
73613 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
73614 set_slob_page_free(sp, slob_list);
6892158b 73615@@ -476,10 +479,9 @@ out:
57199397
MT
73616 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
73617 */
58c5fc13
MT
73618
73619-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73620+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
73621 {
73622- unsigned int *m;
73623- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73624+ slob_t *m;
73625 void *ret;
73626
6e9df6a3 73627 gfp &= gfp_allowed_mask;
fe2de317 73628@@ -494,7 +496,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
58c5fc13
MT
73629
73630 if (!m)
73631 return NULL;
73632- *m = size;
73633+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
73634+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
73635+ m[0].units = size;
73636+ m[1].units = align;
73637 ret = (void *)m + align;
73638
73639 trace_kmalloc_node(_RET_IP_, ret,
fe2de317 73640@@ -506,16 +511,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
bc901d79
MT
73641 gfp |= __GFP_COMP;
73642 ret = slob_new_pages(gfp, order, node);
58c5fc13
MT
73643 if (ret) {
73644- struct page *page;
73645- page = virt_to_page(ret);
73646- page->private = size;
73647+ struct slob_page *sp;
73648+ sp = slob_page(ret);
73649+ sp->size = size;
73650 }
73651
73652 trace_kmalloc_node(_RET_IP_, ret,
15a11c5b
MT
73653 size, PAGE_SIZE << order, gfp, node);
73654 }
73655
73656- kmemleak_alloc(ret, size, 1, gfp);
73657+ return ret;
73658+}
58c5fc13
MT
73659+
73660+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
73661+{
73662+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
15a11c5b 73663+ void *ret = __kmalloc_node_align(size, gfp, node, align);
58c5fc13 73664+
15a11c5b
MT
73665+ if (!ZERO_OR_NULL_PTR(ret))
73666+ kmemleak_alloc(ret, size, 1, gfp);
73667 return ret;
73668 }
58c5fc13 73669 EXPORT_SYMBOL(__kmalloc_node);
572b4308 73670@@ -533,13 +547,83 @@ void kfree(const void *block)
58c5fc13
MT
73671 sp = slob_page(block);
73672 if (is_slob_page(sp)) {
73673 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73674- unsigned int *m = (unsigned int *)(block - align);
73675- slob_free(m, *m + align);
73676- } else
73677+ slob_t *m = (slob_t *)(block - align);
73678+ slob_free(m, m[0].units + align);
73679+ } else {
73680+ clear_slob_page(sp);
73681+ free_slob_page(sp);
73682+ sp->size = 0;
73683 put_page(&sp->page);
73684+ }
73685 }
73686 EXPORT_SYMBOL(kfree);
73687
572b4308 73688+bool is_usercopy_object(const void *ptr)
58c5fc13 73689+{
572b4308
MT
73690+ return false;
73691+}
58c5fc13
MT
73692+
73693+#ifdef CONFIG_PAX_USERCOPY
572b4308
MT
73694+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
73695+{
58c5fc13
MT
73696+ struct slob_page *sp;
73697+ const slob_t *free;
73698+ const void *base;
15a11c5b 73699+ unsigned long flags;
58c5fc13
MT
73700+
73701+ if (ZERO_OR_NULL_PTR(ptr))
572b4308 73702+ return "<null>";
58c5fc13
MT
73703+
73704+ if (!virt_addr_valid(ptr))
572b4308 73705+ return NULL;
58c5fc13
MT
73706+
73707+ sp = slob_page(ptr);
572b4308
MT
73708+ if (!PageSlab((struct page *)sp))
73709+ return NULL;
58c5fc13
MT
73710+
73711+ if (sp->size) {
73712+ base = page_address(&sp->page);
73713+ if (base <= ptr && n <= sp->size - (ptr - base))
572b4308
MT
73714+ return NULL;
73715+ return "<slob>";
58c5fc13
MT
73716+ }
73717+
73718+ /* some tricky double walking to find the chunk */
15a11c5b 73719+ spin_lock_irqsave(&slob_lock, flags);
58c5fc13
MT
73720+ base = (void *)((unsigned long)ptr & PAGE_MASK);
73721+ free = sp->free;
73722+
73723+ while (!slob_last(free) && (void *)free <= ptr) {
73724+ base = free + slob_units(free);
73725+ free = slob_next(free);
73726+ }
73727+
73728+ while (base < (void *)free) {
73729+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
73730+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
73731+ int offset;
73732+
73733+ if (ptr < base + align)
15a11c5b 73734+ break;
58c5fc13
MT
73735+
73736+ offset = ptr - base - align;
15a11c5b
MT
73737+ if (offset >= m) {
73738+ base += size;
73739+ continue;
58c5fc13 73740+ }
15a11c5b
MT
73741+
73742+ if (n > m - offset)
73743+ break;
73744+
73745+ spin_unlock_irqrestore(&slob_lock, flags);
572b4308 73746+ return NULL;
58c5fc13
MT
73747+ }
73748+
15a11c5b 73749+ spin_unlock_irqrestore(&slob_lock, flags);
572b4308 73750+ return "<slob>";
58c5fc13 73751+}
572b4308 73752+#endif
58c5fc13
MT
73753+
73754 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
73755 size_t ksize(const void *block)
73756 {
572b4308 73757@@ -552,10 +636,10 @@ size_t ksize(const void *block)
58c5fc13
MT
73758 sp = slob_page(block);
73759 if (is_slob_page(sp)) {
73760 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
73761- unsigned int *m = (unsigned int *)(block - align);
73762- return SLOB_UNITS(*m) * SLOB_UNIT;
73763+ slob_t *m = (slob_t *)(block - align);
73764+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
73765 } else
73766- return sp->page.private;
73767+ return sp->size;
73768 }
73769 EXPORT_SYMBOL(ksize);
73770
572b4308 73771@@ -571,8 +655,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
15a11c5b
MT
73772 {
73773 struct kmem_cache *c;
73774
572b4308 73775+#ifdef CONFIG_PAX_USERCOPY_SLABS
15a11c5b
MT
73776+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
73777+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
73778+#else
73779 c = slob_alloc(sizeof(struct kmem_cache),
73780 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
73781+#endif
73782
73783 if (c) {
73784 c->name = name;
572b4308 73785@@ -614,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
6e9df6a3
MT
73786
73787 lockdep_trace_alloc(flags);
58c5fc13 73788
572b4308 73789+#ifdef CONFIG_PAX_USERCOPY_SLABS
58c5fc13
MT
73790+ b = __kmalloc_node_align(c->size, flags, node, c->align);
73791+#else
73792 if (c->size < PAGE_SIZE) {
73793 b = slob_alloc(c->size, flags, c->align, node);
73794 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73795 SLOB_UNITS(c->size) * SLOB_UNIT,
73796 flags, node);
73797 } else {
73798+ struct slob_page *sp;
73799+
73800 b = slob_new_pages(flags, get_order(c->size), node);
73801+ sp = slob_page(b);
73802+ sp->size = c->size;
73803 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
73804 PAGE_SIZE << get_order(c->size),
73805 flags, node);
73806 }
73807+#endif
73808
73809 if (c->ctor)
73810 c->ctor(b);
572b4308 73811@@ -636,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
58c5fc13
MT
73812
73813 static void __kmem_cache_free(void *b, int size)
73814 {
73815- if (size < PAGE_SIZE)
73816+ struct slob_page *sp = slob_page(b);
73817+
73818+ if (is_slob_page(sp))
73819 slob_free(b, size);
73820- else
73821+ else {
73822+ clear_slob_page(sp);
73823+ free_slob_page(sp);
73824+ sp->size = 0;
73825 slob_free_pages(b, get_order(size));
73826+ }
73827 }
73828
73829 static void kmem_rcu_free(struct rcu_head *head)
572b4308 73830@@ -652,17 +755,31 @@ static void kmem_rcu_free(struct rcu_head *head)
58c5fc13
MT
73831
73832 void kmem_cache_free(struct kmem_cache *c, void *b)
73833 {
73834+ int size = c->size;
73835+
572b4308 73836+#ifdef CONFIG_PAX_USERCOPY_SLABS
58c5fc13
MT
73837+ if (size + c->align < PAGE_SIZE) {
73838+ size += c->align;
73839+ b -= c->align;
73840+ }
73841+#endif
73842+
73843 kmemleak_free_recursive(b, c->flags);
73844 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
73845 struct slob_rcu *slob_rcu;
73846- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
58c5fc13 73847- slob_rcu->size = c->size;
6892158b 73848+ slob_rcu = b + (size - sizeof(struct slob_rcu));
58c5fc13
MT
73849+ slob_rcu->size = size;
73850 call_rcu(&slob_rcu->head, kmem_rcu_free);
73851 } else {
73852- __kmem_cache_free(b, c->size);
73853+ __kmem_cache_free(b, size);
73854 }
73855
572b4308 73856+#ifdef CONFIG_PAX_USERCOPY_SLABS
15a11c5b
MT
73857+ trace_kfree(_RET_IP_, b);
73858+#else
58c5fc13 73859 trace_kmem_cache_free(_RET_IP_, b);
15a11c5b
MT
73860+#endif
73861+
73862 }
73863 EXPORT_SYMBOL(kmem_cache_free);
73864
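
The mm/slob.c hunks above change the bookkeeping so that both the size and the alignment are stored in the two slob_t slots in front of the returned block (m[0].units = size, m[1].units = align) and read back by kfree() and ksize(). A userspace sketch of the same layout idea, using a hypothetical malloc() wrapper rather than the SLOB allocator itself:

#include <stdio.h>
#include <stdlib.h>

#define HDR 2   /* two size_t slots ahead of the payload */

static void *hdr_alloc(size_t size, size_t align)
{
        size_t *m = malloc(HDR * sizeof(size_t) + size);
        if (!m)
                return NULL;
        m[0] = size;
        m[1] = align;
        return m + HDR;
}

static size_t hdr_size(const void *p)   /* ksize() analogue */
{
        return ((const size_t *)p)[-HDR];
}

static void hdr_free(void *p)           /* kfree() analogue */
{
        if (p)
                free((size_t *)p - HDR);
}

int main(void)
{
        void *p = hdr_alloc(100, 16);
        printf("stored size: %zu\n", hdr_size(p));
        hdr_free(p);
        return 0;
}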
fe2de317 73865diff --git a/mm/slub.c b/mm/slub.c
572b4308 73866index 71de9b5..a93d4a4 100644
fe2de317
MT
73867--- a/mm/slub.c
73868+++ b/mm/slub.c
c6e2a6c8 73869@@ -209,7 +209,7 @@ struct track {
15a11c5b
MT
73870
73871 enum track_item { TRACK_ALLOC, TRACK_FREE };
73872
73873-#ifdef CONFIG_SYSFS
73874+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
73875 static int sysfs_slab_add(struct kmem_cache *);
73876 static int sysfs_slab_alias(struct kmem_cache *, const char *);
73877 static void sysfs_slab_remove(struct kmem_cache *);
c6e2a6c8 73878@@ -538,7 +538,7 @@ static void print_track(const char *s, struct track *t)
bc901d79
MT
73879 if (!t->addr)
73880 return;
73881
73882- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
73883+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
73884 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
6e9df6a3
MT
73885 #ifdef CONFIG_STACKTRACE
73886 {
c6e2a6c8 73887@@ -2603,6 +2603,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
ae4e228f
MT
73888
73889 page = virt_to_head_page(x);
73890
73891+ BUG_ON(!PageSlab(page));
73892+
73893 slab_free(s, page, x, _RET_IP_);
73894
73895 trace_kmem_cache_free(_RET_IP_, x);
c6e2a6c8 73896@@ -2636,7 +2638,7 @@ static int slub_min_objects;
58c5fc13
MT
73897 * Merge control. If this is set then no merging of slab caches will occur.
73898 * (Could be removed. This was introduced to pacify the merge skeptics.)
73899 */
73900-static int slub_nomerge;
73901+static int slub_nomerge = 1;
73902
73903 /*
73904 * Calculate the order of allocation given an slab object size.
c6e2a6c8 73905@@ -3089,7 +3091,7 @@ static int kmem_cache_open(struct kmem_cache *s,
4c928ab7
MT
73906 else
73907 s->cpu_partial = 30;
73908
58c5fc13
MT
73909- s->refcount = 1;
73910+ atomic_set(&s->refcount, 1);
73911 #ifdef CONFIG_NUMA
73912 s->remote_node_defrag_ratio = 1000;
73913 #endif
c6e2a6c8 73914@@ -3193,8 +3195,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
58c5fc13
MT
73915 void kmem_cache_destroy(struct kmem_cache *s)
73916 {
73917 down_write(&slub_lock);
73918- s->refcount--;
73919- if (!s->refcount) {
73920+ if (atomic_dec_and_test(&s->refcount)) {
73921 list_del(&s->list);
4c928ab7 73922 up_write(&slub_lock);
58c5fc13 73923 if (kmem_cache_close(s)) {
572b4308
MT
73924@@ -3223,6 +3224,10 @@ static struct kmem_cache *kmem_cache;
73925 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
73926 #endif
73927
73928+#ifdef CONFIG_PAX_USERCOPY_SLABS
73929+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
73930+#endif
73931+
73932 static int __init setup_slub_min_order(char *str)
73933 {
73934 get_option(&str, &slub_min_order);
73935@@ -3337,6 +3342,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
73936 return kmalloc_dma_caches[index];
73937
73938 #endif
73939+
73940+#ifdef CONFIG_PAX_USERCOPY_SLABS
73941+ if (flags & SLAB_USERCOPY)
73942+ return kmalloc_usercopy_caches[index];
73943+
73944+#endif
73945+
73946 return kmalloc_caches[index];
73947 }
73948
73949@@ -3405,6 +3417,56 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
58c5fc13
MT
73950 EXPORT_SYMBOL(__kmalloc_node);
73951 #endif
73952
572b4308 73953+bool is_usercopy_object(const void *ptr)
58c5fc13 73954+{
572b4308
MT
73955+ struct page *page;
73956+ struct kmem_cache *s;
73957+
73958+ if (ZERO_OR_NULL_PTR(ptr))
73959+ return false;
73960+
73961+ if (!virt_addr_valid(ptr))
73962+ return false;
73963+
73964+ page = virt_to_head_page(ptr);
73965+
73966+ if (!PageSlab(page))
73967+ return false;
73968+
73969+ s = page->slab;
73970+ return s->flags & SLAB_USERCOPY;
73971+}
58c5fc13
MT
73972+
73973+#ifdef CONFIG_PAX_USERCOPY
572b4308
MT
73974+const char *check_heap_object(const void *ptr, unsigned long n, bool to)
73975+{
58c5fc13 73976+ struct page *page;
572b4308 73977+ struct kmem_cache *s;
58c5fc13
MT
73978+ unsigned long offset;
73979+
58c5fc13 73980+ if (ZERO_OR_NULL_PTR(ptr))
572b4308 73981+ return "<null>";
58c5fc13
MT
73982+
73983+ if (!virt_addr_valid(ptr))
572b4308 73984+ return NULL;
58c5fc13 73985+
16454cff 73986+ page = virt_to_head_page(ptr);
58c5fc13 73987+
572b4308
MT
73988+ if (!PageSlab(page))
73989+ return NULL;
58c5fc13
MT
73990+
73991+ s = page->slab;
71d190be 73992+ if (!(s->flags & SLAB_USERCOPY))
572b4308 73993+ return s->name;
71d190be 73994+
58c5fc13
MT
73995+ offset = (ptr - page_address(page)) % s->size;
73996+ if (offset <= s->objsize && n <= s->objsize - offset)
572b4308 73997+ return NULL;
58c5fc13 73998+
572b4308 73999+ return s->name;
58c5fc13 74000+}
572b4308 74001+#endif
58c5fc13
MT
74002+
74003 size_t ksize(const void *object)
74004 {
74005 struct page *page;
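Note: check_heap_object() above decides whether a usercopy of n bytes starting at ptr stays inside a single slab object. It computes the offset of ptr within its object and then tests n against the remaining space using a subtraction rather than an addition, so a huge n cannot wrap the comparison. A small self-contained sketch of that test, with invented names and sizes, is below.

/* Sketch of the overflow-safe span check: "offset + n <= objsize" rewritten
 * as "n <= objsize - offset" so the left-hand side can never overflow. */
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

static bool span_ok(size_t objsize, size_t offset, size_t n)
{
        if (offset > objsize)           /* pointer not inside any object */
                return false;
        return n <= objsize - offset;   /* remaining room, no addition that could wrap */
}

int main(void)
{
        printf("%d\n", span_ok(64, 16, 48));         /* 1: exactly fits */
        printf("%d\n", span_ok(64, 16, 49));         /* 0: one byte too many */
        printf("%d\n", span_ok(64, 16, (size_t)-1)); /* 0: a wrapping length is refused */
        return 0;
}

The same shape appears in the other usercopy checks added by this patch.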
572b4308 74006@@ -3679,7 +3741,7 @@ static void __init kmem_cache_bootstrap_fixup(struct kmem_cache *s)
bc901d79
MT
74007 int node;
74008
74009 list_add(&s->list, &slab_caches);
74010- s->refcount = -1;
74011+ atomic_set(&s->refcount, -1);
74012
74013 for_each_node_state(node, N_NORMAL_MEMORY) {
74014 struct kmem_cache_node *n = get_node(s, node);
572b4308 74015@@ -3799,17 +3861,17 @@ void __init kmem_cache_init(void)
71d190be
MT
74016
74017 /* Caches that are not of the two-to-the-power-of size */
74018 if (KMALLOC_MIN_SIZE <= 32) {
74019- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
74020+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
74021 caches++;
74022 }
74023
74024 if (KMALLOC_MIN_SIZE <= 64) {
74025- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
74026+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
74027 caches++;
74028 }
74029
74030 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
74031- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
74032+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
74033 caches++;
74034 }
74035
572b4308
MT
74036@@ -3851,6 +3913,22 @@ void __init kmem_cache_init(void)
74037 }
74038 }
74039 #endif
74040+
74041+#ifdef CONFIG_PAX_USERCOPY_SLABS
74042+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
74043+ struct kmem_cache *s = kmalloc_caches[i];
74044+
74045+ if (s && s->size) {
74046+ char *name = kasprintf(GFP_NOWAIT,
74047+ "usercopy-kmalloc-%d", s->objsize);
74048+
74049+ BUG_ON(!name);
74050+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
74051+ s->objsize, SLAB_USERCOPY);
74052+ }
74053+ }
74054+#endif
74055+
74056 printk(KERN_INFO
74057 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
74058 " CPUs=%d, Nodes=%d\n",
74059@@ -3877,7 +3955,7 @@ static int slab_unmergeable(struct kmem_cache *s)
58c5fc13
MT
74060 /*
74061 * We may have set a slab to be unmergeable during bootstrap.
74062 */
74063- if (s->refcount < 0)
74064+ if (atomic_read(&s->refcount) < 0)
74065 return 1;
74066
74067 return 0;
572b4308 74068@@ -3936,7 +4014,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
df50ba0c
MT
74069 down_write(&slub_lock);
74070 s = find_mergeable(size, align, flags, name, ctor);
58c5fc13 74071 if (s) {
58c5fc13
MT
74072- s->refcount++;
74073+ atomic_inc(&s->refcount);
74074 /*
74075 * Adjust the object sizes so that we clear
74076 * the complete object on kzalloc.
572b4308 74077@@ -3945,7 +4023,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
6892158b 74078 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
58c5fc13
MT
74079
74080 if (sysfs_slab_alias(s, name)) {
58c5fc13
MT
74081- s->refcount--;
74082+ atomic_dec(&s->refcount);
58c5fc13
MT
74083 goto err;
74084 }
6892158b 74085 up_write(&slub_lock);
572b4308 74086@@ -4074,7 +4152,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
15a11c5b
MT
74087 }
74088 #endif
74089
74090-#ifdef CONFIG_SYSFS
74091+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74092 static int count_inuse(struct page *page)
74093 {
74094 return page->inuse;
572b4308 74095@@ -4461,12 +4539,12 @@ static void resiliency_test(void)
15a11c5b
MT
74096 validate_slab_cache(kmalloc_caches[9]);
74097 }
74098 #else
74099-#ifdef CONFIG_SYSFS
74100+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74101 static void resiliency_test(void) {};
74102 #endif
74103 #endif
74104
74105-#ifdef CONFIG_SYSFS
74106+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74107 enum slab_stat_type {
74108 SL_ALL, /* All slabs */
74109 SL_PARTIAL, /* Only partially allocated slabs */
572b4308 74110@@ -4709,7 +4787,7 @@ SLAB_ATTR_RO(ctor);
58c5fc13
MT
74111
74112 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
74113 {
74114- return sprintf(buf, "%d\n", s->refcount - 1);
74115+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
74116 }
74117 SLAB_ATTR_RO(aliases);
74118
572b4308 74119@@ -5280,6 +5358,7 @@ static char *create_unique_id(struct kmem_cache *s)
15a11c5b
MT
74120 return name;
74121 }
74122
74123+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74124 static int sysfs_slab_add(struct kmem_cache *s)
74125 {
74126 int err;
572b4308 74127@@ -5342,6 +5421,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
15a11c5b
MT
74128 kobject_del(&s->kobj);
74129 kobject_put(&s->kobj);
74130 }
74131+#endif
74132
74133 /*
74134 * Need to buffer aliases during bootup until sysfs becomes
572b4308 74135@@ -5355,6 +5435,7 @@ struct saved_alias {
15a11c5b
MT
74136
74137 static struct saved_alias *alias_list;
74138
74139+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
74140 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
74141 {
74142 struct saved_alias *al;
572b4308 74143@@ -5377,6 +5458,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
15a11c5b
MT
74144 alias_list = al;
74145 return 0;
74146 }
74147+#endif
74148
74149 static int __init slab_sysfs_init(void)
74150 {
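Note: several of the mm/slub.c hunks above convert kmem_cache::refcount from a plain int to atomic_t and switch its users to atomic_inc()/atomic_dec_and_test()/atomic_read(). The sketch below shows the same take/put pattern with C11 atomics in userspace; the type and function names are made up for illustration.

/* Sketch: reference counting with an atomic counter, freeing on the last put. */
#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct cache_like {
        atomic_int refcount;
        const char *name;
};

static struct cache_like *cache_get(struct cache_like *c)
{
        atomic_fetch_add(&c->refcount, 1);      /* mirrors atomic_inc(&s->refcount) */
        return c;
}

static void cache_put(struct cache_like *c)
{
        /* mirrors atomic_dec_and_test(): only the caller that drops it to 0 frees */
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {
                printf("freeing %s\n", c->name);
                free(c);
        }
}

int main(void)
{
        struct cache_like *c = malloc(sizeof(*c));
        if (!c)
                return 1;
        atomic_init(&c->refcount, 1);
        c->name = "demo";
        cache_get(c);
        cache_put(c);
        cache_put(c);   /* last put frees */
        return 0;
}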
5e856224
MT
74151diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
74152index 1b7e22a..3fcd4f3 100644
74153--- a/mm/sparse-vmemmap.c
74154+++ b/mm/sparse-vmemmap.c
74155@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
74156 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74157 if (!p)
74158 return NULL;
74159- pud_populate(&init_mm, pud, p);
74160+ pud_populate_kernel(&init_mm, pud, p);
74161 }
74162 return pud;
74163 }
74164@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
74165 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
74166 if (!p)
74167 return NULL;
74168- pgd_populate(&init_mm, pgd, p);
74169+ pgd_populate_kernel(&init_mm, pgd, p);
74170 }
74171 return pgd;
74172 }
fe2de317 74173diff --git a/mm/swap.c b/mm/swap.c
c6e2a6c8 74174index 5c13f13..f1cfc13 100644
fe2de317
MT
74175--- a/mm/swap.c
74176+++ b/mm/swap.c
5e856224 74177@@ -30,6 +30,7 @@
15a11c5b
MT
74178 #include <linux/backing-dev.h>
74179 #include <linux/memcontrol.h>
74180 #include <linux/gfp.h>
74181+#include <linux/hugetlb.h>
74182
74183 #include "internal.h"
74184
5e856224 74185@@ -70,6 +71,8 @@ static void __put_compound_page(struct page *page)
15a11c5b
MT
74186
74187 __page_cache_release(page);
74188 dtor = get_compound_page_dtor(page);
74189+ if (!PageHuge(page))
74190+ BUG_ON(dtor != free_compound_page);
74191 (*dtor)(page);
74192 }
74193
fe2de317 74194diff --git a/mm/swapfile.c b/mm/swapfile.c
c1e3898a 74195index 38186d9..bfba6d3 100644
fe2de317
MT
74196--- a/mm/swapfile.c
74197+++ b/mm/swapfile.c
4c928ab7 74198@@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
8308f9c9
MT
74199
74200 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
74201 /* Activity counter to indicate that a swapon or swapoff has occurred */
74202-static atomic_t proc_poll_event = ATOMIC_INIT(0);
74203+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
74204
74205 static inline unsigned char swap_count(unsigned char ent)
74206 {
c6e2a6c8 74207@@ -1671,7 +1671,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
8308f9c9
MT
74208 }
74209 filp_close(swap_file, NULL);
74210 err = 0;
74211- atomic_inc(&proc_poll_event);
74212+ atomic_inc_unchecked(&proc_poll_event);
74213 wake_up_interruptible(&proc_poll_wait);
74214
74215 out_dput:
c6e2a6c8 74216@@ -1687,8 +1687,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
8308f9c9
MT
74217
74218 poll_wait(file, &proc_poll_wait, wait);
74219
6e9df6a3
MT
74220- if (seq->poll_event != atomic_read(&proc_poll_event)) {
74221- seq->poll_event = atomic_read(&proc_poll_event);
74222+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
74223+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
8308f9c9
MT
74224 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
74225 }
74226
c6e2a6c8 74227@@ -1786,7 +1786,7 @@ static int swaps_open(struct inode *inode, struct file *file)
6e9df6a3 74228 return ret;
8308f9c9 74229
6e9df6a3
MT
74230 seq = file->private_data;
74231- seq->poll_event = atomic_read(&proc_poll_event);
74232+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
74233 return 0;
8308f9c9
MT
74234 }
74235
c1e3898a 74236@@ -2123,7 +2123,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
66a7e928
MT
74237 (p->flags & SWP_DISCARDABLE) ? "D" : "");
74238
8308f9c9
MT
74239 mutex_unlock(&swapon_mutex);
74240- atomic_inc(&proc_poll_event);
74241+ atomic_inc_unchecked(&proc_poll_event);
74242 wake_up_interruptible(&proc_poll_wait);
74243
66a7e928 74244 if (S_ISREG(inode->i_mode))
fe2de317 74245diff --git a/mm/util.c b/mm/util.c
c6e2a6c8 74246index ae962b3..0bba886 100644
fe2de317
MT
74247--- a/mm/util.c
74248+++ b/mm/util.c
c6e2a6c8 74249@@ -284,6 +284,12 @@ done:
58c5fc13
MT
74250 void arch_pick_mmap_layout(struct mm_struct *mm)
74251 {
74252 mm->mmap_base = TASK_UNMAPPED_BASE;
74253+
74254+#ifdef CONFIG_PAX_RANDMMAP
74255+ if (mm->pax_flags & MF_PAX_RANDMMAP)
74256+ mm->mmap_base += mm->delta_mmap;
74257+#endif
74258+
74259 mm->get_unmapped_area = arch_get_unmapped_area;
74260 mm->unmap_area = arch_unmap_area;
74261 }
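Note: the arch_pick_mmap_layout() hunk adds a per-process random delta (mm->delta_mmap) to the mmap base when PAX_RANDMMAP is active for the task. A toy userspace calculation of such a randomized base is sketched below; the base address, entropy width and page size are illustrative constants, not the kernel's.

/* Sketch: derive a randomized mmap base by adding a page-aligned random delta. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT          12
#define TASK_UNMAPPED_BASE  0x40000000UL   /* illustrative 32-bit style base */
#define RAND_BITS           15             /* illustrative entropy width */

int main(void)
{
        unsigned long delta, base;

        srand((unsigned int)time(NULL));
        /* pick RAND_BITS of entropy and turn it into a page-aligned offset */
        delta = ((unsigned long)rand() & ((1UL << RAND_BITS) - 1)) << PAGE_SHIFT;
        base = TASK_UNMAPPED_BASE + delta;

        printf("mmap base: 0x%lx (delta 0x%lx)\n", base, delta);
        return 0;
}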
fe2de317 74262diff --git a/mm/vmalloc.c b/mm/vmalloc.c
c6e2a6c8 74263index 1196c77..2e608e8 100644
fe2de317
MT
74264--- a/mm/vmalloc.c
74265+++ b/mm/vmalloc.c
74266@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
ae4e228f
MT
74267
74268 pte = pte_offset_kernel(pmd, addr);
74269 do {
74270- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74271- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74272+
74273+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74274+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
74275+ BUG_ON(!pte_exec(*pte));
74276+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
74277+ continue;
74278+ }
74279+#endif
74280+
74281+ {
74282+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
74283+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
74284+ }
74285 } while (pte++, addr += PAGE_SIZE, addr != end);
74286 }
74287
fe2de317 74288@@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
58c5fc13
MT
74289 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
74290 {
74291 pte_t *pte;
74292+ int ret = -ENOMEM;
58c5fc13
MT
74293
74294 /*
74295 * nr is a running index into the array which helps higher level
fe2de317 74296@@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
58c5fc13
MT
74297 pte = pte_alloc_kernel(pmd, addr);
74298 if (!pte)
74299 return -ENOMEM;
74300+
ae4e228f 74301+ pax_open_kernel();
58c5fc13
MT
74302 do {
74303 struct page *page = pages[*nr];
74304
74305- if (WARN_ON(!pte_none(*pte)))
74306- return -EBUSY;
74307- if (WARN_ON(!page))
74308- return -ENOMEM;
ae4e228f 74309+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57199397 74310+ if (pgprot_val(prot) & _PAGE_NX)
ae4e228f
MT
74311+#endif
74312+
58c5fc13
MT
74313+ if (WARN_ON(!pte_none(*pte))) {
74314+ ret = -EBUSY;
74315+ goto out;
74316+ }
74317+ if (WARN_ON(!page)) {
74318+ ret = -ENOMEM;
74319+ goto out;
74320+ }
74321 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
74322 (*nr)++;
74323 } while (pte++, addr += PAGE_SIZE, addr != end);
74324- return 0;
74325+ ret = 0;
74326+out:
ae4e228f
MT
74327+ pax_close_kernel();
74328+ return ret;
74329 }
74330
74331 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
5e856224
MT
74332@@ -119,7 +144,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
74333 pmd_t *pmd;
74334 unsigned long next;
74335
74336- pmd = pmd_alloc(&init_mm, pud, addr);
74337+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
74338 if (!pmd)
74339 return -ENOMEM;
74340 do {
74341@@ -136,7 +161,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
74342 pud_t *pud;
74343 unsigned long next;
74344
74345- pud = pud_alloc(&init_mm, pgd, addr);
74346+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
74347 if (!pud)
74348 return -ENOMEM;
74349 do {
fe2de317 74350@@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void *x)
ae4e228f
MT
74351 * and fall back on vmalloc() if that fails. Others
74352 * just put it in the vmalloc space.
74353 */
74354-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
74355+#ifdef CONFIG_MODULES
74356+#ifdef MODULES_VADDR
74357 unsigned long addr = (unsigned long)x;
74358 if (addr >= MODULES_VADDR && addr < MODULES_END)
74359 return 1;
74360 #endif
58c5fc13 74361+
ae4e228f
MT
74362+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74363+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
74364+ return 1;
58c5fc13
MT
74365+#endif
74366+
ae4e228f
MT
74367+#endif
74368+
74369 return is_vmalloc_addr(x);
58c5fc13
MT
74370 }
74371
fe2de317 74372@@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
57199397
MT
74373
74374 if (!pgd_none(*pgd)) {
74375 pud_t *pud = pud_offset(pgd, addr);
74376+#ifdef CONFIG_X86
74377+ if (!pud_large(*pud))
74378+#endif
74379 if (!pud_none(*pud)) {
74380 pmd_t *pmd = pmd_offset(pud, addr);
74381+#ifdef CONFIG_X86
74382+ if (!pmd_large(*pmd))
74383+#endif
74384 if (!pmd_none(*pmd)) {
74385 pte_t *ptep, pte;
74386
c6e2a6c8
MT
74387@@ -332,6 +372,10 @@ static void purge_vmap_area_lazy(void);
74388 static struct vmap_area *alloc_vmap_area(unsigned long size,
74389 unsigned long align,
74390 unsigned long vstart, unsigned long vend,
74391+ int node, gfp_t gfp_mask) __size_overflow(1);
74392+static struct vmap_area *alloc_vmap_area(unsigned long size,
74393+ unsigned long align,
74394+ unsigned long vstart, unsigned long vend,
74395 int node, gfp_t gfp_mask)
74396 {
74397 struct vmap_area *va;
74398@@ -1320,6 +1364,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
ae4e228f 74399 struct vm_struct *area;
58c5fc13
MT
74400
74401 BUG_ON(in_interrupt());
74402+
df50ba0c 74403+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58c5fc13
MT
74404+ if (flags & VM_KERNEXEC) {
74405+ if (start != VMALLOC_START || end != VMALLOC_END)
74406+ return NULL;
df50ba0c
MT
74407+ start = (unsigned long)MODULES_EXEC_VADDR;
74408+ end = (unsigned long)MODULES_EXEC_END;
58c5fc13
MT
74409+ }
74410+#endif
74411+
74412 if (flags & VM_IOREMAP) {
74413 int bit = fls(size);
74414
c6e2a6c8 74415@@ -1552,6 +1606,11 @@ void *vmap(struct page **pages, unsigned int count,
ae4e228f 74416 if (count > totalram_pages)
58c5fc13
MT
74417 return NULL;
74418
df50ba0c 74419+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58c5fc13
MT
74420+ if (!(pgprot_val(prot) & _PAGE_NX))
74421+ flags |= VM_KERNEXEC;
74422+#endif
74423+
74424 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
74425 __builtin_return_address(0));
74426 if (!area)
c6e2a6c8 74427@@ -1653,6 +1712,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
ae4e228f 74428 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
4c928ab7 74429 goto fail;
58c5fc13 74430
df50ba0c 74431+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58c5fc13 74432+ if (!(pgprot_val(prot) & _PAGE_NX))
6e9df6a3
MT
74433+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
74434+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
58c5fc13
MT
74435+ else
74436+#endif
74437+
6e9df6a3
MT
74438 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
74439 start, end, node, gfp_mask, caller);
4c928ab7 74440 if (!area)
c6e2a6c8 74441@@ -1826,10 +1892,9 @@ EXPORT_SYMBOL(vzalloc_node);
58c5fc13
MT
74442 * For tight control over page level allocator and protection flags
74443 * use __vmalloc() instead.
74444 */
74445-
58c5fc13
MT
74446 void *vmalloc_exec(unsigned long size)
74447 {
ae4e228f
MT
74448- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
74449+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
58c5fc13
MT
74450 -1, __builtin_return_address(0));
74451 }
74452
c6e2a6c8 74453@@ -2124,6 +2189,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
6892158b
MT
74454 unsigned long uaddr = vma->vm_start;
74455 unsigned long usize = vma->vm_end - vma->vm_start;
74456
74457+ BUG_ON(vma->vm_mirror);
74458+
74459 if ((PAGE_SIZE-1) & (unsigned long)addr)
74460 return -EINVAL;
74461
c6e2a6c8
MT
74462@@ -2376,8 +2443,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
74463 return NULL;
74464 }
74465
74466- vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
74467- vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
74468+ vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
74469+ vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
74470 if (!vas || !vms)
74471 goto err_free2;
74472
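Note: the last vmalloc.c hunk replaces kzalloc(sizeof(x[0]) * nr, ...) with kcalloc(nr, sizeof(x[0]), ...), which checks the multiplication for overflow before allocating. A userspace wrapper with the same guard is sketched below; calloc() already performs this check internally, the explicit test is only there to show the condition.

/* Sketch: refuse an array allocation whose byte count would overflow size_t. */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

static void *zalloc_array(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)        /* n * size would wrap */
                return NULL;
        return calloc(n, size);                 /* zeroed, like kcalloc */
}

int main(void)
{
        void *ok  = zalloc_array(4, sizeof(long));
        void *bad = zalloc_array(SIZE_MAX, 2);

        printf("ok=%p bad=%p\n", ok, bad);      /* bad is NULL */
        free(ok);
        return 0;
}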
572b4308
MT
74473diff --git a/mm/vmscan.c b/mm/vmscan.c
74474index 4607cc6..be5bc0a 100644
74475--- a/mm/vmscan.c
74476+++ b/mm/vmscan.c
74477@@ -3013,7 +3013,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
74478 * them before going back to sleep.
74479 */
74480 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
74481- schedule();
74482+
74483+ if (!kthread_should_stop())
74484+ schedule();
74485+
74486 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
74487 } else {
74488 if (remaining)
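Note: the kswapd hunk only calls schedule() while kthread_should_stop() is still false, presumably so a kthread that is being stopped does not go to sleep and stall kthread_stop(). The generic shape of that loop in a kernel thread is sketched below as a common pattern; it is not code from this patch and assumes the usual kthread API.

/* Sketch: a kernel thread that re-checks the stop flag before sleeping. */
#include <linux/kthread.h>
#include <linux/sched.h>

static int worker_thread(void *data)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop()) {            /* raced with kthread_stop() */
                        __set_current_state(TASK_RUNNING);
                        break;
                }
                schedule();                             /* safe to sleep now */
                /* ...do one unit of work after waking... */
        }
        return 0;
}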
fe2de317 74489diff --git a/mm/vmstat.c b/mm/vmstat.c
c6e2a6c8 74490index 7db1b9b..e9f6b07 100644
fe2de317
MT
74491--- a/mm/vmstat.c
74492+++ b/mm/vmstat.c
bc901d79 74493@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
57199397
MT
74494 *
74495 * vm_stat contains the global counters
74496 */
4c928ab7
MT
74497-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
74498+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
57199397
MT
74499 EXPORT_SYMBOL(vm_stat);
74500
74501 #ifdef CONFIG_SMP
66a7e928 74502@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
57199397
MT
74503 v = p->vm_stat_diff[i];
74504 p->vm_stat_diff[i] = 0;
74505 local_irq_restore(flags);
74506- atomic_long_add(v, &zone->vm_stat[i]);
74507+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
74508 global_diff[i] += v;
74509 #ifdef CONFIG_NUMA
74510 /* 3 seconds idle till flush */
66a7e928 74511@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
57199397
MT
74512
74513 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
74514 if (global_diff[i])
74515- atomic_long_add(global_diff[i], &vm_stat[i]);
74516+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
74517 }
74518
74519 #endif
4c928ab7 74520@@ -1208,10 +1208,20 @@ static int __init setup_vmstat(void)
57199397
MT
74521 start_cpu_timer(cpu);
74522 #endif
74523 #ifdef CONFIG_PROC_FS
74524- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
74525- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
74526- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
74527- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
74528+ {
74529+ mode_t gr_mode = S_IRUGO;
74530+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74531+ gr_mode = S_IRUSR;
74532+#endif
74533+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
74534+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
bc901d79
MT
74535+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74536+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
74537+#else
57199397 74538+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
bc901d79 74539+#endif
57199397
MT
74540+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
74541+ }
74542 #endif
74543 return 0;
74544 }
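Note: the setup_vmstat() hunk chooses the /proc file mode at build time: world-readable by default, owner-only under GRKERNSEC_PROC_ADD, with vmstat optionally group-readable under GRKERNSEC_PROC_USERGROUP. That compile-time selection boils down to a helper like the one below; the function name is invented and the config macros are stand-ins you would pass with -D when building the example.

/* Sketch: pick a proc-style file mode from build-time hardening options. */
#include <stdio.h>
#include <sys/stat.h>

static mode_t proc_mode(int group_readable)
{
#ifdef GRKERNSEC_PROC_ADD
        mode_t mode = S_IRUSR;                          /* root only */
#else
        mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;      /* world readable */
#endif
#ifdef GRKERNSEC_PROC_USERGROUP
        if (group_readable)
                mode |= S_IRGRP;                        /* allow the configured group */
#endif
        return mode;
}

int main(void)
{
        printf("vmstat mode:   %o\n", (unsigned int)proc_mode(1));
        printf("zoneinfo mode: %o\n", (unsigned int)proc_mode(0));
        return 0;
}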
fe2de317 74545diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
5e856224 74546index efea35b..9c8dd0b 100644
fe2de317
MT
74547--- a/net/8021q/vlan.c
74548+++ b/net/8021q/vlan.c
5e856224 74549@@ -554,8 +554,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
df50ba0c
MT
74550 err = -EPERM;
74551 if (!capable(CAP_NET_ADMIN))
74552 break;
74553- if ((args.u.name_type >= 0) &&
74554- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
74555+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
74556 struct vlan_net *vn;
74557
74558 vn = net_generic(net, vlan_net_id);
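Note: the vlan_ioctl_handler hunk drops the "args.u.name_type >= 0" half of the range check because name_type is an unsigned field, so that comparison is always true (and typically draws a -Wtype-limits warning). The short program below demonstrates the tautology with a hypothetical unsigned field.

/* Sketch: an unsigned value compared >= 0 is always true. */
#include <stdio.h>

int main(void)
{
        unsigned int name_type = 0u - 1u;       /* wraps to UINT_MAX */

        if (name_type >= 0)                     /* always taken for unsigned types */
                printf("always true, name_type=%u\n", name_type);
        return 0;
}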
fe2de317 74559diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
5e856224 74560index fccae26..e7ece2f 100644
fe2de317
MT
74561--- a/net/9p/trans_fd.c
74562+++ b/net/9p/trans_fd.c
5e856224 74563@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
6e9df6a3
MT
74564 oldfs = get_fs();
74565 set_fs(get_ds());
74566 /* The cast to a user pointer is valid due to the set_fs() */
74567- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
74568+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
74569 set_fs(oldfs);
74570
74571 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
fe2de317 74572diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
5e856224 74573index 876fbe8..8bbea9f 100644
fe2de317
MT
74574--- a/net/atm/atm_misc.c
74575+++ b/net/atm/atm_misc.c
74576@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
58c5fc13
MT
74577 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
74578 return 1;
df50ba0c 74579 atm_return(vcc, truesize);
58c5fc13
MT
74580- atomic_inc(&vcc->stats->rx_drop);
74581+ atomic_inc_unchecked(&vcc->stats->rx_drop);
74582 return 0;
74583 }
df50ba0c 74584 EXPORT_SYMBOL(atm_charge);
fe2de317 74585@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
58c5fc13
MT
74586 }
74587 }
df50ba0c 74588 atm_return(vcc, guess);
58c5fc13
MT
74589- atomic_inc(&vcc->stats->rx_drop);
74590+ atomic_inc_unchecked(&vcc->stats->rx_drop);
74591 return NULL;
74592 }
df50ba0c
MT
74593 EXPORT_SYMBOL(atm_alloc_charge);
74594@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
58c5fc13 74595
df50ba0c 74596 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
58c5fc13
MT
74597 {
74598-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74599+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74600 __SONET_ITEMS
74601 #undef __HANDLE_ITEM
74602 }
df50ba0c 74603@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
58c5fc13 74604
df50ba0c 74605 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
58c5fc13 74606 {
df50ba0c 74607-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
58c5fc13
MT
74608+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
74609 __SONET_ITEMS
74610 #undef __HANDLE_ITEM
74611 }
fe2de317
MT
74612diff --git a/net/atm/lec.h b/net/atm/lec.h
74613index dfc0719..47c5322 100644
74614--- a/net/atm/lec.h
74615+++ b/net/atm/lec.h
15a11c5b
MT
74616@@ -48,7 +48,7 @@ struct lane2_ops {
74617 const u8 *tlvs, u32 sizeoftlvs);
74618 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
74619 const u8 *tlvs, u32 sizeoftlvs);
74620-};
74621+} __no_const;
74622
74623 /*
74624 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
fe2de317
MT
74625diff --git a/net/atm/mpc.h b/net/atm/mpc.h
74626index 0919a88..a23d54e 100644
74627--- a/net/atm/mpc.h
74628+++ b/net/atm/mpc.h
15a11c5b
MT
74629@@ -33,7 +33,7 @@ struct mpoa_client {
74630 struct mpc_parameters parameters; /* parameters for this client */
74631
74632 const struct net_device_ops *old_ops;
74633- struct net_device_ops new_ops;
74634+ net_device_ops_no_const new_ops;
74635 };
74636
74637
fe2de317
MT
74638diff --git a/net/atm/proc.c b/net/atm/proc.c
74639index 0d020de..011c7bb 100644
74640--- a/net/atm/proc.c
74641+++ b/net/atm/proc.c
74642@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
58c5fc13
MT
74643 const struct k_atm_aal_stats *stats)
74644 {
74645 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
df50ba0c
MT
74646- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
74647- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
74648- atomic_read(&stats->rx_drop));
74649+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
74650+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
74651+ atomic_read_unchecked(&stats->rx_drop));
58c5fc13
MT
74652 }
74653
74654 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
fe2de317
MT
74655diff --git a/net/atm/resources.c b/net/atm/resources.c
74656index 23f45ce..c748f1a 100644
74657--- a/net/atm/resources.c
74658+++ b/net/atm/resources.c
bc901d79 74659@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
58c5fc13
MT
74660 static void copy_aal_stats(struct k_atm_aal_stats *from,
74661 struct atm_aal_stats *to)
74662 {
74663-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
74664+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
74665 __AAL_STAT_ITEMS
74666 #undef __HANDLE_ITEM
74667 }
fe2de317 74668@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
58c5fc13
MT
74669 static void subtract_aal_stats(struct k_atm_aal_stats *from,
74670 struct atm_aal_stats *to)
74671 {
74672-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
74673+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
74674 __AAL_STAT_ITEMS
74675 #undef __HANDLE_ITEM
74676 }
4c928ab7 74677diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
c6e2a6c8 74678index a6d5d63..1cc6c2b 100644
4c928ab7
MT
74679--- a/net/batman-adv/bat_iv_ogm.c
74680+++ b/net/batman-adv/bat_iv_ogm.c
c6e2a6c8 74681@@ -539,7 +539,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
4c928ab7
MT
74682
74683 /* change sequence number to network order */
74684 batman_ogm_packet->seqno =
74685- htonl((uint32_t)atomic_read(&hard_iface->seqno));
74686+ htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
74687
74688 batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
74689 batman_ogm_packet->tt_crc = htons((uint16_t)
c6e2a6c8 74690@@ -559,7 +559,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
4c928ab7
MT
74691 else
74692 batman_ogm_packet->gw_flags = NO_FLAGS;
74693
74694- atomic_inc(&hard_iface->seqno);
74695+ atomic_inc_unchecked(&hard_iface->seqno);
74696
74697 slide_own_bcast_window(hard_iface);
c6e2a6c8
MT
74698 bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
74699@@ -917,7 +917,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
4c928ab7
MT
74700 return;
74701
74702 /* could be changed by schedule_own_packet() */
74703- if_incoming_seqno = atomic_read(&if_incoming->seqno);
74704+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
74705
74706 has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
74707
fe2de317 74708diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
c6e2a6c8 74709index 3778977..f6a9450 100644
fe2de317
MT
74710--- a/net/batman-adv/hard-interface.c
74711+++ b/net/batman-adv/hard-interface.c
c6e2a6c8 74712@@ -328,8 +328,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
66a7e928
MT
74713 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
74714 dev_add_pack(&hard_iface->batman_adv_ptype);
74715
74716- atomic_set(&hard_iface->seqno, 1);
74717- atomic_set(&hard_iface->frag_seqno, 1);
74718+ atomic_set_unchecked(&hard_iface->seqno, 1);
74719+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
74720 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
74721 hard_iface->net_dev->name);
74722
fe2de317 74723diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
c6e2a6c8 74724index a5590f4..8d31969 100644
fe2de317
MT
74725--- a/net/batman-adv/soft-interface.c
74726+++ b/net/batman-adv/soft-interface.c
5e856224 74727@@ -645,7 +645,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
8308f9c9
MT
74728
74729 /* set broadcast sequence number */
74730 bcast_packet->seqno =
74731- htonl(atomic_inc_return(&bat_priv->bcast_seqno));
74732+ htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
74733
6e9df6a3 74734 add_bcast_packet_to_list(bat_priv, skb, 1);
8308f9c9 74735
c6e2a6c8 74736@@ -841,7 +841,7 @@ struct net_device *softif_create(const char *name)
8308f9c9
MT
74737 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
74738
74739 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
74740- atomic_set(&bat_priv->bcast_seqno, 1);
74741+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
6e9df6a3
MT
74742 atomic_set(&bat_priv->ttvn, 0);
74743 atomic_set(&bat_priv->tt_local_changes, 0);
74744 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
fe2de317 74745diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
c6e2a6c8 74746index 302efb5..1590365 100644
fe2de317
MT
74747--- a/net/batman-adv/types.h
74748+++ b/net/batman-adv/types.h
66a7e928 74749@@ -38,8 +38,8 @@ struct hard_iface {
8308f9c9
MT
74750 int16_t if_num;
74751 char if_status;
74752 struct net_device *net_dev;
74753- atomic_t seqno;
74754- atomic_t frag_seqno;
74755+ atomic_unchecked_t seqno;
74756+ atomic_unchecked_t frag_seqno;
74757 unsigned char *packet_buff;
74758 int packet_len;
74759 struct kobject *hardif_obj;
c6e2a6c8 74760@@ -155,7 +155,7 @@ struct bat_priv {
8308f9c9
MT
74761 atomic_t orig_interval; /* uint */
74762 atomic_t hop_penalty; /* uint */
74763 atomic_t log_level; /* uint */
74764- atomic_t bcast_seqno;
74765+ atomic_unchecked_t bcast_seqno;
74766 atomic_t bcast_queue_left;
74767 atomic_t batman_queue_left;
4c928ab7 74768 atomic_t ttvn; /* translation table version number */
fe2de317 74769diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
c6e2a6c8 74770index 676f6a6..3b4e668 100644
fe2de317
MT
74771--- a/net/batman-adv/unicast.c
74772+++ b/net/batman-adv/unicast.c
74773@@ -264,7 +264,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
66a7e928
MT
74774 frag1->flags = UNI_FRAG_HEAD | large_tail;
74775 frag2->flags = large_tail;
74776
74777- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
74778+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
74779 frag1->seqno = htons(seqno - 1);
74780 frag2->seqno = htons(seqno);
74781
fe2de317 74782diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
c6e2a6c8 74783index 5238b6b..c9798ce 100644
fe2de317
MT
74784--- a/net/bluetooth/hci_conn.c
74785+++ b/net/bluetooth/hci_conn.c
c6e2a6c8 74786@@ -233,7 +233,7 @@ void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
6e9df6a3
MT
74787 memset(&cp, 0, sizeof(cp));
74788
74789 cp.handle = cpu_to_le16(conn->handle);
74790- memcpy(cp.ltk, ltk, sizeof(ltk));
74791+ memcpy(cp.ltk, ltk, sizeof(cp.ltk));
74792
74793 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
74794 }
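Note: the hci_le_ltk_reply() fix replaces sizeof(ltk) with sizeof(cp.ltk). The parameter is declared as "u8 ltk[16]" (see the hunk header), which decays to a pointer, so sizeof(ltk) is only 4 or 8 and the copied key was truncated. The short program below reproduces the pitfall with made-up names.

/* Sketch: sizeof on an array parameter yields the pointer size, not 16. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct reply {
        uint8_t ltk[16];
};

static void show_sizes(uint8_t ltk[16])        /* really: uint8_t *ltk */
{
        struct reply cp;

        printf("sizeof(ltk)    = %zu\n", sizeof(ltk));      /* pointer size */
        printf("sizeof(cp.ltk) = %zu\n", sizeof(cp.ltk));   /* 16 */
        memcpy(cp.ltk, ltk, sizeof(cp.ltk));   /* the correct, full-length copy */
}

int main(void)
{
        uint8_t key[16] = { 0 };
        show_sizes(key);
        return 0;
}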
4c928ab7 74795diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
c6e2a6c8 74796index 6f9c25b..d19fd66 100644
4c928ab7
MT
74797--- a/net/bluetooth/l2cap_core.c
74798+++ b/net/bluetooth/l2cap_core.c
c6e2a6c8 74799@@ -2466,8 +2466,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
4c928ab7
MT
74800 break;
74801
74802 case L2CAP_CONF_RFC:
74803- if (olen == sizeof(rfc))
74804- memcpy(&rfc, (void *)val, olen);
74805+ if (olen != sizeof(rfc))
74806+ break;
74807+
74808+ memcpy(&rfc, (void *)val, olen);
74809
74810 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
74811 rfc.mode != chan->mode)
c6e2a6c8 74812@@ -2585,8 +2587,10 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4c928ab7
MT
74813
74814 switch (type) {
74815 case L2CAP_CONF_RFC:
74816- if (olen == sizeof(rfc))
74817- memcpy(&rfc, (void *)val, olen);
74818+ if (olen != sizeof(rfc))
74819+ break;
74820+
74821+ memcpy(&rfc, (void *)val, olen);
74822 goto done;
74823 }
74824 }
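Note: both l2cap hunks stop processing an RFC configuration option unless its advertised length olen is exactly sizeof(rfc); previously a mismatched length silently skipped the copy while execution still fell through to code that uses rfc. A generic option-parsing sketch with invented structure and function names follows.

/* Sketch: only accept a fixed-size option when the advertised length matches. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct rfc_opt {
        uint8_t  mode;
        uint8_t  txwin_size;
        uint16_t max_pdu;
};

static int parse_rfc(const void *val, size_t olen, struct rfc_opt *out)
{
        if (olen != sizeof(*out))       /* reject short or oversized options */
                return -1;
        memcpy(out, val, sizeof(*out)); /* safe: length verified above */
        return 0;
}

int main(void)
{
        struct rfc_opt wire = { .mode = 3, .txwin_size = 8, .max_pdu = 512 }, parsed;

        printf("good: %d\n", parse_rfc(&wire, sizeof(wire), &parsed));  /*  0 */
        printf("bad : %d\n", parse_rfc(&wire, 2, &parsed));             /* -1 */
        return 0;
}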
fe2de317 74825diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
5e856224 74826index 5fe2ff3..10968b5 100644
fe2de317
MT
74827--- a/net/bridge/netfilter/ebtables.c
74828+++ b/net/bridge/netfilter/ebtables.c
5e856224 74829@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ae4e228f
MT
74830 tmp.valid_hooks = t->table->valid_hooks;
74831 }
74832 mutex_unlock(&ebt_mutex);
74833- if (copy_to_user(user, &tmp, *len) != 0){
74834+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
74835 BUGPRINT("c2u Didn't work\n");
74836 ret = -EFAULT;
74837 break;
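Note: the ebtables hunk refuses the copy when the user-supplied *len exceeds sizeof(tmp), so copy_to_user() can never read past the kernel-side buffer and leak adjacent memory. A userspace analogue of that guard is sketched below; copy_out() is an invented stand-in for copy_to_user().

/* Sketch: cap a caller-supplied length at the size of the source object. */
#include <stdio.h>
#include <string.h>

struct info {
        unsigned int valid_hooks;
        unsigned int entries;
};

/* stand-in for copy_to_user(): just a bounded memcpy here */
static int copy_out(void *dst, const struct info *src, size_t len)
{
        if (len > sizeof(*src))         /* would read past src */
                return -1;
        memcpy(dst, src, len);
        return 0;
}

int main(void)
{
        struct info tmp = { .valid_hooks = 0x7, .entries = 42 };
        unsigned char buf[64];

        printf("ok : %d\n", copy_out(buf, &tmp, sizeof(tmp)));   /*  0 */
        printf("bad: %d\n", copy_out(buf, &tmp, sizeof(buf)));   /* -1: over-read refused */
        return 0;
}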
572b4308
MT
74838diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
74839index aa6f716..7bf4c21 100644
74840--- a/net/caif/caif_dev.c
74841+++ b/net/caif/caif_dev.c
74842@@ -562,9 +562,9 @@ static int __init caif_device_init(void)
74843
74844 static void __exit caif_device_exit(void)
74845 {
74846- unregister_pernet_subsys(&caif_net_ops);
74847 unregister_netdevice_notifier(&caif_device_notifier);
74848 dev_remove_pack(&caif_packet_type);
74849+ unregister_pernet_subsys(&caif_net_ops);
74850 }
74851
74852 module_init(caif_device_init);
fe2de317 74853diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
4c928ab7 74854index 5cf5222..6f704ad 100644
fe2de317
MT
74855--- a/net/caif/cfctrl.c
74856+++ b/net/caif/cfctrl.c
66a7e928
MT
74857@@ -9,6 +9,7 @@
74858 #include <linux/stddef.h>
74859 #include <linux/spinlock.h>
74860 #include <linux/slab.h>
74861+#include <linux/sched.h>
74862 #include <net/caif/caif_layer.h>
74863 #include <net/caif/cfpkt.h>
74864 #include <net/caif/cfctrl.h>
4c928ab7
MT
74865@@ -42,8 +43,8 @@ struct cflayer *cfctrl_create(void)
74866 memset(&dev_info, 0, sizeof(dev_info));
8308f9c9 74867 dev_info.id = 0xff;
8308f9c9
MT
74868 cfsrvl_init(&this->serv, 0, &dev_info, false);
74869- atomic_set(&this->req_seq_no, 1);
74870- atomic_set(&this->rsp_seq_no, 1);
74871+ atomic_set_unchecked(&this->req_seq_no, 1);
74872+ atomic_set_unchecked(&this->rsp_seq_no, 1);
74873 this->serv.layer.receive = cfctrl_recv;
74874 sprintf(this->serv.layer.name, "ctrl");
74875 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
4c928ab7 74876@@ -129,8 +130,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
8308f9c9
MT
74877 struct cfctrl_request_info *req)
74878 {
15a11c5b 74879 spin_lock_bh(&ctrl->info_list_lock);
8308f9c9
MT
74880- atomic_inc(&ctrl->req_seq_no);
74881- req->sequence_no = atomic_read(&ctrl->req_seq_no);
74882+ atomic_inc_unchecked(&ctrl->req_seq_no);
74883+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
74884 list_add_tail(&req->list, &ctrl->list);
15a11c5b 74885 spin_unlock_bh(&ctrl->info_list_lock);
8308f9c9 74886 }
4c928ab7 74887@@ -148,7 +149,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
8308f9c9
MT
74888 if (p != first)
74889 pr_warn("Requests are not received in order\n");
74890
74891- atomic_set(&ctrl->rsp_seq_no,
74892+ atomic_set_unchecked(&ctrl->rsp_seq_no,
74893 p->sequence_no);
74894 list_del(&p->list);
74895 goto out;
4c928ab7
MT
74896diff --git a/net/can/gw.c b/net/can/gw.c
74897index 3d79b12..8de85fa 100644
74898--- a/net/can/gw.c
74899+++ b/net/can/gw.c
74900@@ -96,7 +96,7 @@ struct cf_mod {
74901 struct {
74902 void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
74903 void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
74904- } csumfunc;
74905+ } __no_const csumfunc;
74906 };
66a7e928 74907
66a7e928 74908
fe2de317 74909diff --git a/net/compat.c b/net/compat.c
c6e2a6c8 74910index e055708..3f80795 100644
fe2de317
MT
74911--- a/net/compat.c
74912+++ b/net/compat.c
4c928ab7 74913@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
6e9df6a3
MT
74914 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
74915 __get_user(kmsg->msg_flags, &umsg->msg_flags))
74916 return -EFAULT;
74917- kmsg->msg_name = compat_ptr(tmp1);
74918- kmsg->msg_iov = compat_ptr(tmp2);
74919- kmsg->msg_control = compat_ptr(tmp3);
74920+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
74921+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
74922+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
74923 return 0;
74924 }
74925
4c928ab7 74926@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
6e9df6a3
MT
74927
74928 if (kern_msg->msg_namelen) {
74929 if (mode == VERIFY_READ) {
74930- int err = move_addr_to_kernel(kern_msg->msg_name,
74931+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
74932 kern_msg->msg_namelen,
74933 kern_address);
74934 if (err < 0)
4c928ab7 74935@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
6e9df6a3
MT
74936 kern_msg->msg_name = NULL;
74937
74938 tot_len = iov_from_user_compat_to_kern(kern_iov,
74939- (struct compat_iovec __user *)kern_msg->msg_iov,
74940+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
74941 kern_msg->msg_iovlen);
74942 if (tot_len >= 0)
74943 kern_msg->msg_iov = kern_iov;
4c928ab7 74944@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
6e9df6a3
MT
74945
74946 #define CMSG_COMPAT_FIRSTHDR(msg) \
74947 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
74948- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
74949+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
74950 (struct compat_cmsghdr __user *)NULL)
74951
74952 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
74953 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
74954 (ucmlen) <= (unsigned long) \
74955 ((mhdr)->msg_controllen - \
74956- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
74957+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
74958
74959 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
74960 struct compat_cmsghdr __user *cmsg, int cmsg_len)
74961 {
74962 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
74963- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
74964+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
74965 msg->msg_controllen)
74966 return NULL;
74967 return (struct compat_cmsghdr __user *)ptr;
c6e2a6c8
MT
74968@@ -219,7 +219,7 @@ Efault:
74969
74970 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
6e9df6a3 74971 {
6e9df6a3
MT
74972- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74973+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74974 struct compat_cmsghdr cmhdr;
74975 int cmlen;
74976
c6e2a6c8 74977@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
6e9df6a3
MT
74978
74979 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
74980 {
74981- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
74982+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
74983 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
74984 int fdnum = scm->fp->count;
74985 struct file **fp = scm->fp->fp;
c6e2a6c8 74986@@ -372,7 +372,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
6e9df6a3
MT
74987 return -EFAULT;
74988 old_fs = get_fs();
74989 set_fs(KERNEL_DS);
74990- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
74991+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
74992 set_fs(old_fs);
74993
74994 return err;
c6e2a6c8 74995@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
6e9df6a3
MT
74996 len = sizeof(ktime);
74997 old_fs = get_fs();
74998 set_fs(KERNEL_DS);
74999- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
75000+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
75001 set_fs(old_fs);
75002
75003 if (!err) {
c6e2a6c8 75004@@ -576,7 +576,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
75005 case MCAST_JOIN_GROUP:
75006 case MCAST_LEAVE_GROUP:
75007 {
75008- struct compat_group_req __user *gr32 = (void *)optval;
75009+ struct compat_group_req __user *gr32 = (void __user *)optval;
75010 struct group_req __user *kgr =
75011 compat_alloc_user_space(sizeof(struct group_req));
75012 u32 interface;
c6e2a6c8 75013@@ -597,7 +597,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
75014 case MCAST_BLOCK_SOURCE:
75015 case MCAST_UNBLOCK_SOURCE:
75016 {
75017- struct compat_group_source_req __user *gsr32 = (void *)optval;
75018+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
75019 struct group_source_req __user *kgsr = compat_alloc_user_space(
75020 sizeof(struct group_source_req));
75021 u32 interface;
c6e2a6c8 75022@@ -618,7 +618,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
75023 }
75024 case MCAST_MSFILTER:
75025 {
75026- struct compat_group_filter __user *gf32 = (void *)optval;
75027+ struct compat_group_filter __user *gf32 = (void __user *)optval;
75028 struct group_filter __user *kgf;
75029 u32 interface, fmode, numsrc;
75030
c6e2a6c8 75031@@ -656,7 +656,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
6e9df6a3
MT
75032 char __user *optval, int __user *optlen,
75033 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
75034 {
75035- struct compat_group_filter __user *gf32 = (void *)optval;
75036+ struct compat_group_filter __user *gf32 = (void __user *)optval;
75037 struct group_filter __user *kgf;
75038 int __user *koptlen;
75039 u32 interface, fmode, numsrc;
fe2de317 75040diff --git a/net/core/datagram.c b/net/core/datagram.c
c6e2a6c8 75041index e4fbfd6..6a6ac94 100644
fe2de317
MT
75042--- a/net/core/datagram.c
75043+++ b/net/core/datagram.c
c6e2a6c8 75044@@ -290,7 +290,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
8308f9c9
MT
75045 }
75046
75047 kfree_skb(skb);
75048- atomic_inc(&sk->sk_drops);
75049+ atomic_inc_unchecked(&sk->sk_drops);
75050 sk_mem_reclaim_partial(sk);
75051
75052 return err;
fe2de317 75053diff --git a/net/core/dev.c b/net/core/dev.c
572b4308 75054index 533c586..f78a55f 100644
fe2de317
MT
75055--- a/net/core/dev.c
75056+++ b/net/core/dev.c
c6e2a6c8 75057@@ -1136,9 +1136,13 @@ void dev_load(struct net *net, const char *name)
16454cff
MT
75058 if (no_module && capable(CAP_NET_ADMIN))
75059 no_module = request_module("netdev-%s", name);
75060 if (no_module && capable(CAP_SYS_MODULE)) {
71d190be
MT
75061+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75062+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
75063+#else
75064 if (!request_module("%s", name))
c6e2a6c8
MT
75065 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
75066 name);
71d190be
MT
75067+#endif
75068 }
75069 }
75070 EXPORT_SYMBOL(dev_load);
c6e2a6c8 75071@@ -1602,7 +1606,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
4c928ab7
MT
75072 {
75073 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
75074 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
75075- atomic_long_inc(&dev->rx_dropped);
75076+ atomic_long_inc_unchecked(&dev->rx_dropped);
75077 kfree_skb(skb);
75078 return NET_RX_DROP;
75079 }
c6e2a6c8 75080@@ -1612,7 +1616,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
4c928ab7
MT
75081 nf_reset(skb);
75082
75083 if (unlikely(!is_skb_forwardable(dev, skb))) {
75084- atomic_long_inc(&dev->rx_dropped);
75085+ atomic_long_inc_unchecked(&dev->rx_dropped);
75086 kfree_skb(skb);
75087 return NET_RX_DROP;
75088 }
c6e2a6c8 75089@@ -2042,7 +2046,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
66a7e928 75090
15a11c5b
MT
75091 struct dev_gso_cb {
75092 void (*destructor)(struct sk_buff *skb);
75093-};
75094+} __no_const;
66a7e928 75095
15a11c5b
MT
75096 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
75097
572b4308 75098@@ -2877,7 +2881,7 @@ enqueue:
4c928ab7
MT
75099
75100 local_irq_restore(flags);
75101
75102- atomic_long_inc(&skb->dev->rx_dropped);
75103+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75104 kfree_skb(skb);
75105 return NET_RX_DROP;
75106 }
572b4308 75107@@ -2949,7 +2953,7 @@ int netif_rx_ni(struct sk_buff *skb)
ae4e228f
MT
75108 }
75109 EXPORT_SYMBOL(netif_rx_ni);
75110
75111-static void net_tx_action(struct softirq_action *h)
75112+static void net_tx_action(void)
75113 {
75114 struct softnet_data *sd = &__get_cpu_var(softnet_data);
75115
572b4308 75116@@ -3237,7 +3241,7 @@ ncls:
4c928ab7
MT
75117 if (pt_prev) {
75118 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
75119 } else {
75120- atomic_long_inc(&skb->dev->rx_dropped);
75121+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
75122 kfree_skb(skb);
75123 /* Jamal, now you will not able to escape explaining
75124 * me how you were going to use this. :-)
572b4308 75125@@ -3797,7 +3801,7 @@ void netif_napi_del(struct napi_struct *napi)
57199397 75126 }
ae4e228f
MT
75127 EXPORT_SYMBOL(netif_napi_del);
75128
ae4e228f
MT
75129-static void net_rx_action(struct softirq_action *h)
75130+static void net_rx_action(void)
75131 {
57199397 75132 struct softnet_data *sd = &__get_cpu_var(softnet_data);
ae4e228f 75133 unsigned long time_limit = jiffies + 2;
572b4308 75134@@ -4267,8 +4271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
c6e2a6c8
MT
75135 else
75136 seq_printf(seq, "%04x", ntohs(pt->type));
75137
75138+#ifdef CONFIG_GRKERNSEC_HIDESYM
75139+ seq_printf(seq, " %-8s %p\n",
75140+ pt->dev ? pt->dev->name : "", NULL);
75141+#else
75142 seq_printf(seq, " %-8s %pF\n",
75143 pt->dev ? pt->dev->name : "", pt->func);
75144+#endif
75145 }
75146
75147 return 0;
572b4308 75148@@ -5818,7 +5827,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
4c928ab7
MT
75149 } else {
75150 netdev_stats_to_stats64(storage, &dev->stats);
75151 }
75152- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
75153+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
75154 return storage;
75155 }
75156 EXPORT_SYMBOL(dev_get_stats);
fe2de317 75157diff --git a/net/core/flow.c b/net/core/flow.c
4c928ab7 75158index e318c7e..168b1d0 100644
fe2de317
MT
75159--- a/net/core/flow.c
75160+++ b/net/core/flow.c
6e9df6a3 75161@@ -61,7 +61,7 @@ struct flow_cache {
8308f9c9
MT
75162 struct timer_list rnd_timer;
75163 };
75164
75165-atomic_t flow_cache_genid = ATOMIC_INIT(0);
75166+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
75167 EXPORT_SYMBOL(flow_cache_genid);
75168 static struct flow_cache flow_cache_global;
75169 static struct kmem_cache *flow_cachep __read_mostly;
fe2de317 75170@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
8308f9c9
MT
75171
75172 static int flow_entry_valid(struct flow_cache_entry *fle)
75173 {
75174- if (atomic_read(&flow_cache_genid) != fle->genid)
75175+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
75176 return 0;
75177 if (fle->object && !fle->object->ops->check(fle->object))
75178 return 0;
fe2de317 75179@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
8308f9c9
MT
75180 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
75181 fcp->hash_count++;
75182 }
75183- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
75184+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
75185 flo = fle->object;
75186 if (!flo)
75187 goto ret_object;
6e9df6a3 75188@@ -280,7 +280,7 @@ nocache:
8308f9c9
MT
75189 }
75190 flo = resolver(net, key, family, dir, flo, ctx);
75191 if (fle) {
75192- fle->genid = atomic_read(&flow_cache_genid);
75193+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
75194 if (!IS_ERR(flo))
75195 fle->object = flo;
75196 else
fe2de317 75197diff --git a/net/core/iovec.c b/net/core/iovec.c
c6e2a6c8 75198index 7e7aeb0..2a998cb 100644
fe2de317
MT
75199--- a/net/core/iovec.c
75200+++ b/net/core/iovec.c
c6e2a6c8 75201@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
6e9df6a3
MT
75202 if (m->msg_namelen) {
75203 if (mode == VERIFY_READ) {
75204 void __user *namep;
75205- namep = (void __user __force *) m->msg_name;
75206+ namep = (void __force_user *) m->msg_name;
75207 err = move_addr_to_kernel(namep, m->msg_namelen,
75208 address);
75209 if (err < 0)
c6e2a6c8 75210@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
6e9df6a3
MT
75211 }
75212
75213 size = m->msg_iovlen * sizeof(struct iovec);
75214- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
75215+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
75216 return -EFAULT;
75217
75218 m->msg_iov = iov;
fe2de317 75219diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
c6e2a6c8 75220index 90430b7..0032ec0 100644
fe2de317
MT
75221--- a/net/core/rtnetlink.c
75222+++ b/net/core/rtnetlink.c
c6e2a6c8 75223@@ -56,7 +56,7 @@ struct rtnl_link {
15a11c5b
MT
75224 rtnl_doit_func doit;
75225 rtnl_dumpit_func dumpit;
6e9df6a3 75226 rtnl_calcit_func calcit;
15a11c5b
MT
75227-};
75228+} __no_const;
75229
75230 static DEFINE_MUTEX(rtnl_mutex);
5e856224 75231
fe2de317 75232diff --git a/net/core/scm.c b/net/core/scm.c
c6e2a6c8 75233index 611c5ef..88f6d6d 100644
fe2de317
MT
75234--- a/net/core/scm.c
75235+++ b/net/core/scm.c
c6e2a6c8 75236@@ -219,7 +219,7 @@ EXPORT_SYMBOL(__scm_send);
6e9df6a3
MT
75237 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
75238 {
75239 struct cmsghdr __user *cm
75240- = (__force struct cmsghdr __user *)msg->msg_control;
75241+ = (struct cmsghdr __force_user *)msg->msg_control;
75242 struct cmsghdr cmhdr;
75243 int cmlen = CMSG_LEN(len);
75244 int err;
c6e2a6c8 75245@@ -242,7 +242,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
6e9df6a3
MT
75246 err = -EFAULT;
75247 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
75248 goto out;
75249- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
75250+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
75251 goto out;
75252 cmlen = CMSG_SPACE(len);
75253 if (msg->msg_controllen < cmlen)
c6e2a6c8 75254@@ -258,7 +258,7 @@ EXPORT_SYMBOL(put_cmsg);
6e9df6a3
MT
75255 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
75256 {
75257 struct cmsghdr __user *cm
75258- = (__force struct cmsghdr __user*)msg->msg_control;
75259+ = (struct cmsghdr __force_user *)msg->msg_control;
75260
75261 int fdmax = 0;
75262 int fdnum = scm->fp->count;
c6e2a6c8 75263@@ -278,7 +278,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
6e9df6a3
MT
75264 if (fdnum < fdmax)
75265 fdmax = fdnum;
75266
75267- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
75268+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
75269 i++, cmfptr++)
75270 {
75271 int new_fd;
fe2de317 75272diff --git a/net/core/sock.c b/net/core/sock.c
572b4308 75273index 0f8402e..f0b6338 100644
fe2de317
MT
75274--- a/net/core/sock.c
75275+++ b/net/core/sock.c
c6e2a6c8 75276@@ -340,7 +340,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
4c928ab7
MT
75277 struct sk_buff_head *list = &sk->sk_receive_queue;
75278
75279 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
8308f9c9
MT
75280- atomic_inc(&sk->sk_drops);
75281+ atomic_inc_unchecked(&sk->sk_drops);
6e9df6a3 75282 trace_sock_rcvqueue_full(sk, skb);
8308f9c9
MT
75283 return -ENOMEM;
75284 }
c6e2a6c8 75285@@ -350,7 +350,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
75286 return err;
75287
75288 if (!sk_rmem_schedule(sk, skb->truesize)) {
75289- atomic_inc(&sk->sk_drops);
75290+ atomic_inc_unchecked(&sk->sk_drops);
75291 return -ENOBUFS;
75292 }
75293
c6e2a6c8 75294@@ -370,7 +370,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9
MT
75295 skb_dst_force(skb);
75296
75297 spin_lock_irqsave(&list->lock, flags);
75298- skb->dropcount = atomic_read(&sk->sk_drops);
75299+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
75300 __skb_queue_tail(list, skb);
75301 spin_unlock_irqrestore(&list->lock, flags);
75302
c6e2a6c8 75303@@ -390,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
8308f9c9
MT
75304 skb->dev = NULL;
75305
75306 if (sk_rcvqueues_full(sk, skb)) {
75307- atomic_inc(&sk->sk_drops);
75308+ atomic_inc_unchecked(&sk->sk_drops);
75309 goto discard_and_relse;
75310 }
75311 if (nested)
c6e2a6c8 75312@@ -408,7 +408,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
8308f9c9
MT
75313 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
75314 } else if (sk_add_backlog(sk, skb)) {
75315 bh_unlock_sock(sk);
75316- atomic_inc(&sk->sk_drops);
75317+ atomic_inc_unchecked(&sk->sk_drops);
75318 goto discard_and_relse;
75319 }
75320
c6e2a6c8 75321@@ -984,7 +984,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
15a11c5b
MT
75322 if (len > sizeof(peercred))
75323 len = sizeof(peercred);
75324 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
75325- if (copy_to_user(optval, &peercred, len))
75326+ if (len > sizeof(peercred) || copy_to_user(optval, &peercred, len))
75327 return -EFAULT;
75328 goto lenout;
75329 }
c6e2a6c8 75330@@ -997,7 +997,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
ae4e228f
MT
75331 return -ENOTCONN;
75332 if (lv < len)
75333 return -EINVAL;
75334- if (copy_to_user(optval, address, len))
75335+ if (len > sizeof(address) || copy_to_user(optval, address, len))
75336 return -EFAULT;
75337 goto lenout;
75338 }
c6e2a6c8 75339@@ -1043,7 +1043,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
ae4e228f
MT
75340
75341 if (len > lv)
75342 len = lv;
75343- if (copy_to_user(optval, &v, len))
75344+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
75345 return -EFAULT;
75346 lenout:
75347 if (put_user(len, optlen))
572b4308 75348@@ -2131,7 +2131,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
75349 */
75350 smp_wmb();
75351 atomic_set(&sk->sk_refcnt, 1);
75352- atomic_set(&sk->sk_drops, 0);
75353+ atomic_set_unchecked(&sk->sk_drops, 0);
75354 }
75355 EXPORT_SYMBOL(sock_init_data);
75356
75357diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
75358index b9868e1..849f809 100644
75359--- a/net/core/sock_diag.c
75360+++ b/net/core/sock_diag.c
75361@@ -16,20 +16,27 @@ static DEFINE_MUTEX(sock_diag_table_mutex);
75362
75363 int sock_diag_check_cookie(void *sk, __u32 *cookie)
75364 {
75365+#ifndef CONFIG_GRKERNSEC_HIDESYM
75366 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
75367 cookie[1] != INET_DIAG_NOCOOKIE) &&
75368 ((u32)(unsigned long)sk != cookie[0] ||
75369 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
75370 return -ESTALE;
75371 else
75372+#endif
75373 return 0;
75374 }
75375 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
75376
75377 void sock_diag_save_cookie(void *sk, __u32 *cookie)
75378 {
75379+#ifdef CONFIG_GRKERNSEC_HIDESYM
75380+ cookie[0] = 0;
75381+ cookie[1] = 0;
75382+#else
75383 cookie[0] = (u32)(unsigned long)sk;
75384 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
75385+#endif
75386 }
75387 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
75388
75389diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
75390index 02e75d1..9a57a7c 100644
75391--- a/net/decnet/sysctl_net_decnet.c
75392+++ b/net/decnet/sysctl_net_decnet.c
75393@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
75394
75395 if (len > *lenp) len = *lenp;
75396
75397- if (copy_to_user(buffer, addr, len))
bc901d79 75398+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
75399 return -EFAULT;
75400
75401 *lenp = len;
fe2de317 75402@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
75403
75404 if (len > *lenp) len = *lenp;
75405
75406- if (copy_to_user(buffer, devname, len))
bc901d79 75407+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
75408 return -EFAULT;
75409
75410 *lenp = len;
75411diff --git a/net/econet/Kconfig b/net/econet/Kconfig
75412index 39a2d29..f39c0fe 100644
75413--- a/net/econet/Kconfig
75414+++ b/net/econet/Kconfig
75415@@ -4,7 +4,7 @@
75416
75417 config ECONET
75418 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
75419- depends on EXPERIMENTAL && INET
75420+ depends on EXPERIMENTAL && INET && BROKEN
75421 ---help---
75422 Econet is a fairly old and slow networking protocol mainly used by
75423 Acorn computers to access file and print servers. It uses native
75424diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
75425index c48adc5..667c1d4 100644
75426--- a/net/ipv4/cipso_ipv4.c
75427+++ b/net/ipv4/cipso_ipv4.c
75428@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
75429 case CIPSO_V4_TAG_LOCAL:
75430 /* This is a non-standard tag that we only allow for
75431 * local connections, so if the incoming interface is
75432- * not the loopback device drop the packet. */
75433- if (!(skb->dev->flags & IFF_LOOPBACK)) {
75434+ * not the loopback device drop the packet. Further,
75435+ * there is no legitimate reason for setting this from
75436+ * userspace so reject it if skb is NULL. */
75437+ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
75438 err_offset = opt_iter;
75439 goto validate_return_locked;
75440 }
fe2de317 75441diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
c6e2a6c8 75442index cbe3a68..a879b75 100644
75443--- a/net/ipv4/fib_frontend.c
75444+++ b/net/ipv4/fib_frontend.c
c6e2a6c8 75445@@ -969,12 +969,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
75446 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75447 fib_sync_up(dev);
75448 #endif
75449- atomic_inc(&net->ipv4.dev_addr_genid);
75450+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75451 rt_cache_flush(dev_net(dev), -1);
75452 break;
75453 case NETDEV_DOWN:
75454 fib_del_ifaddr(ifa, NULL);
75455- atomic_inc(&net->ipv4.dev_addr_genid);
75456+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75457 if (ifa->ifa_dev->ifa_list == NULL) {
75458 /* Last address was deleted from this interface.
75459 * Disable IP.
c6e2a6c8 75460@@ -1010,7 +1010,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
75461 #ifdef CONFIG_IP_ROUTE_MULTIPATH
75462 fib_sync_up(dev);
75463 #endif
75464- atomic_inc(&net->ipv4.dev_addr_genid);
75465+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
75466 rt_cache_flush(dev_net(dev), -1);
75467 break;
75468 case NETDEV_DOWN:
fe2de317 75469diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
c6e2a6c8 75470index 8861f91..ab1e3c1 100644
75471--- a/net/ipv4/fib_semantics.c
75472+++ b/net/ipv4/fib_semantics.c
c6e2a6c8 75473@@ -698,7 +698,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
75474 nh->nh_saddr = inet_select_addr(nh->nh_dev,
75475 nh->nh_gw,
75476 nh->nh_parent->fib_scope);
75477- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
75478+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
75479
75480 return nh->nh_saddr;
75481 }
75482diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
75483index 984ec65..97ac518 100644
75484--- a/net/ipv4/inet_hashtables.c
75485+++ b/net/ipv4/inet_hashtables.c
15a11c5b 75486@@ -18,12 +18,15 @@
75487 #include <linux/sched.h>
75488 #include <linux/slab.h>
75489 #include <linux/wait.h>
75490+#include <linux/security.h>
75491
75492 #include <net/inet_connection_sock.h>
75493 #include <net/inet_hashtables.h>
15a11c5b 75494 #include <net/secure_seq.h>
75495 #include <net/ip.h>
75496
75497+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
75498+
75499 /*
75500 * Allocate and initialize a new local port bind bucket.
75501 * The bindhash mutex for snum's hash chain must be held here.
15a11c5b 75502@@ -530,6 +533,8 @@ ok:
ae4e228f 75503 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
75504 spin_unlock(&head->lock);
75505
75506+ gr_update_task_in_ip_table(current, inet_sk(sk));
75507+
75508 if (tw) {
75509 inet_twsk_deschedule(tw, death_row);
ae4e228f 75510 while (twrefcnt) {
fe2de317 75511diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
572b4308 75512index dfba343..c827d50 100644
75513--- a/net/ipv4/inetpeer.c
75514+++ b/net/ipv4/inetpeer.c
5e856224 75515@@ -487,8 +487,8 @@ relookup:
6892158b 75516 if (p) {
16454cff 75517 p->daddr = *daddr;
75518 atomic_set(&p->refcnt, 1);
75519- atomic_set(&p->rid, 0);
6e9df6a3 75520- atomic_set(&p->ip_id_count,
6892158b 75521+ atomic_set_unchecked(&p->rid, 0);
75522+ atomic_set_unchecked(&p->ip_id_count,
75523 (daddr->family == AF_INET) ?
75524 secure_ip_id(daddr->addr.a4) :
75525 secure_ipv6_id(daddr->addr.a6));
fe2de317 75526diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
c6e2a6c8 75527index 3727e23..517f5df 100644
75528--- a/net/ipv4/ip_fragment.c
75529+++ b/net/ipv4/ip_fragment.c
c6e2a6c8 75530@@ -318,7 +318,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
75531 return 0;
75532
75533 start = qp->rid;
75534- end = atomic_inc_return(&peer->rid);
75535+ end = atomic_inc_return_unchecked(&peer->rid);
75536 qp->rid = end;
75537
75538 rc = qp->q.fragments && (end - start) > max;
fe2de317 75539diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
c6e2a6c8 75540index 2fd0fba..83fac99 100644
75541--- a/net/ipv4/ip_sockglue.c
75542+++ b/net/ipv4/ip_sockglue.c
c6e2a6c8 75543@@ -1137,7 +1137,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75544 len = min_t(unsigned int, len, opt->optlen);
75545 if (put_user(len, optlen))
75546 return -EFAULT;
75547- if (copy_to_user(optval, opt->__data, len))
75548+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
75549+ copy_to_user(optval, opt->__data, len))
75550 return -EFAULT;
75551 return 0;
75552 }
c6e2a6c8 75553@@ -1268,7 +1269,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
75554 if (sk->sk_type != SOCK_STREAM)
75555 return -ENOPROTOOPT;
75556
75557- msg.msg_control = optval;
75558+ msg.msg_control = (void __force_kernel *)optval;
75559 msg.msg_controllen = len;
75560 msg.msg_flags = flags;
75561
fe2de317 75562diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
c6e2a6c8 75563index 92ac7e7..13f93d9 100644
75564--- a/net/ipv4/ipconfig.c
75565+++ b/net/ipv4/ipconfig.c
c6e2a6c8 75566@@ -321,7 +321,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
75567
75568 mm_segment_t oldfs = get_fs();
75569 set_fs(get_ds());
75570- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75571+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75572 set_fs(oldfs);
75573 return res;
75574 }
c6e2a6c8 75575@@ -332,7 +332,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
75576
75577 mm_segment_t oldfs = get_fs();
75578 set_fs(get_ds());
75579- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
75580+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
75581 set_fs(oldfs);
75582 return res;
75583 }
c6e2a6c8 75584@@ -343,7 +343,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
75585
75586 mm_segment_t oldfs = get_fs();
75587 set_fs(get_ds());
75588- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
75589+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
75590 set_fs(oldfs);
75591 return res;
75592 }
fe2de317 75593diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
c6e2a6c8 75594index 50009c7..5996a9f 100644
75595--- a/net/ipv4/ping.c
75596+++ b/net/ipv4/ping.c
5e856224 75597@@ -838,7 +838,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
75598 sk_rmem_alloc_get(sp),
75599 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75600 atomic_read(&sp->sk_refcnt), sp,
75601- atomic_read(&sp->sk_drops), len);
75602+ atomic_read_unchecked(&sp->sk_drops), len);
75603 }
75604
75605 static int ping_seq_show(struct seq_file *seq, void *v)
fe2de317 75606diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
c6e2a6c8 75607index bbd604c..4d5469c 100644
75608--- a/net/ipv4/raw.c
75609+++ b/net/ipv4/raw.c
5e856224 75610@@ -304,7 +304,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
75611 int raw_rcv(struct sock *sk, struct sk_buff *skb)
75612 {
75613 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
75614- atomic_inc(&sk->sk_drops);
75615+ atomic_inc_unchecked(&sk->sk_drops);
75616 kfree_skb(skb);
75617 return NET_RX_DROP;
75618 }
c6e2a6c8 75619@@ -740,16 +740,20 @@ static int raw_init(struct sock *sk)
75620
75621 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
75622 {
75623+ struct icmp_filter filter;
75624+
75625 if (optlen > sizeof(struct icmp_filter))
75626 optlen = sizeof(struct icmp_filter);
75627- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
75628+ if (copy_from_user(&filter, optval, optlen))
75629 return -EFAULT;
15a11c5b 75630+ raw_sk(sk)->filter = filter;
75631 return 0;
75632 }
75633
75634 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
75635 {
71d190be 75636 int len, ret = -EFAULT;
15a11c5b 75637+ struct icmp_filter filter;
75638
75639 if (get_user(len, optlen))
15a11c5b 75640 goto out;
c6e2a6c8 75641@@ -759,8 +763,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
75642 if (len > sizeof(struct icmp_filter))
75643 len = sizeof(struct icmp_filter);
75644 ret = -EFAULT;
15a11c5b 75645- if (put_user(len, optlen) ||
71d190be 75646- copy_to_user(optval, &raw_sk(sk)->filter, len))
15a11c5b 75647+ filter = raw_sk(sk)->filter;
6e9df6a3 75648+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
75649 goto out;
75650 ret = 0;
75651 out: return ret;
c6e2a6c8 75652@@ -988,7 +992,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
75653 sk_wmem_alloc_get(sp),
75654 sk_rmem_alloc_get(sp),
75655 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75656- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
75657+ atomic_read(&sp->sk_refcnt),
75658+#ifdef CONFIG_GRKERNSEC_HIDESYM
75659+ NULL,
75660+#else
75661+ sp,
75662+#endif
75663+ atomic_read_unchecked(&sp->sk_drops));
75664 }
75665
75666 static int raw_seq_show(struct seq_file *seq, void *v)
fe2de317 75667diff --git a/net/ipv4/route.c b/net/ipv4/route.c
c6e2a6c8 75668index 167ea10..4b15883 100644
75669--- a/net/ipv4/route.c
75670+++ b/net/ipv4/route.c
c6e2a6c8 75671@@ -312,7 +312,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
75672
75673 static inline int rt_genid(struct net *net)
75674 {
75675- return atomic_read(&net->ipv4.rt_genid);
75676+ return atomic_read_unchecked(&net->ipv4.rt_genid);
75677 }
75678
75679 #ifdef CONFIG_PROC_FS
c6e2a6c8 75680@@ -936,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
75681 unsigned char shuffle;
75682
75683 get_random_bytes(&shuffle, sizeof(shuffle));
75684- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
75685+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
5e856224 75686 inetpeer_invalidate_tree(AF_INET);
75687 }
75688
c6e2a6c8 75689@@ -3009,7 +3009,7 @@ static int rt_fill_info(struct net *net,
75690 error = rt->dst.error;
75691 if (peer) {
6892158b 75692 inet_peer_refcheck(rt->peer);
75693- id = atomic_read(&peer->ip_id_count) & 0xffff;
75694+ id = atomic_read_unchecked(&peer->ip_id_count) & 0xffff;
75695 if (peer->tcp_ts_stamp) {
75696 ts = peer->tcp_ts;
75697 tsage = get_seconds() - peer->tcp_ts_stamp;
fe2de317 75698diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
c6e2a6c8 75699index 0cb86ce..8e7fda8 100644
75700--- a/net/ipv4/tcp_ipv4.c
75701+++ b/net/ipv4/tcp_ipv4.c
c6e2a6c8 75702@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
6892158b 75703 EXPORT_SYMBOL(sysctl_tcp_low_latency);
58c5fc13 75704
c6e2a6c8 75705
58c5fc13 75706+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
ae4e228f 75707+extern int grsec_enable_blackhole;
58c5fc13 75708+#endif
c6e2a6c8 75709+
ae4e228f 75710 #ifdef CONFIG_TCP_MD5SIG
75711 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
75712 __be32 daddr, __be32 saddr, const struct tcphdr *th);
75713@@ -1641,6 +1645,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
75714 return 0;
75715
75716 reset:
75717+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75718+ if (!grsec_enable_blackhole)
75719+#endif
75720 tcp_v4_send_reset(rsk, skb);
75721 discard:
75722 kfree_skb(skb);
c6e2a6c8 75723@@ -1703,12 +1710,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
75724 TCP_SKB_CB(skb)->sacked = 0;
75725
75726 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
75727- if (!sk)
75728+ if (!sk) {
75729+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75730+ ret = 1;
75731+#endif
75732 goto no_tcp_socket;
df50ba0c 75733-
ae4e228f 75734+ }
75735 process:
75736- if (sk->sk_state == TCP_TIME_WAIT)
75737+ if (sk->sk_state == TCP_TIME_WAIT) {
75738+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75739+ ret = 2;
75740+#endif
75741 goto do_time_wait;
75742+ }
75743
75744 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
75745 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
c6e2a6c8 75746@@ -1758,6 +1772,10 @@ no_tcp_socket:
75747 bad_packet:
75748 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
75749 } else {
75750+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75751+ if (!grsec_enable_blackhole || (ret == 1 &&
75752+ (skb->dev->flags & IFF_LOOPBACK)))
75753+#endif
75754 tcp_v4_send_reset(NULL, skb);
75755 }
75756
c6e2a6c8 75757@@ -2419,7 +2437,11 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
75758 0, /* non standard timer */
75759 0, /* open_requests have no inode */
75760 atomic_read(&sk->sk_refcnt),
75761+#ifdef CONFIG_GRKERNSEC_HIDESYM
75762+ NULL,
75763+#else
75764 req,
75765+#endif
75766 len);
75767 }
75768
c6e2a6c8 75769@@ -2469,7 +2491,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
75770 sock_i_uid(sk),
75771 icsk->icsk_probes_out,
75772 sock_i_ino(sk),
75773- atomic_read(&sk->sk_refcnt), sk,
75774+ atomic_read(&sk->sk_refcnt),
75775+#ifdef CONFIG_GRKERNSEC_HIDESYM
75776+ NULL,
75777+#else
75778+ sk,
75779+#endif
75780 jiffies_to_clock_t(icsk->icsk_rto),
75781 jiffies_to_clock_t(icsk->icsk_ack.ato),
75782 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
c6e2a6c8 75783@@ -2497,7 +2524,13 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
15a11c5b 75784 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
75785 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
75786 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
75787- atomic_read(&tw->tw_refcnt), tw, len);
75788+ atomic_read(&tw->tw_refcnt),
75789+#ifdef CONFIG_GRKERNSEC_HIDESYM
75790+ NULL,
75791+#else
75792+ tw,
75793+#endif
75794+ len);
75795 }
75796
75797 #define TMPSZ 150
fe2de317 75798diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
c6e2a6c8 75799index 3cabafb..640525b 100644
75800--- a/net/ipv4/tcp_minisocks.c
75801+++ b/net/ipv4/tcp_minisocks.c
df50ba0c 75802@@ -27,6 +27,10 @@
75803 #include <net/inet_common.h>
75804 #include <net/xfrm.h>
75805
75806+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75807+extern int grsec_enable_blackhole;
75808+#endif
75809+
75810 int sysctl_tcp_syncookies __read_mostly = 1;
75811 EXPORT_SYMBOL(sysctl_tcp_syncookies);
75812
5e856224 75813@@ -753,6 +757,10 @@ listen_overflow:
75814
75815 embryonic_reset:
75816 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
75817+
75818+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75819+ if (!grsec_enable_blackhole)
58c5fc13 75820+#endif
75821 if (!(flg & TCP_FLAG_RST))
75822 req->rsk_ops->send_reset(sk, skb);
58c5fc13 75823
fe2de317 75824diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
c6e2a6c8 75825index a981cdc..48f4c3a 100644
75826--- a/net/ipv4/tcp_probe.c
75827+++ b/net/ipv4/tcp_probe.c
c6e2a6c8 75828@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
75829 if (cnt + width >= len)
75830 break;
75831
75832- if (copy_to_user(buf + cnt, tbuf, width))
bc901d79 75833+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
75834 return -EFAULT;
75835 cnt += width;
75836 }
fe2de317 75837diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
c6e2a6c8 75838index 34d4a02..3b57f86 100644
75839--- a/net/ipv4/tcp_timer.c
75840+++ b/net/ipv4/tcp_timer.c
75841@@ -22,6 +22,10 @@
75842 #include <linux/gfp.h>
75843 #include <net/tcp.h>
75844
75845+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75846+extern int grsec_lastack_retries;
75847+#endif
75848+
75849 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
75850 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
75851 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
5e856224 75852@@ -196,6 +200,13 @@ static int tcp_write_timeout(struct sock *sk)
75853 }
75854 }
75855
75856+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75857+ if ((sk->sk_state == TCP_LAST_ACK) &&
75858+ (grsec_lastack_retries > 0) &&
75859+ (grsec_lastack_retries < retry_until))
75860+ retry_until = grsec_lastack_retries;
75861+#endif
75862+
75863 if (retransmits_timed_out(sk, retry_until,
75864 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
ae4e228f 75865 /* Has it gone just too far? */
fe2de317 75866diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
c6e2a6c8 75867index fe14105..0618260 100644
75868--- a/net/ipv4/udp.c
75869+++ b/net/ipv4/udp.c
c6e2a6c8 75870@@ -87,6 +87,7 @@
75871 #include <linux/types.h>
75872 #include <linux/fcntl.h>
75873 #include <linux/module.h>
75874+#include <linux/security.h>
75875 #include <linux/socket.h>
75876 #include <linux/sockios.h>
75877 #include <linux/igmp.h>
c6e2a6c8 75878@@ -109,6 +110,10 @@
6e9df6a3 75879 #include <trace/events/udp.h>
75880 #include "udp_impl.h"
75881
75882+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
75883+extern int grsec_enable_blackhole;
75884+#endif
75885+
75886 struct udp_table udp_table __read_mostly;
75887 EXPORT_SYMBOL(udp_table);
75888
c6e2a6c8 75889@@ -567,6 +572,9 @@ found:
75890 return s;
75891 }
75892
75893+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
75894+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
75895+
75896 /*
75897 * This routine is called by the ICMP module when it gets some
75898 * sort of error condition. If err < 0 then the socket should
c6e2a6c8 75899@@ -858,9 +866,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
75900 dport = usin->sin_port;
75901 if (dport == 0)
75902 return -EINVAL;
75903+
75904+ err = gr_search_udp_sendmsg(sk, usin);
75905+ if (err)
75906+ return err;
75907 } else {
75908 if (sk->sk_state != TCP_ESTABLISHED)
75909 return -EDESTADDRREQ;
75910+
75911+ err = gr_search_udp_sendmsg(sk, NULL);
75912+ if (err)
75913+ return err;
75914+
75915 daddr = inet->inet_daddr;
75916 dport = inet->inet_dport;
58c5fc13 75917 /* Open fast path for connected socket.
c6e2a6c8 75918@@ -1102,7 +1119,7 @@ static unsigned int first_packet_length(struct sock *sk)
75919 udp_lib_checksum_complete(skb)) {
75920 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
75921 IS_UDPLITE(sk));
75922- atomic_inc(&sk->sk_drops);
75923+ atomic_inc_unchecked(&sk->sk_drops);
75924 __skb_unlink(skb, rcvq);
75925 __skb_queue_tail(&list_kill, skb);
75926 }
c6e2a6c8 75927@@ -1188,6 +1205,10 @@ try_again:
75928 if (!skb)
75929 goto out;
75930
75931+ err = gr_search_udp_recvmsg(sk, skb);
75932+ if (err)
75933+ goto out_free;
75934+
75935 ulen = skb->len - sizeof(struct udphdr);
75936 copied = len;
75937 if (copied > ulen)
5e856224 75938@@ -1489,7 +1510,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
75939
75940 drop:
75941 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
75942- atomic_inc(&sk->sk_drops);
75943+ atomic_inc_unchecked(&sk->sk_drops);
75944 kfree_skb(skb);
75945 return -1;
75946 }
5e856224 75947@@ -1508,7 +1529,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
75948 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
75949
75950 if (!skb1) {
75951- atomic_inc(&sk->sk_drops);
75952+ atomic_inc_unchecked(&sk->sk_drops);
75953 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
75954 IS_UDPLITE(sk));
75955 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
5e856224 75956@@ -1677,6 +1698,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
75957 goto csum_error;
75958
75959 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
75960+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
ae4e228f 75961+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
75962+#endif
75963 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
75964
75965 /*
c6e2a6c8 75966@@ -2094,8 +2118,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
75967 sk_wmem_alloc_get(sp),
75968 sk_rmem_alloc_get(sp),
75969 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
75970- atomic_read(&sp->sk_refcnt), sp,
8308f9c9 75971- atomic_read(&sp->sk_drops), len);
75972+ atomic_read(&sp->sk_refcnt),
75973+#ifdef CONFIG_GRKERNSEC_HIDESYM
75974+ NULL,
75975+#else
75976+ sp,
75977+#endif
8308f9c9 75978+ atomic_read_unchecked(&sp->sk_drops), len);
75979 }
75980
8308f9c9 75981 int udp4_seq_show(struct seq_file *seq, void *v)
fe2de317 75982diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
c6e2a6c8 75983index 7d5cb97..c56564f 100644
75984--- a/net/ipv6/addrconf.c
75985+++ b/net/ipv6/addrconf.c
c6e2a6c8 75986@@ -2142,7 +2142,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
75987 p.iph.ihl = 5;
75988 p.iph.protocol = IPPROTO_IPV6;
75989 p.iph.ttl = 64;
75990- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
75991+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
75992
75993 if (ops->ndo_do_ioctl) {
75994 mm_segment_t oldfs = get_fs();
fe2de317 75995diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
5e856224 75996index 02dd203..e03fcc9 100644
75997--- a/net/ipv6/inet6_connection_sock.c
75998+++ b/net/ipv6/inet6_connection_sock.c
75999@@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
76000 #ifdef CONFIG_XFRM
76001 {
76002 struct rt6_info *rt = (struct rt6_info *)dst;
76003- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
76004+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
76005 }
76006 #endif
76007 }
fe2de317 76008@@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
76009 #ifdef CONFIG_XFRM
76010 if (dst) {
76011 struct rt6_info *rt = (struct rt6_info *)dst;
76012- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
76013+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
76014 __sk_dst_reset(sk);
76015 dst = NULL;
76016 }
fe2de317 76017diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
c6e2a6c8 76018index 63dd1f8..e7f53ca 100644
76019--- a/net/ipv6/ipv6_sockglue.c
76020+++ b/net/ipv6/ipv6_sockglue.c
c6e2a6c8 76021@@ -990,7 +990,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
76022 if (sk->sk_type != SOCK_STREAM)
76023 return -ENOPROTOOPT;
76024
76025- msg.msg_control = optval;
76026+ msg.msg_control = (void __force_kernel *)optval;
76027 msg.msg_controllen = len;
76028 msg.msg_flags = flags;
76029
fe2de317 76030diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
c6e2a6c8 76031index 5bddea7..82d9d67 100644
76032--- a/net/ipv6/raw.c
76033+++ b/net/ipv6/raw.c
4c928ab7 76034@@ -377,7 +377,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
8308f9c9 76035 {
4c928ab7 76036 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
76037 skb_checksum_complete(skb)) {
76038- atomic_inc(&sk->sk_drops);
76039+ atomic_inc_unchecked(&sk->sk_drops);
76040 kfree_skb(skb);
76041 return NET_RX_DROP;
76042 }
5e856224 76043@@ -405,7 +405,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76044 struct raw6_sock *rp = raw6_sk(sk);
76045
76046 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
76047- atomic_inc(&sk->sk_drops);
76048+ atomic_inc_unchecked(&sk->sk_drops);
76049 kfree_skb(skb);
76050 return NET_RX_DROP;
76051 }
5e856224 76052@@ -429,7 +429,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
76053
76054 if (inet->hdrincl) {
76055 if (skb_checksum_complete(skb)) {
76056- atomic_inc(&sk->sk_drops);
76057+ atomic_inc_unchecked(&sk->sk_drops);
76058 kfree_skb(skb);
76059 return NET_RX_DROP;
76060 }
5e856224 76061@@ -602,7 +602,7 @@ out:
76062 return err;
76063 }
76064
76065-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
76066+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66a7e928 76067 struct flowi6 *fl6, struct dst_entry **dstp,
76068 unsigned int flags)
76069 {
c6e2a6c8 76070@@ -914,12 +914,15 @@ do_confirm:
76071 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
76072 char __user *optval, int optlen)
76073 {
76074+ struct icmp6_filter filter;
76075+
76076 switch (optname) {
76077 case ICMPV6_FILTER:
76078 if (optlen > sizeof(struct icmp6_filter))
76079 optlen = sizeof(struct icmp6_filter);
76080- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
76081+ if (copy_from_user(&filter, optval, optlen))
76082 return -EFAULT;
15a11c5b 76083+ raw6_sk(sk)->filter = filter;
76084 return 0;
76085 default:
76086 return -ENOPROTOOPT;
c6e2a6c8 76087@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76088 char __user *optval, int __user *optlen)
76089 {
71d190be 76090 int len;
15a11c5b 76091+ struct icmp6_filter filter;
76092
76093 switch (optname) {
15a11c5b 76094 case ICMPV6_FILTER:
c6e2a6c8 76095@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
76096 len = sizeof(struct icmp6_filter);
76097 if (put_user(len, optlen))
76098 return -EFAULT;
76099- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
76100+ filter = raw6_sk(sk)->filter;
76101+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
76102 return -EFAULT;
76103 return 0;
76104 default:
c6e2a6c8 76105@@ -1250,7 +1255,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
76106 0, 0L, 0,
76107 sock_i_uid(sp), 0,
76108 sock_i_ino(sp),
76109- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
76110+ atomic_read(&sp->sk_refcnt),
76111+#ifdef CONFIG_GRKERNSEC_HIDESYM
76112+ NULL,
76113+#else
76114+ sp,
76115+#endif
8308f9c9 76116+ atomic_read_unchecked(&sp->sk_drops));
76117 }
76118
76119 static int raw6_seq_show(struct seq_file *seq, void *v)
fe2de317 76120diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
c6e2a6c8 76121index 98256cf..7f16dbd 100644
76122--- a/net/ipv6/tcp_ipv6.c
76123+++ b/net/ipv6/tcp_ipv6.c
5e856224 76124@@ -94,6 +94,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
76125 }
76126 #endif
76127
76128+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76129+extern int grsec_enable_blackhole;
76130+#endif
76131+
76132 static void tcp_v6_hash(struct sock *sk)
76133 {
76134 if (sk->sk_state != TCP_CLOSE) {
c6e2a6c8 76135@@ -1542,6 +1546,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
76136 return 0;
76137
76138 reset:
76139+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76140+ if (!grsec_enable_blackhole)
76141+#endif
76142 tcp_v6_send_reset(sk, skb);
76143 discard:
76144 if (opt_skb)
c6e2a6c8 76145@@ -1623,12 +1630,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
df50ba0c 76146 TCP_SKB_CB(skb)->sacked = 0;
58c5fc13 76147
76148 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
76149- if (!sk)
76150+ if (!sk) {
76151+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76152+ ret = 1;
76153+#endif
76154 goto no_tcp_socket;
76155+ }
76156
76157 process:
76158- if (sk->sk_state == TCP_TIME_WAIT)
76159+ if (sk->sk_state == TCP_TIME_WAIT) {
58c5fc13 76160+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
df50ba0c 76161+ ret = 2;
58c5fc13 76162+#endif
76163 goto do_time_wait;
76164+ }
76165
76166 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
76167 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
c6e2a6c8 76168@@ -1676,6 +1691,10 @@ no_tcp_socket:
76169 bad_packet:
76170 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
76171 } else {
76172+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76173+ if (!grsec_enable_blackhole || (ret == 1 &&
76174+ (skb->dev->flags & IFF_LOOPBACK)))
76175+#endif
76176 tcp_v6_send_reset(NULL, skb);
76177 }
76178
c6e2a6c8 76179@@ -1930,7 +1949,13 @@ static void get_openreq6(struct seq_file *seq,
76180 uid,
76181 0, /* non standard timer */
76182 0, /* open_requests have no inode */
76183- 0, req);
76184+ 0,
76185+#ifdef CONFIG_GRKERNSEC_HIDESYM
76186+ NULL
76187+#else
76188+ req
76189+#endif
76190+ );
76191 }
76192
76193 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
c6e2a6c8 76194@@ -1980,7 +2005,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
76195 sock_i_uid(sp),
76196 icsk->icsk_probes_out,
76197 sock_i_ino(sp),
76198- atomic_read(&sp->sk_refcnt), sp,
76199+ atomic_read(&sp->sk_refcnt),
76200+#ifdef CONFIG_GRKERNSEC_HIDESYM
76201+ NULL,
76202+#else
76203+ sp,
76204+#endif
76205 jiffies_to_clock_t(icsk->icsk_rto),
76206 jiffies_to_clock_t(icsk->icsk_ack.ato),
76207 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
c6e2a6c8 76208@@ -2015,7 +2045,13 @@ static void get_timewait6_sock(struct seq_file *seq,
76209 dest->s6_addr32[2], dest->s6_addr32[3], destp,
76210 tw->tw_substate, 0, 0,
76211 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
76212- atomic_read(&tw->tw_refcnt), tw);
76213+ atomic_read(&tw->tw_refcnt),
76214+#ifdef CONFIG_GRKERNSEC_HIDESYM
76215+ NULL
76216+#else
76217+ tw
76218+#endif
76219+ );
76220 }
76221
76222 static int tcp6_seq_show(struct seq_file *seq, void *v)
fe2de317 76223diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
c6e2a6c8 76224index 37b0699..d323408 100644
76225--- a/net/ipv6/udp.c
76226+++ b/net/ipv6/udp.c
76227@@ -50,6 +50,10 @@
76228 #include <linux/seq_file.h>
76229 #include "udp_impl.h"
76230
76231+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
76232+extern int grsec_enable_blackhole;
76233+#endif
76234+
76235 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
76236 {
76237 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
5e856224 76238@@ -551,7 +555,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
76239
76240 return 0;
76241 drop:
76242- atomic_inc(&sk->sk_drops);
76243+ atomic_inc_unchecked(&sk->sk_drops);
76244 drop_no_sk_drops_inc:
76245 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
76246 kfree_skb(skb);
5e856224 76247@@ -627,7 +631,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
76248 continue;
76249 }
76250 drop:
76251- atomic_inc(&sk->sk_drops);
76252+ atomic_inc_unchecked(&sk->sk_drops);
76253 UDP6_INC_STATS_BH(sock_net(sk),
76254 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
76255 UDP6_INC_STATS_BH(sock_net(sk),
5e856224 76256@@ -782,6 +786,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76257 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
76258 proto == IPPROTO_UDPLITE);
76259
76260+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
df50ba0c 76261+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
58c5fc13 76262+#endif
df50ba0c 76263 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
76264
76265 kfree_skb(skb);
5e856224 76266@@ -798,7 +805,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
76267 if (!sock_owned_by_user(sk))
76268 udpv6_queue_rcv_skb(sk, skb);
76269 else if (sk_add_backlog(sk, skb)) {
76270- atomic_inc(&sk->sk_drops);
76271+ atomic_inc_unchecked(&sk->sk_drops);
76272 bh_unlock_sock(sk);
76273 sock_put(sk);
76274 goto discard;
c6e2a6c8 76275@@ -1411,8 +1418,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
76276 0, 0L, 0,
76277 sock_i_uid(sp), 0,
76278 sock_i_ino(sp),
76279- atomic_read(&sp->sk_refcnt), sp,
8308f9c9 76280- atomic_read(&sp->sk_drops));
76281+ atomic_read(&sp->sk_refcnt),
76282+#ifdef CONFIG_GRKERNSEC_HIDESYM
76283+ NULL,
76284+#else
76285+ sp,
76286+#endif
8308f9c9 76287+ atomic_read_unchecked(&sp->sk_drops));
76288 }
76289
8308f9c9 76290 int udp6_seq_show(struct seq_file *seq, void *v)
fe2de317 76291diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
c6e2a6c8 76292index 6b9d5a0..4dffaf1 100644
76293--- a/net/irda/ircomm/ircomm_tty.c
76294+++ b/net/irda/ircomm/ircomm_tty.c
c6e2a6c8 76295@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76296 add_wait_queue(&self->open_wait, &wait);
76297
76298 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
76299- __FILE__,__LINE__, tty->driver->name, self->open_count );
c52201e0 76300+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76301
76302 /* As far as I can see, we protect open_count - Jean II */
76303 spin_lock_irqsave(&self->spinlock, flags);
76304 if (!tty_hung_up_p(filp)) {
76305 extra_count = 1;
76306- self->open_count--;
c52201e0 76307+ local_dec(&self->open_count);
76308 }
76309 spin_unlock_irqrestore(&self->spinlock, flags);
76310- self->blocked_open++;
c52201e0 76311+ local_inc(&self->blocked_open);
76312
76313 while (1) {
76314 if (tty->termios->c_cflag & CBAUD) {
c6e2a6c8 76315@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76316 }
76317
76318 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
76319- __FILE__,__LINE__, tty->driver->name, self->open_count );
c52201e0 76320+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
76321
76322 schedule();
76323 }
c6e2a6c8 76324@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
76325 if (extra_count) {
76326 /* ++ is not atomic, so this should be protected - Jean II */
76327 spin_lock_irqsave(&self->spinlock, flags);
76328- self->open_count++;
c52201e0 76329+ local_inc(&self->open_count);
76330 spin_unlock_irqrestore(&self->spinlock, flags);
76331 }
76332- self->blocked_open--;
c52201e0 76333+ local_dec(&self->blocked_open);
76334
76335 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
76336- __FILE__,__LINE__, tty->driver->name, self->open_count);
c52201e0 76337+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
76338
76339 if (!retval)
76340 self->flags |= ASYNC_NORMAL_ACTIVE;
c6e2a6c8 76341@@ -412,14 +412,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
76342 }
76343 /* ++ is not atomic, so this should be protected - Jean II */
76344 spin_lock_irqsave(&self->spinlock, flags);
76345- self->open_count++;
c52201e0 76346+ local_inc(&self->open_count);
76347
76348 tty->driver_data = self;
76349 self->tty = tty;
76350 spin_unlock_irqrestore(&self->spinlock, flags);
76351
76352 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
76353- self->line, self->open_count);
c52201e0 76354+ self->line, local_read(&self->open_count));
76355
76356 /* Not really used by us, but lets do it anyway */
76357 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
c6e2a6c8 76358@@ -505,7 +505,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76359 return;
76360 }
76361
76362- if ((tty->count == 1) && (self->open_count != 1)) {
c52201e0 76363+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
76364 /*
76365 * Uh, oh. tty->count is 1, which means that the tty
76366 * structure will be freed. state->count should always
c6e2a6c8 76367@@ -515,16 +515,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76368 */
76369 IRDA_DEBUG(0, "%s(), bad serial port count; "
76370 "tty->count is 1, state->count is %d\n", __func__ ,
76371- self->open_count);
76372- self->open_count = 1;
76373+ local_read(&self->open_count));
76374+ local_set(&self->open_count, 1);
76375 }
76376
76377- if (--self->open_count < 0) {
c52201e0 76378+ if (local_dec_return(&self->open_count) < 0) {
76379 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
76380- __func__, self->line, self->open_count);
76381- self->open_count = 0;
76382+ __func__, self->line, local_read(&self->open_count));
76383+ local_set(&self->open_count, 0);
76384 }
76385- if (self->open_count) {
c52201e0 76386+ if (local_read(&self->open_count)) {
76387 spin_unlock_irqrestore(&self->spinlock, flags);
76388
76389 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
c6e2a6c8 76390@@ -556,7 +556,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
76391 tty->closing = 0;
76392 self->tty = NULL;
76393
76394- if (self->blocked_open) {
c52201e0 76395+ if (local_read(&self->blocked_open)) {
76396 if (self->close_delay)
76397 schedule_timeout_interruptible(self->close_delay);
76398 wake_up_interruptible(&self->open_wait);
c6e2a6c8 76399@@ -1008,7 +1008,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
76400 spin_lock_irqsave(&self->spinlock, flags);
76401 self->flags &= ~ASYNC_NORMAL_ACTIVE;
76402 self->tty = NULL;
76403- self->open_count = 0;
c52201e0 76404+ local_set(&self->open_count, 0);
76405 spin_unlock_irqrestore(&self->spinlock, flags);
76406
76407 wake_up_interruptible(&self->open_wait);
c6e2a6c8 76408@@ -1355,7 +1355,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
76409 seq_putc(m, '\n');
76410
76411 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
76412- seq_printf(m, "Open count: %d\n", self->open_count);
c52201e0 76413+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
76414 seq_printf(m, "Max data size: %d\n", self->max_data_size);
76415 seq_printf(m, "Max header size: %d\n", self->max_header_size);
76416
fe2de317 76417diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
572b4308 76418index cd6f7a9..e63fe89 100644
76419--- a/net/iucv/af_iucv.c
76420+++ b/net/iucv/af_iucv.c
572b4308 76421@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
76422
76423 write_lock_bh(&iucv_sk_list.lock);
76424
76425- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
76426+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76427 while (__iucv_get_sock_by_name(name)) {
76428 sprintf(name, "%08x",
76429- atomic_inc_return(&iucv_sk_list.autobind_name));
76430+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
76431 }
76432
76433 write_unlock_bh(&iucv_sk_list.lock);
fe2de317 76434diff --git a/net/key/af_key.c b/net/key/af_key.c
c6e2a6c8 76435index 7e5d927..cdbb54e 100644
76436--- a/net/key/af_key.c
76437+++ b/net/key/af_key.c
4c928ab7 76438@@ -3016,10 +3016,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
76439 static u32 get_acqseq(void)
76440 {
76441 u32 res;
76442- static atomic_t acqseq;
76443+ static atomic_unchecked_t acqseq;
76444
76445 do {
76446- res = atomic_inc_return(&acqseq);
76447+ res = atomic_inc_return_unchecked(&acqseq);
76448 } while (!res);
76449 return res;
76450 }
fe2de317 76451diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
c6e2a6c8 76452index db8fae5..ff070cd 100644
76453--- a/net/mac80211/ieee80211_i.h
76454+++ b/net/mac80211/ieee80211_i.h
5e856224 76455@@ -28,6 +28,7 @@
76456 #include <net/ieee80211_radiotap.h>
76457 #include <net/cfg80211.h>
76458 #include <net/mac80211.h>
76459+#include <asm/local.h>
76460 #include "key.h"
76461 #include "sta_info.h"
76462
c6e2a6c8 76463@@ -842,7 +843,7 @@ struct ieee80211_local {
ae4e228f 76464 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
76465 spinlock_t queue_stop_reason_lock;
76466
58c5fc13 76467- int open_count;
c52201e0 76468+ local_t open_count;
76469 int monitors, cooked_mntrs;
76470 /* number of interfaces with corresponding FIF_ flags */
bc901d79 76471 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
fe2de317 76472diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
c6e2a6c8 76473index 48f937e..4ccd7b8 100644
76474--- a/net/mac80211/iface.c
76475+++ b/net/mac80211/iface.c
5e856224 76476@@ -222,7 +222,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76477 break;
76478 }
76479
76480- if (local->open_count == 0) {
c52201e0 76481+ if (local_read(&local->open_count) == 0) {
76482 res = drv_start(local);
76483 if (res)
76484 goto err_del_bss;
5e856224 76485@@ -246,7 +246,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
76486 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
76487
76488 if (!is_valid_ether_addr(dev->dev_addr)) {
76489- if (!local->open_count)
c52201e0 76490+ if (!local_read(&local->open_count))
76491 drv_stop(local);
76492 return -EADDRNOTAVAIL;
76493 }
5e856224 76494@@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
bc901d79 76495 mutex_unlock(&local->mtx);
58c5fc13 76496
76497 if (coming_up)
76498- local->open_count++;
c52201e0 76499+ local_inc(&local->open_count);
58c5fc13 76500
5e856224 76501 if (hw_reconf_flags)
58c5fc13 76502 ieee80211_hw_config(local, hw_reconf_flags);
5e856224 76503@@ -360,7 +360,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
58c5fc13 76504 err_del_interface:
5e856224 76505 drv_remove_interface(local, sdata);
76506 err_stop:
76507- if (!local->open_count)
c52201e0 76508+ if (!local_read(&local->open_count))
76509 drv_stop(local);
76510 err_del_bss:
76511 sdata->bss = NULL;
c6e2a6c8 76512@@ -491,7 +491,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76513 }
76514
76515 if (going_down)
76516- local->open_count--;
c52201e0 76517+ local_dec(&local->open_count);
76518
76519 switch (sdata->vif.type) {
76520 case NL80211_IFTYPE_AP_VLAN:
c6e2a6c8 76521@@ -562,7 +562,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
76522
76523 ieee80211_recalc_ps(local, -1);
76524
76525- if (local->open_count == 0) {
c52201e0 76526+ if (local_read(&local->open_count) == 0) {
76527 if (local->ops->napi_poll)
76528 napi_disable(&local->napi);
ae4e228f 76529 ieee80211_clear_tx_pending(local);
fe2de317 76530diff --git a/net/mac80211/main.c b/net/mac80211/main.c
c6e2a6c8 76531index 1633648..d45ebfa 100644
76532--- a/net/mac80211/main.c
76533+++ b/net/mac80211/main.c
c6e2a6c8 76534@@ -164,7 +164,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
76535 local->hw.conf.power_level = power;
76536 }
76537
76538- if (changed && local->open_count) {
c52201e0 76539+ if (changed && local_read(&local->open_count)) {
76540 ret = drv_config(local, changed);
76541 /*
76542 * Goal:
fe2de317 76543diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
c6e2a6c8 76544index ef8eba1..5c63952 100644
76545--- a/net/mac80211/pm.c
76546+++ b/net/mac80211/pm.c
76547@@ -34,7 +34,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76548 struct ieee80211_sub_if_data *sdata;
76549 struct sta_info *sta;
76550
76551- if (!local->open_count)
76552+ if (!local_read(&local->open_count))
76553 goto suspend;
76554
76555 ieee80211_scan_cancel(local);
fe2de317 76556@@ -72,7 +72,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76557 cancel_work_sync(&local->dynamic_ps_enable_work);
76558 del_timer_sync(&local->dynamic_ps_timer);
76559
76560- local->wowlan = wowlan && local->open_count;
76561+ local->wowlan = wowlan && local_read(&local->open_count);
76562 if (local->wowlan) {
76563 int err = drv_suspend(local, wowlan);
6e9df6a3 76564 if (err < 0) {
c6e2a6c8 76565@@ -128,7 +128,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76566 }
76567
76568 /* stop hardware - this must stop RX */
ae4e228f 76569- if (local->open_count)
c52201e0 76570+ if (local_read(&local->open_count))
76571 ieee80211_stop_device(local);
76572
15a11c5b 76573 suspend:
fe2de317 76574diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
c6e2a6c8 76575index 3313c11..bec9f17 100644
76576--- a/net/mac80211/rate.c
76577+++ b/net/mac80211/rate.c
c6e2a6c8 76578@@ -494,7 +494,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
76579
76580 ASSERT_RTNL();
76581
76582- if (local->open_count)
c52201e0 76583+ if (local_read(&local->open_count))
76584 return -EBUSY;
76585
ae4e228f 76586 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
fe2de317 76587diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
4c928ab7 76588index c97a065..ff61928 100644
76589--- a/net/mac80211/rc80211_pid_debugfs.c
76590+++ b/net/mac80211/rc80211_pid_debugfs.c
4c928ab7 76591@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
58c5fc13 76592
ae4e228f 76593 spin_unlock_irqrestore(&events->lock, status);
58c5fc13 76594
76595- if (copy_to_user(buf, pb, p))
76596+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
76597 return -EFAULT;
58c5fc13 76598
ae4e228f 76599 return p;
fe2de317 76600diff --git a/net/mac80211/util.c b/net/mac80211/util.c
c6e2a6c8 76601index eb9d7c0..d34b832 100644
76602--- a/net/mac80211/util.c
76603+++ b/net/mac80211/util.c
c6e2a6c8 76604@@ -1179,7 +1179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
76605 }
76606 #endif
76607 /* everything else happens only if HW was up & running */
76608- if (!local->open_count)
76609+ if (!local_read(&local->open_count))
76610 goto wake_up;
76611
76612 /*
fe2de317 76613diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
c6e2a6c8 76614index 0c6f67e..d02cdfc 100644
76615--- a/net/netfilter/Kconfig
76616+++ b/net/netfilter/Kconfig
c6e2a6c8 76617@@ -836,6 +836,16 @@ config NETFILTER_XT_MATCH_ESP
76618
76619 To compile it as a module, choose M here. If unsure, say N.
76620
76621+config NETFILTER_XT_MATCH_GRADM
76622+ tristate '"gradm" match support'
76623+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
76624+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
76625+ ---help---
76626+ The gradm match allows matching on whether grsecurity RBAC is enabled.
76627+ It is useful when iptables rules are applied early on bootup to
76628+ prevent connections to the machine (except from a trusted host)
76629+ while the RBAC system is disabled.
76630+
76631 config NETFILTER_XT_MATCH_HASHLIMIT
76632 tristate '"hashlimit" match support'
76633 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
76634diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
c6e2a6c8 76635index ca36765..0882e7c 100644
76636--- a/net/netfilter/Makefile
76637+++ b/net/netfilter/Makefile
c6e2a6c8 76638@@ -86,6 +86,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
fe2de317 76639 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
5e856224 76640 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
76641 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
76642+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
76643 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
76644 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76645 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
76646diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
4c928ab7 76647index 29fa5ba..8debc79 100644
76648--- a/net/netfilter/ipvs/ip_vs_conn.c
76649+++ b/net/netfilter/ipvs/ip_vs_conn.c
76650@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
76651 /* Increase the refcnt counter of the dest */
76652 atomic_inc(&dest->refcnt);
76653
76654- conn_flags = atomic_read(&dest->conn_flags);
76655+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
76656 if (cp->protocol != IPPROTO_UDP)
76657 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
76658 /* Bind with the destination and its corresponding transmitter */
fe2de317 76659@@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
76660 atomic_set(&cp->refcnt, 1);
76661
76662 atomic_set(&cp->n_control, 0);
76663- atomic_set(&cp->in_pkts, 0);
76664+ atomic_set_unchecked(&cp->in_pkts, 0);
76665
66a7e928 76666 atomic_inc(&ipvs->conn_count);
8308f9c9 76667 if (flags & IP_VS_CONN_F_NO_CPORT)
fe2de317 76668@@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
76669
76670 /* Don't drop the entry if its number of incoming packets is not
76671 located in [0, 8] */
76672- i = atomic_read(&cp->in_pkts);
76673+ i = atomic_read_unchecked(&cp->in_pkts);
76674 if (i > 8 || i < 0) return 0;
76675
76676 if (!todrop_rate[i]) return 0;
fe2de317 76677diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
c6e2a6c8 76678index 00bdb1d..6725a48 100644
76679--- a/net/netfilter/ipvs/ip_vs_core.c
76680+++ b/net/netfilter/ipvs/ip_vs_core.c
4c928ab7 76681@@ -562,7 +562,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
66a7e928 76682 ret = cp->packet_xmit(skb, cp, pd->pp);
76683 /* do not touch skb anymore */
76684
76685- atomic_inc(&cp->in_pkts);
76686+ atomic_inc_unchecked(&cp->in_pkts);
76687 ip_vs_conn_put(cp);
76688 return ret;
76689 }
4c928ab7 76690@@ -1611,7 +1611,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
76691 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
76692 pkts = sysctl_sync_threshold(ipvs);
76693 else
76694- pkts = atomic_add_return(1, &cp->in_pkts);
76695+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
76696
76697 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
8308f9c9 76698 cp->protocol == IPPROTO_SCTP) {
fe2de317 76699diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
572b4308 76700index f558998..7dfb054 100644
76701--- a/net/netfilter/ipvs/ip_vs_ctl.c
76702+++ b/net/netfilter/ipvs/ip_vs_ctl.c
4c928ab7 76703@@ -788,7 +788,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
76704 ip_vs_rs_hash(ipvs, dest);
76705 write_unlock_bh(&ipvs->rs_lock);
76706 }
76707- atomic_set(&dest->conn_flags, conn_flags);
76708+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
76709
76710 /* bind the service */
76711 if (!dest->svc) {
76712@@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
76713 {
76714 struct net_device *dev = ptr;
76715 struct net *net = dev_net(dev);
76716+ struct netns_ipvs *ipvs = net_ipvs(net);
76717 struct ip_vs_service *svc;
76718 struct ip_vs_dest *dest;
76719 unsigned int idx;
76720
76721- if (event != NETDEV_UNREGISTER)
76722+ if (event != NETDEV_UNREGISTER || !ipvs)
76723 return NOTIFY_DONE;
76724 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
76725 EnterFunction(2);
76726@@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
76727 }
76728 }
76729
76730- list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
76731+ list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
76732 __ip_vs_dev_reset(dest, dev);
76733 }
76734 mutex_unlock(&__ip_vs_mutex);
76735@@ -2028,7 +2029,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76736 " %-7s %-6d %-10d %-10d\n",
76737 &dest->addr.in6,
76738 ntohs(dest->port),
76739- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76740+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76741 atomic_read(&dest->weight),
76742 atomic_read(&dest->activeconns),
76743 atomic_read(&dest->inactconns));
572b4308 76744@@ -2039,7 +2040,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
76745 "%-7s %-6d %-10d %-10d\n",
76746 ntohl(dest->addr.ip),
76747 ntohs(dest->port),
76748- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
76749+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
76750 atomic_read(&dest->weight),
76751 atomic_read(&dest->activeconns),
76752 atomic_read(&dest->inactconns));
572b4308 76753@@ -2509,7 +2510,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
76754
76755 entry.addr = dest->addr.ip;
76756 entry.port = dest->port;
76757- entry.conn_flags = atomic_read(&dest->conn_flags);
76758+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
76759 entry.weight = atomic_read(&dest->weight);
76760 entry.u_threshold = dest->u_threshold;
76761 entry.l_threshold = dest->l_threshold;
572b4308 76762@@ -3042,7 +3043,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
8308f9c9
MT
76763 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
76764
76765 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
76766- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76767+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
76768 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
76769 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
76770 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8a0d6d6..90ec197 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -649,7 +649,7 @@ control:
 * i.e only increment in_pkts for Templates.
 */
 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
- int pkts = atomic_add_return(1, &cp->in_pkts);
+ int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);

 if (pkts % sysctl_sync_period(ipvs) != 1)
 return;
@@ -795,7 +795,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,

 if (opt)
 memcpy(&cp->in_seq, opt, sizeof(*opt));
- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
 cp->state = state;
 cp->old_state = cp->state;
 /*
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 7fd66de..e6fb361 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -1151,7 +1151,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 else
 rc = NF_ACCEPT;
 /* do not touch skb anymore */
- atomic_inc(&cp->in_pkts);
+ atomic_inc_unchecked(&cp->in_pkts);
 goto out;
 }

@@ -1272,7 +1272,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 else
 rc = NF_ACCEPT;
 /* do not touch skb anymore */
- atomic_inc(&cp->in_pkts);
+ atomic_inc_unchecked(&cp->in_pkts);
 goto out;
 }

diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 66b2c54..c7884e3 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -70,7 +70,7 @@ struct nfulnl_instance {
 };

 static DEFINE_SPINLOCK(instances_lock);
-static atomic_t global_seq;
+static atomic_unchecked_t global_seq;

 #define INSTANCE_BUCKETS 16
 static struct hlist_head instance_table[INSTANCE_BUCKETS];
@@ -502,7 +502,7 @@ __build_packet_message(struct nfulnl_instance *inst,
 /* global sequence number */
 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
- htonl(atomic_inc_return(&global_seq)));
+ htonl(atomic_inc_return_unchecked(&global_seq)));

 if (data_len) {
 struct nlattr *nla;
diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
new file mode 100644
index 0000000..6905327
--- /dev/null
+++ b/net/netfilter/xt_gradm.c
@@ -0,0 +1,51 @@
+/*
+ * gradm match for netfilter